check.c 101 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright (C) 2015-2017 Josh Poimboeuf <[email protected]>
  4. */
  5. #include <string.h>
  6. #include <stdlib.h>
  7. #include <inttypes.h>
  8. #include <sys/mman.h>
  9. #include <arch/elf.h>
  10. #include <objtool/builtin.h>
  11. #include <objtool/cfi.h>
  12. #include <objtool/arch.h>
  13. #include <objtool/check.h>
  14. #include <objtool/special.h>
  15. #include <objtool/warn.h>
  16. #include <objtool/endianness.h>
  17. #include <linux/objtool.h>
  18. #include <linux/hashtable.h>
  19. #include <linux/kernel.h>
  20. #include <linux/static_call_types.h>
/*
 * One entry in an instruction's list of alternatives (insn->alts): an
 * alternative instruction sequence that may replace the original at runtime.
 * skip_orig tells the validator not to follow the original instruction
 * stream when this alternative is taken.
 */
struct alternative {
	struct list_head list;
	struct instruction *insn;
	bool skip_orig;
};

/*
 * CFI interning stats: nr_cfi counts allocations (see cfi_alloc()),
 * nr_cfi_cache counts hash hits (see cfi_hash_find_or_add()).
 * nr_cfi_reused is presumably bumped elsewhere in this file.
 */
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

/* Arch-provided CFA/register state at function entry, and derived states. */
static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
/*
 * Look up the decoded instruction at (sec, offset) in the file-wide
 * instruction hash table; returns NULL if no instruction was decoded there.
 */
struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	/* Bucket is keyed on (section, offset); verify the exact match. */
	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}
  40. static struct instruction *next_insn_same_sec(struct objtool_file *file,
  41. struct instruction *insn)
  42. {
  43. struct instruction *next = list_next_entry(insn, list);
  44. if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
  45. return NULL;
  46. return next;
  47. }
/*
 * Return the next instruction belonging to the same function as @insn.
 * When the parent function's instructions run out, continue into its
 * child subfunction (func->cfunc — presumably the compiler-split ".cold"
 * part; confirm against elf.c's symbol pairing).
 */
static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);
	struct symbol *func = insn->func;

	if (!func)
		return NULL;

	/* Still inside the same function's instruction run? */
	if (&next->list != &file->insn_list && next->func == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}
  63. static struct instruction *prev_insn_same_sym(struct objtool_file *file,
  64. struct instruction *insn)
  65. {
  66. struct instruction *prev = list_prev_entry(insn, list);
  67. if (&prev->list != &file->insn_list && prev->func == insn->func)
  68. return prev;
  69. return NULL;
  70. }
/* Iterate over every instruction of @func, following into its subfunction. */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

/* Iterate over @sym's instructions by raw [offset, offset+len) range only. */
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && &insn->list != &file->insn_list &&			\
	     insn->sec == sym->sec &&					\
	     insn->offset < sym->offset + sym->len;			\
	     insn = list_next_entry(insn, list))

/* Walk backwards from (but not including) @insn while still inside @sym. */
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = list_prev_entry(insn, list);			\
	     &insn->list != &file->insn_list &&				\
	     insn->sec == sym->sec && insn->offset >= sym->offset;	\
	     insn = list_prev_entry(insn, list))

/* Continue from @insn (inclusive) to the end of its section. */
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Continue from the instruction after @insn to the end of its section. */
#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
  91. static bool is_jump_table_jump(struct instruction *insn)
  92. {
  93. struct alt_group *alt_group = insn->alt_group;
  94. if (insn->jump_table)
  95. return true;
  96. /* Retpoline alternative for a jump table? */
  97. return alt_group && alt_group->orig_group &&
  98. alt_group->orig_group->first_insn->jump_table;
  99. }
  100. static bool is_sibling_call(struct instruction *insn)
  101. {
  102. /*
  103. * Assume only ELF functions can make sibling calls. This ensures
  104. * sibling call detection consistency between vmlinux.o and individual
  105. * objects.
  106. */
  107. if (!insn->func)
  108. return false;
  109. /* An indirect jump is either a sibling call or a jump to a table. */
  110. if (insn->type == INSN_JUMP_DYNAMIC)
  111. return !is_jump_table_jump(insn);
  112. /* add_jump_destinations() sets insn->call_dest for sibling calls. */
  113. return (is_static_jump(insn) && insn->call_dest);
  114. }
  115. /*
  116. * This checks to see if the given function is a "noreturn" function.
  117. *
  118. * For global functions which are outside the scope of this object file, we
  119. * have to keep a manual list of them.
  120. *
  121. * For local functions, we have to detect them manually by simply looking for
  122. * the lack of a return instruction.
  123. */
/*
 * Core of the noreturn check: returns true iff @func is known to never
 * return.  @recursion bounds the sibling-call chase below.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/*
	 * Unfortunately these have to be hard coded because the noreturn
	 * attribute isn't provided in ELF data. Keep 'em sorted.
	 */
	static const char * const global_noreturns[] = {
		"__invalid_creds",
		"__module_put_and_kthread_exit",
		"__reiserfs_panic",
		"__stack_chk_fail",
		"__ubsan_handle_builtin_unreachable",
		"cpu_bringup_and_idle",
		"cpu_startup_entry",
		"do_exit",
		"do_group_exit",
		"do_task_dead",
		"ex_handler_msr_mce",
		"fortify_panic",
		"kthread_complete_and_exit",
		"kthread_exit",
		"kunit_try_catch_throw",
		"lbug_with_loc",
		"machine_real_restart",
		"make_task_dead",
		"panic",
		"rewind_stack_and_make_dead",
		"sev_es_terminate",
		"snp_abort",
		"stop_this_cpu",
		"usercopy_abort",
		"xen_start_kernel",
	};

	if (!func)
		return false;

	/* A weak symbol may be overridden by a returning implementation. */
	if (func->bind == STB_WEAK)
		return false;

	/* Global functions: consult the hard-coded list above. */
	if (func->bind == STB_GLOBAL)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn->func)
		return false;

	/* Any explicit return instruction means the function returns. */
	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, dest->func, recursion+1);
		}
	}

	return true;
}
/* Public entry point: does @func never return?  Starts recursion at 0. */
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}
  210. static void init_cfi_state(struct cfi_state *cfi)
  211. {
  212. int i;
  213. for (i = 0; i < CFI_NUM_REGS; i++) {
  214. cfi->regs[i].base = CFI_UNDEFINED;
  215. cfi->vals[i].base = CFI_UNDEFINED;
  216. }
  217. cfi->cfa.base = CFI_UNDEFINED;
  218. cfi->drap_reg = CFI_UNDEFINED;
  219. cfi->drap_offset = -1;
  220. }
/* Initialize @state for a fresh validation walk starting in @sec. */
static void init_insn_state(struct objtool_file *file, struct insn_state *state,
			    struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	/*
	 * We need the full vmlinux for noinstr validation, otherwise we can
	 * not correctly determine insn->call_dest->sec (external symbols do
	 * not have a section).
	 */
	if (opts.link && opts.noinstr && sec)
		state->noinstr = sec->noinstr;
}
  234. static struct cfi_state *cfi_alloc(void)
  235. {
  236. struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
  237. if (!cfi) {
  238. WARN("calloc failed");
  239. exit(1);
  240. }
  241. nr_cfi++;
  242. return cfi;
  243. }
/* log2 of the number of buckets in cfi_hash. */
static int cfi_bits;
/* Chained hash table interning identical cfi_state objects. */
static struct hlist_head *cfi_hash;

/*
 * Compare two cfi_state objects, skipping the embedded hlist_node.
 * Returns 0 (false) when they are equal, memcmp-style, hence the
 * "!cficmp(a, b)" equality idiom at the call sites.
 * NOTE(review): this assumes 'hash' is the FIRST member of struct
 * cfi_state and that any padding is deterministically initialized —
 * confirm against the struct layout in objtool/cfi.h.
 */
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}

/* Hash key over everything after the embedded hlist_node. */
static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}
  257. static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
  258. {
  259. struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
  260. struct cfi_state *obj;
  261. hlist_for_each_entry(obj, head, hash) {
  262. if (!cficmp(cfi, obj)) {
  263. nr_cfi_cache++;
  264. return obj;
  265. }
  266. }
  267. obj = cfi_alloc();
  268. *obj = *cfi;
  269. hlist_add_head(&obj->hash, head);
  270. return obj;
  271. }
  272. static void cfi_hash_add(struct cfi_state *cfi)
  273. {
  274. struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
  275. hlist_add_head(&cfi->hash, head);
  276. }
  277. static void *cfi_hash_alloc(unsigned long size)
  278. {
  279. cfi_bits = max(10, ilog2(size));
  280. cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
  281. PROT_READ|PROT_WRITE,
  282. MAP_PRIVATE|MAP_ANON, -1, 0);
  283. if (cfi_hash == (void *)-1L) {
  284. WARN("mmap fail cfi_hash");
  285. cfi_hash = NULL;
  286. } else if (opts.stats) {
  287. printf("cfi_bits: %d\n", cfi_bits);
  288. }
  289. return cfi_hash;
  290. }
/* Stats counters for the decode pass and the validation walk. */
static unsigned long nr_insns;
static unsigned long nr_insns_visited;

/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
	int ret;

	for_each_sec(file, sec) {

		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		/* Executable sections are "text" except alternatives/discards. */
		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strncmp(sec->name, ".text..__x86.", 13))
			sec->noinstr = true;

		/*
		 * Decode the whole section; insn->len (set by the decoder)
		 * advances the cursor, so a zero-length decode would loop
		 * forever — presumably the arch decoder guarantees len > 0.
		 */
		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
			insn = malloc(sizeof(*insn));
			if (!insn) {
				WARN("malloc failed");
				return -1;
			}
			memset(insn, 0, sizeof(*insn));
			INIT_LIST_HEAD(&insn->alts);
			INIT_LIST_HEAD(&insn->stack_ops);
			INIT_LIST_HEAD(&insn->call_node);

			insn->sec = sec;
			insn->offset = offset;

			ret = arch_decode_instruction(file, sec, offset,
						      sec->sh.sh_size - offset,
						      &insn->len, &insn->type,
						      &insn->immediate,
						      &insn->stack_ops);
			if (ret)
				goto err;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			list_add_tail(&insn->list, &file->insn_list);
			nr_insns++;
		}

		/* Associate each decoded instruction with its function symbol. */
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				WARN("%s(): can't find starting instruction",
				     func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->func = func;
				/* Track ENDBRs: at function entry vs interior. */
				if (insn->type == INSN_ENDBR && list_empty(&insn->call_node)) {
					if (insn->offset == insn->func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;

err:
	/* Only the failing insn is freed; earlier ones are owned by the list. */
	free(insn);
	return ret;
}
/*
 * Read the pv_ops[] .data table to find the static initialized values.
 */
static int add_pv_ops(struct objtool_file *file, const char *symname)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *rel;
	int idx;

	sym = find_symbol_by_name(file->elf, symname);
	if (!sym)
		return 0;

	/* Walk every relocation that lands inside the table symbol. */
	off = sym->offset;
	end = off + sym->len;
	for (;;) {
		rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!rel)
			break;

		/* Section-relative relocs need the target symbol resolved. */
		func = rel->sym;
		if (func->type == STT_SECTION)
			func = find_symbol_by_offset(rel->sym->sec, rel->addend);

		/* Slot index: one pointer-sized entry per op. */
		idx = (rel->offset - sym->offset) / sizeof(unsigned long);

		objtool_pv_add(file, idx, func);

		/* Resume the search just past this reloc. */
		off = rel->offset + 1;
		if (off > end)
			break;
	}

	return 0;
}
  402. /*
  403. * Allocate and initialize file->pv_ops[].
  404. */
  405. static int init_pv_ops(struct objtool_file *file)
  406. {
  407. static const char *pv_ops_tables[] = {
  408. "pv_ops",
  409. "xen_cpu_ops",
  410. "xen_irq_ops",
  411. "xen_mmu_ops",
  412. NULL,
  413. };
  414. const char *pv_ops;
  415. struct symbol *sym;
  416. int idx, nr;
  417. if (!opts.noinstr)
  418. return 0;
  419. file->pv_ops = NULL;
  420. sym = find_symbol_by_name(file->elf, "pv_ops");
  421. if (!sym)
  422. return 0;
  423. nr = sym->len / sizeof(unsigned long);
  424. file->pv_ops = calloc(sizeof(struct pv_state), nr);
  425. if (!file->pv_ops)
  426. return -1;
  427. for (idx = 0; idx < nr; idx++)
  428. INIT_LIST_HEAD(&file->pv_ops[idx].targets);
  429. for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++)
  430. add_pv_ops(file, pv_ops);
  431. return 0;
  432. }
  433. static struct instruction *find_last_insn(struct objtool_file *file,
  434. struct section *sec)
  435. {
  436. struct instruction *insn = NULL;
  437. unsigned int offset;
  438. unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;
  439. for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
  440. insn = find_insn(file, sec, offset);
  441. return insn;
  442. }
  443. /*
  444. * Mark "ud2" instructions and manually annotated dead ends.
  445. */
  446. static int add_dead_ends(struct objtool_file *file)
  447. {
  448. struct section *sec;
  449. struct reloc *reloc;
  450. struct instruction *insn;
  451. /*
  452. * Check for manually annotated dead ends.
  453. */
  454. sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
  455. if (!sec)
  456. goto reachable;
  457. list_for_each_entry(reloc, &sec->reloc_list, list) {
  458. if (reloc->sym->type != STT_SECTION) {
  459. WARN("unexpected relocation symbol type in %s", sec->name);
  460. return -1;
  461. }
  462. insn = find_insn(file, reloc->sym->sec, reloc->addend);
  463. if (insn)
  464. insn = list_prev_entry(insn, list);
  465. else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
  466. insn = find_last_insn(file, reloc->sym->sec);
  467. if (!insn) {
  468. WARN("can't find unreachable insn at %s+0x%" PRIx64,
  469. reloc->sym->sec->name, reloc->addend);
  470. return -1;
  471. }
  472. } else {
  473. WARN("can't find unreachable insn at %s+0x%" PRIx64,
  474. reloc->sym->sec->name, reloc->addend);
  475. return -1;
  476. }
  477. insn->dead_end = true;
  478. }
  479. reachable:
  480. /*
  481. * These manually annotated reachable checks are needed for GCC 4.4,
  482. * where the Linux unreachable() macro isn't supported. In that case
  483. * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
  484. * not a dead end.
  485. */
  486. sec = find_section_by_name(file->elf, ".rela.discard.reachable");
  487. if (!sec)
  488. return 0;
  489. list_for_each_entry(reloc, &sec->reloc_list, list) {
  490. if (reloc->sym->type != STT_SECTION) {
  491. WARN("unexpected relocation symbol type in %s", sec->name);
  492. return -1;
  493. }
  494. insn = find_insn(file, reloc->sym->sec, reloc->addend);
  495. if (insn)
  496. insn = list_prev_entry(insn, list);
  497. else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
  498. insn = find_last_insn(file, reloc->sym->sec);
  499. if (!insn) {
  500. WARN("can't find reachable insn at %s+0x%" PRIx64,
  501. reloc->sym->sec->name, reloc->addend);
  502. return -1;
  503. }
  504. } else {
  505. WARN("can't find reachable insn at %s+0x%" PRIx64,
  506. reloc->sym->sec->name, reloc->addend);
  507. return -1;
  508. }
  509. insn->dead_end = false;
  510. }
  511. return 0;
  512. }
/*
 * Generate the .static_call_sites section: one static_call_site entry per
 * queued trampoline call, with a PC32 reloc for the call address and one
 * for the corresponding static_call_key symbol.
 *
 * Returns 0 on success (or if there is nothing to do), -1 on error.
 */
static int create_static_call_sections(struct objtool_file *file)
{
	struct section *sec;
	struct static_call_site *site;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->static_call_list);
		WARN("file already has .static_call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	/* Count the queued call sites to size the new section. */
	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
				 sizeof(struct static_call_site), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		site = (struct static_call_site *)sec->data->d_buf + idx;
		memset(site, 0, sizeof(struct static_call_site));

		/* populate reloc for 'addr' */
		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(struct static_call_site),
					  R_X86_64_PC32,
					  insn->sec, insn->offset))
			return -1;

		/* find key symbol */
		key_name = strdup(insn->call_dest->name);
		if (!key_name) {
			perror("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			WARN("static_call: trampoline name malformed: %s", key_name);
			free(key_name);
			return -1;
		}

		/*
		 * Derive the key symbol name in place: point 'tmp' into the
		 * duplicated trampoline name so that overwriting with the
		 * (shorter) key prefix yields "<KEY_PREFIX><base name>".
		 */
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!opts.module) {
				WARN("static_call: can't find static_call_key symbol: %s", tmp);
				free(key_name);
				return -1;
			}

			/*
			 * For modules(), the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address. This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn->call_dest;
		}
		free(key_name);

		/* populate reloc for 'key' (offset 4 = the 'key' field) */
		if (elf_add_reloc(file->elf, sec,
				  idx * sizeof(struct static_call_site) + 4,
				  R_X86_64_PC32, key_sym,
				  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}
  589. static int create_retpoline_sites_sections(struct objtool_file *file)
  590. {
  591. struct instruction *insn;
  592. struct section *sec;
  593. int idx;
  594. sec = find_section_by_name(file->elf, ".retpoline_sites");
  595. if (sec) {
  596. WARN("file already has .retpoline_sites, skipping");
  597. return 0;
  598. }
  599. idx = 0;
  600. list_for_each_entry(insn, &file->retpoline_call_list, call_node)
  601. idx++;
  602. if (!idx)
  603. return 0;
  604. sec = elf_create_section(file->elf, ".retpoline_sites", 0,
  605. sizeof(int), idx);
  606. if (!sec) {
  607. WARN("elf_create_section: .retpoline_sites");
  608. return -1;
  609. }
  610. idx = 0;
  611. list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
  612. int *site = (int *)sec->data->d_buf + idx;
  613. *site = 0;
  614. if (elf_add_reloc_to_insn(file->elf, sec,
  615. idx * sizeof(int),
  616. R_X86_64_PC32,
  617. insn->sec, insn->offset)) {
  618. WARN("elf_add_reloc_to_insn: .retpoline_sites");
  619. return -1;
  620. }
  621. idx++;
  622. }
  623. return 0;
  624. }
  625. static int create_return_sites_sections(struct objtool_file *file)
  626. {
  627. struct instruction *insn;
  628. struct section *sec;
  629. int idx;
  630. sec = find_section_by_name(file->elf, ".return_sites");
  631. if (sec) {
  632. WARN("file already has .return_sites, skipping");
  633. return 0;
  634. }
  635. idx = 0;
  636. list_for_each_entry(insn, &file->return_thunk_list, call_node)
  637. idx++;
  638. if (!idx)
  639. return 0;
  640. sec = elf_create_section(file->elf, ".return_sites", 0,
  641. sizeof(int), idx);
  642. if (!sec) {
  643. WARN("elf_create_section: .return_sites");
  644. return -1;
  645. }
  646. idx = 0;
  647. list_for_each_entry(insn, &file->return_thunk_list, call_node) {
  648. int *site = (int *)sec->data->d_buf + idx;
  649. *site = 0;
  650. if (elf_add_reloc_to_insn(file->elf, sec,
  651. idx * sizeof(int),
  652. R_X86_64_PC32,
  653. insn->sec, insn->offset)) {
  654. WARN("elf_add_reloc_to_insn: .return_sites");
  655. return -1;
  656. }
  657. idx++;
  658. }
  659. return 0;
  660. }
  661. static int create_ibt_endbr_seal_sections(struct objtool_file *file)
  662. {
  663. struct instruction *insn;
  664. struct section *sec;
  665. int idx;
  666. sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
  667. if (sec) {
  668. WARN("file already has .ibt_endbr_seal, skipping");
  669. return 0;
  670. }
  671. idx = 0;
  672. list_for_each_entry(insn, &file->endbr_list, call_node)
  673. idx++;
  674. if (opts.stats) {
  675. printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
  676. printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int);
  677. printf("ibt: superfluous ENDBR: %d\n", idx);
  678. }
  679. if (!idx)
  680. return 0;
  681. sec = elf_create_section(file->elf, ".ibt_endbr_seal", 0,
  682. sizeof(int), idx);
  683. if (!sec) {
  684. WARN("elf_create_section: .ibt_endbr_seal");
  685. return -1;
  686. }
  687. idx = 0;
  688. list_for_each_entry(insn, &file->endbr_list, call_node) {
  689. int *site = (int *)sec->data->d_buf + idx;
  690. *site = 0;
  691. if (elf_add_reloc_to_insn(file->elf, sec,
  692. idx * sizeof(int),
  693. R_X86_64_PC32,
  694. insn->sec, insn->offset)) {
  695. WARN("elf_add_reloc_to_insn: .ibt_endbr_seal");
  696. return -1;
  697. }
  698. idx++;
  699. }
  700. return 0;
  701. }
  702. static int create_mcount_loc_sections(struct objtool_file *file)
  703. {
  704. struct section *sec;
  705. unsigned long *loc;
  706. struct instruction *insn;
  707. int idx;
  708. sec = find_section_by_name(file->elf, "__mcount_loc");
  709. if (sec) {
  710. INIT_LIST_HEAD(&file->mcount_loc_list);
  711. WARN("file already has __mcount_loc section, skipping");
  712. return 0;
  713. }
  714. if (list_empty(&file->mcount_loc_list))
  715. return 0;
  716. idx = 0;
  717. list_for_each_entry(insn, &file->mcount_loc_list, call_node)
  718. idx++;
  719. sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
  720. if (!sec)
  721. return -1;
  722. idx = 0;
  723. list_for_each_entry(insn, &file->mcount_loc_list, call_node) {
  724. loc = (unsigned long *)sec->data->d_buf + idx;
  725. memset(loc, 0, sizeof(unsigned long));
  726. if (elf_add_reloc_to_insn(file->elf, sec,
  727. idx * sizeof(unsigned long),
  728. R_X86_64_64,
  729. insn->sec, insn->offset))
  730. return -1;
  731. idx++;
  732. }
  733. return 0;
  734. }
  735. /*
  736. * Warnings shouldn't be reported for ignored functions.
  737. */
  738. static void add_ignores(struct objtool_file *file)
  739. {
  740. struct instruction *insn;
  741. struct section *sec;
  742. struct symbol *func;
  743. struct reloc *reloc;
  744. sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
  745. if (!sec)
  746. return;
  747. list_for_each_entry(reloc, &sec->reloc_list, list) {
  748. switch (reloc->sym->type) {
  749. case STT_FUNC:
  750. func = reloc->sym;
  751. break;
  752. case STT_SECTION:
  753. func = find_func_by_offset(reloc->sym->sec, reloc->addend);
  754. if (!func)
  755. continue;
  756. break;
  757. default:
  758. WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
  759. continue;
  760. }
  761. func_for_each_insn(file, func, insn)
  762. insn->ignore = true;
  763. }
  764. }
/*
 * This is a whitelist of functions that is allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 *
 * NULL-terminated; consumed by add_uaccess_safe().
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"__kcsan_mb",
	"__kcsan_wmb",
	"__kcsan_rmb",
	"__kcsan_release",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_volatile_read1",
	"__tsan_volatile_read2",
	"__tsan_volatile_read4",
	"__tsan_volatile_read8",
	"__tsan_volatile_read16",
	"__tsan_volatile_write1",
	"__tsan_volatile_write2",
	"__tsan_volatile_write4",
	"__tsan_volatile_write8",
	"__tsan_volatile_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	"__tsan_unaligned_read16",
	"__tsan_unaligned_write16",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* KMSAN */
	"kmsan_copy_to_user",
	"kmsan_report",
	"kmsan_unpoison_entry_regs",
	"kmsan_unpoison_memory",
	"__msan_chain_origin",
	"__msan_get_context_state",
	"__msan_instrument_asm_store",
	"__msan_metadata_ptr_for_load_1",
	"__msan_metadata_ptr_for_load_2",
	"__msan_metadata_ptr_for_load_4",
	"__msan_metadata_ptr_for_load_8",
	"__msan_metadata_ptr_for_load_n",
	"__msan_metadata_ptr_for_store_1",
	"__msan_metadata_ptr_for_store_2",
	"__msan_metadata_ptr_for_store_4",
	"__msan_metadata_ptr_for_store_8",
	"__msan_metadata_ptr_for_store_n",
	"__msan_poison_alloca",
	"__msan_warning",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
	"clear_user_erms",
	"clear_user_rep_good",
	"clear_user_original",
	NULL
};
  946. static void add_uaccess_safe(struct objtool_file *file)
  947. {
  948. struct symbol *func;
  949. const char **name;
  950. if (!opts.uaccess)
  951. return;
  952. for (name = uaccess_safe_builtin; *name; name++) {
  953. func = find_symbol_by_name(file->elf, *name);
  954. if (!func)
  955. continue;
  956. func->uaccess_safe = true;
  957. }
  958. }
  959. /*
  960. * FIXME: For now, just ignore any alternatives which add retpolines. This is
  961. * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
  962. * But it at least allows objtool to understand the control flow *around* the
  963. * retpoline.
  964. */
  965. static int add_ignore_alternatives(struct objtool_file *file)
  966. {
  967. struct section *sec;
  968. struct reloc *reloc;
  969. struct instruction *insn;
  970. sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
  971. if (!sec)
  972. return 0;
  973. list_for_each_entry(reloc, &sec->reloc_list, list) {
  974. if (reloc->sym->type != STT_SECTION) {
  975. WARN("unexpected relocation symbol type in %s", sec->name);
  976. return -1;
  977. }
  978. insn = find_insn(file, reloc->sym->sec, reloc->addend);
  979. if (!insn) {
  980. WARN("bad .discard.ignore_alts entry");
  981. return -1;
  982. }
  983. insn->ignore_alts = true;
  984. }
  985. return 0;
  986. }
/*
 * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
 * will be added to the .retpoline_sites section.
 */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	/* Weak default: no symbol is a retpoline thunk unless the arch overrides. */
	return false;
}
/*
 * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
 * will be added to the .return_sites section.
 */
__weak bool arch_is_rethunk(struct symbol *sym)
{
	/* Weak default: no symbol is a return thunk unless the arch overrides. */
	return false;
}
/*
 * Symbols that are embedded inside other instructions, because sometimes crazy
 * code exists. These are mostly ignored for validation purposes.
 */
__weak bool arch_is_embedded_insn(struct symbol *sym)
{
	/* Weak default: no embedded-instruction symbols unless the arch overrides. */
	return false;
}
/*
 * Sentinel cached in insn->reloc to record "lookup already done, no reloc
 * found", so the reloc tables are searched at most once per instruction.
 */
#define NEGATIVE_RELOC ((void *)-1L)

/*
 * Return the relocation applying to 'insn', or NULL if there is none (or
 * if 'file' is NULL and nothing is cached yet).  The result is memoized
 * in insn->reloc; a failed lookup is memoized as NEGATIVE_RELOC.
 */
static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	if (insn->reloc == NEGATIVE_RELOC)
		return NULL;

	if (!insn->reloc) {
		if (!file)
			return NULL;

		insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
						       insn->offset, insn->len);
		if (!insn->reloc) {
			/* Cache the negative result for next time. */
			insn->reloc = NEGATIVE_RELOC;
			return NULL;
		}
	}

	return insn->reloc;
}
  1028. static void remove_insn_ops(struct instruction *insn)
  1029. {
  1030. struct stack_op *op, *tmp;
  1031. list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
  1032. list_del(&op->list);
  1033. free(op);
  1034. }
  1035. }
/*
 * Classify one call site and apply any required rewrites: queue it on the
 * static-call / retpoline lists for later section generation, NOP out
 * instrumentation calls in noinstr text (--hack-noinstr), rewrite
 * __fentry__ calls into NOPs and record them for __mcount_loc (--mcount),
 * or mark the site as a dead end when the callee never returns.
 */
static void annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn->call_dest;

	/*
	 * NOTE(review): when insn->call_dest is NULL this dereferences
	 * 'reloc' unconditionally -- assumes every such call site has a
	 * reloc; confirm with callers.
	 */
	if (!sym)
		sym = reloc->sym;

	/*
	 * Alternative replacement code is just template code which is
	 * sometimes copied to the original instruction. For now, don't
	 * annotate it. (In the future we might consider annotating the
	 * original instruction if/when it ever makes sense to do so.)
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement"))
		return;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute so they need a little help, NOP out any such calls from
	 * noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		/* Kill the reloc so the patched bytes aren't re-relocated. */
		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		/* Tail calls become a RET; regular calls become NOPs. */
		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       sibling ? arch_ret_insn(insn->len)
			               : arch_nop_insn(insn->len));

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset);

		/* Kill the reloc and NOP out the __fentry__ call. */
		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       arch_nop_insn(insn->len));

		insn->type = INSN_NOP;

		/* Remember the site so __mcount_loc can be generated. */
		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return;
	}

	/* Calls to noreturn functions don't fall through. */
	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;
}
  1102. static void add_call_dest(struct objtool_file *file, struct instruction *insn,
  1103. struct symbol *dest, bool sibling)
  1104. {
  1105. insn->call_dest = dest;
  1106. if (!dest)
  1107. return;
  1108. /*
  1109. * Whatever stack impact regular CALLs have, should be undone
  1110. * by the RETURN of the called function.
  1111. *
  1112. * Annotated intra-function calls retain the stack_ops but
  1113. * are converted to JUMP, see read_intra_function_calls().
  1114. */
  1115. remove_insn_ops(insn);
  1116. annotate_call_site(file, insn, sibling);
  1117. }
  1118. static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
  1119. {
  1120. /*
  1121. * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
  1122. * so convert them accordingly.
  1123. */
  1124. switch (insn->type) {
  1125. case INSN_CALL:
  1126. insn->type = INSN_CALL_DYNAMIC;
  1127. break;
  1128. case INSN_JUMP_UNCONDITIONAL:
  1129. insn->type = INSN_JUMP_DYNAMIC;
  1130. break;
  1131. case INSN_JUMP_CONDITIONAL:
  1132. insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
  1133. break;
  1134. default:
  1135. return;
  1136. }
  1137. insn->retpoline_safe = true;
  1138. /*
  1139. * Whatever stack impact regular CALLs have, should be undone
  1140. * by the RETURN of the called function.
  1141. *
  1142. * Annotated intra-function calls retain the stack_ops but
  1143. * are converted to JUMP, see read_intra_function_calls().
  1144. */
  1145. remove_insn_ops(insn);
  1146. annotate_call_site(file, insn, false);
  1147. }
  1148. static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
  1149. {
  1150. /*
  1151. * Return thunk tail calls are really just returns in disguise,
  1152. * so convert them accordingly.
  1153. */
  1154. insn->type = INSN_RETURN;
  1155. insn->retpoline_safe = true;
  1156. if (add)
  1157. list_add_tail(&insn->call_node, &file->return_thunk_list);
  1158. }
  1159. static bool same_function(struct instruction *insn1, struct instruction *insn2)
  1160. {
  1161. return insn1->func->pfunc == insn2->func->pfunc;
  1162. }
  1163. static bool is_first_func_insn(struct objtool_file *file, struct instruction *insn)
  1164. {
  1165. if (insn->offset == insn->func->offset)
  1166. return true;
  1167. if (opts.ibt) {
  1168. struct instruction *prev = prev_insn_same_sym(file, insn);
  1169. if (prev && prev->type == INSN_ENDBR &&
  1170. insn->offset == insn->func->offset + prev->len)
  1171. return true;
  1172. }
  1173. return false;
  1174. }
/*
 * Find the destination instructions for all jumps.
 *
 * For each static jump: resolve the target section/offset (directly from
 * the instruction, or via its reloc), divert retpoline/return-thunk
 * targets to their special handling, classify sibling calls, and link
 * parent/child for GCC cold subfunctions.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn, *jump_dest;
	struct reloc *reloc;
	struct section *dest_sec;
	unsigned long dest_off;

	for_each_insn(file, insn) {
		if (insn->jump_dest) {
			/*
			 * handle_group_alt() may have previously set
			 * 'jump_dest' for some alternatives.
			 */
			continue;
		}
		if (!is_static_jump(insn))
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* No reloc: destination is encoded in the insn. */
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
		} else if (reloc->sym->type == STT_SECTION) {
			/* Section symbol: resolve by section + addend. */
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc->addend);
		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);
			continue;
		} else if (reloc->sym->return_thunk) {
			add_return_call(file, insn, true);
			continue;
		} else if (insn->func) {
			/*
			 * External sibling call or internal sibling call with
			 * STT_FUNC reloc.
			 */
			add_call_dest(file, insn, reloc->sym, true);
			continue;
		} else if (reloc->sym->sec->idx) {
			/* Non-function code jumping to a real section. */
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc->addend);
		} else {
			/* non-func asm code jumping to another file */
			continue;
		}

		jump_dest = find_insn(file, dest_sec, dest_off);
		if (!jump_dest) {
			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);

			/*
			 * This is a special case for retbleed_untrain_ret().
			 * It jumps to __x86_return_thunk(), but objtool
			 * can't find the thunk's starting RET
			 * instruction, because the RET is also in the
			 * middle of another instruction. Objtool only
			 * knows about the outer instruction.
			 */
			if (sym && sym->embedded_insn) {
				add_return_call(file, insn, false);
				continue;
			}

			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
				  insn->sec, insn->offset, dest_sec->name,
				  dest_off);
			return -1;
		}

		/*
		 * Cross-function jump.
		 */
		if (insn->func && jump_dest->func &&
		    insn->func != jump_dest->func) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions. This is _mostly_ redundant with a
			 * similar initialization in read_symbols().
			 *
			 * If a function has aliases, we want the *first* such
			 * function in the symbol table to be the subfunction's
			 * parent. In that case we overwrite the
			 * initialization done in read_symbols().
			 *
			 * However this code can't completely replace the
			 * read_symbols() code because this doesn't detect the
			 * case where the parent function's only reference to a
			 * subfunction is through a jump table.
			 */
			if (!strstr(insn->func->name, ".cold") &&
			    strstr(jump_dest->func->name, ".cold")) {
				insn->func->cfunc = jump_dest->func;
				jump_dest->func->pfunc = insn->func;

			} else if (!same_function(insn, jump_dest) &&
				   is_first_func_insn(file, jump_dest)) {
				/*
				 * Internal sibling call without reloc or with
				 * STT_SECTION reloc.
				 */
				add_call_dest(file, insn, jump_dest->func, true);
				continue;
			}
		}

		insn->jump_dest = jump_dest;
	}

	return 0;
}
  1280. static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
  1281. {
  1282. struct symbol *call_dest;
  1283. call_dest = find_func_by_offset(sec, offset);
  1284. if (!call_dest)
  1285. call_dest = find_symbol_by_offset(sec, offset);
  1286. return call_dest;
  1287. }
/*
 * Find the destination instructions for all calls.
 *
 * Direct calls without a reloc resolve within the same section; calls
 * with an STT_SECTION reloc resolve by section + addend; retpoline-thunk
 * targets are diverted to add_retpoline_call(); everything else uses the
 * reloc symbol directly.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* Direct call, target in this very section. */
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			/* Annotate first; the checks below run even if dest is NULL. */
			add_call_dest(file, insn, dest, false);

			if (insn->ignore)
				continue;

			if (!insn->call_dest) {
				WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
				return -1;
			}

			if (insn->func && insn->call_dest->type != STT_FUNC) {
				WARN_FUNC("unsupported call to non-function",
					  insn->sec, insn->offset);
				return -1;
			}

		} else if (reloc->sym->type == STT_SECTION) {
			/* Reloc against a section symbol: resolve by offset. */
			dest_off = arch_dest_reloc_offset(reloc->addend);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				WARN_FUNC("can't find call dest symbol at %s+0x%lx",
					  insn->sec, insn->offset,
					  reloc->sym->sec->name,
					  dest_off);
				return -1;
			}

			add_call_dest(file, insn, dest, false);

		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);

		} else
			add_call_dest(file, insn, reloc->sym, false);
	}

	return 0;
}
/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
 *
 * Builds an alt_group pair: one covering the original instructions
 * starting at 'orig_insn' (orig_len bytes) and one covering the
 * replacement starting at '*new_insn' (new_len bytes).  Both share one
 * CFI array so propagate_alt_cfi() can merge stack states.  If the
 * replacement is shorter than the original, a fake NOP instruction is
 * appended to equalize the group sizes.
 *
 * On success, *new_insn points at the first replacement instruction (the
 * fake NOP when new_len == 0).  Returns 0 on success, -1 on error.
 */
static int handle_group_alt(struct objtool_file *file,
			    struct special_alt *special_alt,
			    struct instruction *orig_insn,
			    struct instruction **new_insn)
{
	struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
	struct alt_group *orig_alt_group, *new_alt_group;
	unsigned long dest_off;

	orig_alt_group = malloc(sizeof(*orig_alt_group));
	if (!orig_alt_group) {
		WARN("malloc failed");
		return -1;
	}
	/* One CFI slot per original byte, shared with the replacement group. */
	orig_alt_group->cfi = calloc(special_alt->orig_len,
				     sizeof(struct cfi_state *));
	if (!orig_alt_group->cfi) {
		WARN("calloc failed");
		return -1;
	}

	/* Tag every original instruction with its group. */
	last_orig_insn = NULL;
	insn = orig_insn;
	sec_for_each_insn_from(file, insn) {
		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
			break;

		insn->alt_group = orig_alt_group;
		last_orig_insn = insn;
	}
	orig_alt_group->orig_group = NULL;
	orig_alt_group->first_insn = orig_insn;
	orig_alt_group->last_insn = last_orig_insn;

	new_alt_group = malloc(sizeof(*new_alt_group));
	if (!new_alt_group) {
		WARN("malloc failed");
		return -1;
	}

	if (special_alt->new_len < special_alt->orig_len) {
		/*
		 * Insert a fake nop at the end to make the replacement
		 * alt_group the same size as the original. This is needed to
		 * allow propagate_alt_cfi() to do its magic. When the last
		 * instruction affects the stack, the instruction after it (the
		 * nop) will propagate the new state to the shared CFI array.
		 */
		nop = malloc(sizeof(*nop));
		if (!nop) {
			WARN("malloc failed");
			return -1;
		}
		memset(nop, 0, sizeof(*nop));
		INIT_LIST_HEAD(&nop->alts);
		INIT_LIST_HEAD(&nop->stack_ops);

		nop->sec = special_alt->new_sec;
		nop->offset = special_alt->new_off + special_alt->new_len;
		nop->len = special_alt->orig_len - special_alt->new_len;
		nop->type = INSN_NOP;
		nop->func = orig_insn->func;
		nop->alt_group = new_alt_group;
		nop->ignore = orig_insn->ignore_alts;
	}

	if (!special_alt->new_len) {
		/* Empty replacement: the fake NOP is the whole new group. */
		*new_insn = nop;
		goto end;
	}

	insn = *new_insn;
	sec_for_each_insn_from(file, insn) {
		struct reloc *alt_reloc;

		if (insn->offset >= special_alt->new_off + special_alt->new_len)
			break;

		last_new_insn = insn;

		insn->ignore = orig_insn->ignore_alts;
		insn->func = orig_insn->func;
		insn->alt_group = new_alt_group;

		/*
		 * Since alternative replacement code is copy/pasted by the
		 * kernel after applying relocations, generally such code can't
		 * have relative-address relocation references to outside the
		 * .altinstr_replacement section, unless the arch's
		 * alternatives code can adjust the relative offsets
		 * accordingly.
		 */
		alt_reloc = insn_reloc(file, insn);
		if (alt_reloc &&
		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {

			WARN_FUNC("unsupported relocation in alternatives section",
				  insn->sec, insn->offset);
			return -1;
		}

		if (!is_static_jump(insn))
			continue;

		if (!insn->immediate)
			continue;

		dest_off = arch_jump_destination(insn);
		if (dest_off == special_alt->new_off + special_alt->new_len) {
			/* Jump to end-of-replacement continues after the original. */
			insn->jump_dest = next_insn_same_sec(file, last_orig_insn);
			if (!insn->jump_dest) {
				WARN_FUNC("can't find alternative jump destination",
					  insn->sec, insn->offset);
				return -1;
			}
		}
	}

	if (!last_new_insn) {
		WARN_FUNC("can't find last new alternative instruction",
			  special_alt->new_sec, special_alt->new_off);
		return -1;
	}

	if (nop)
		list_add(&nop->list, &last_new_insn->list);
end:
	new_alt_group->orig_group = orig_alt_group;
	new_alt_group->first_insn = *new_insn;
	new_alt_group->last_insn = nop ? : last_new_insn;
	new_alt_group->cfi = orig_alt_group->cfi;
	return 0;
}
  1453. /*
  1454. * A jump table entry can either convert a nop to a jump or a jump to a nop.
  1455. * If the original instruction is a jump, make the alt entry an effective nop
  1456. * by just skipping the original instruction.
  1457. */
  1458. static int handle_jump_alt(struct objtool_file *file,
  1459. struct special_alt *special_alt,
  1460. struct instruction *orig_insn,
  1461. struct instruction **new_insn)
  1462. {
  1463. if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
  1464. orig_insn->type != INSN_NOP) {
  1465. WARN_FUNC("unsupported instruction at jump label",
  1466. orig_insn->sec, orig_insn->offset);
  1467. return -1;
  1468. }
  1469. if (opts.hack_jump_label && special_alt->key_addend & 2) {
  1470. struct reloc *reloc = insn_reloc(file, orig_insn);
  1471. if (reloc) {
  1472. reloc->type = R_NONE;
  1473. elf_write_reloc(file->elf, reloc);
  1474. }
  1475. elf_write_insn(file->elf, orig_insn->sec,
  1476. orig_insn->offset, orig_insn->len,
  1477. arch_nop_insn(orig_insn->len));
  1478. orig_insn->type = INSN_NOP;
  1479. }
  1480. if (orig_insn->type == INSN_NOP) {
  1481. if (orig_insn->len == 2)
  1482. file->jl_nop_short++;
  1483. else
  1484. file->jl_nop_long++;
  1485. return 0;
  1486. }
  1487. if (orig_insn->len == 2)
  1488. file->jl_short++;
  1489. else
  1490. file->jl_long++;
  1491. *new_insn = list_next_entry(orig_insn, list);
  1492. return 0;
  1493. }
  1494. /*
  1495. * Read all the special sections which have alternate instructions which can be
  1496. * patched in or redirected to at runtime. Each instruction having alternate
  1497. * instruction(s) has them added to its insn->alts list, which will be
  1498. * traversed in validate_branch().
  1499. */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	/* Parse the special sections into a temporary list we own: */
	ret = special_get_alts(file->elf, &special_alts);
	if (ret)
		return ret;

	/* _safe variant: entries are unlinked and freed inside the loop. */
	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			WARN_FUNC("special: can't find orig instruction",
				  special_alt->orig_sec, special_alt->orig_off);
			ret = -1;
			goto out;
		}

		/*
		 * Group (.altinstructions) entries with a zero-length
		 * replacement have no new instruction; everything else does.
		 */
		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				WARN_FUNC("special: can't find new instruction",
					  special_alt->new_sec,
					  special_alt->new_off);
				ret = -1;
				goto out;
			}
		}

		if (special_alt->group) {
			/* NOTE(review): 'continue' leaks this special_alt;
			 * the list itself is leaked on the out: path too. */
			if (!special_alt->orig_len) {
				WARN_FUNC("empty alternative entry",
					  orig_insn->sec, orig_insn->offset);
				continue;
			}

			/* May update new_insn (e.g. to a synthetic nop): */
			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				goto out;
		} else if (special_alt->jump_or_nop) {
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				goto out;
		}

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			ret = -1;
			goto out;
		}

		/* Attach the alternative to the original instruction: */
		alt->insn = new_insn;
		alt->skip_orig = special_alt->skip_orig;
		orig_insn->ignore_alts |= special_alt->skip_alt;
		list_add_tail(&alt->list, &orig_insn->alts);

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (opts.stats) {
		/* Jump label statistics (the "\\" prints a literal backslash): */
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

out:
	return ret;
}
/*
 * Add each entry of the jump table starting at @table as an alternative
 * destination of dynamic jump @insn.  Returns 0 on success, -1 if no valid
 * entry was found or on allocation failure.
 */
static int add_jump_table(struct objtool_file *file, struct instruction *insn,
			  struct reloc *table)
{
	struct reloc *reloc = table;
	struct instruction *dest_insn;
	struct alternative *alt;
	struct symbol *pfunc = insn->func->pfunc;
	unsigned int prev_offset = 0;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {

		/* Check for the end of the table: */
		if (reloc != table && reloc->jump_table_start)
			break;

		/* Make sure the table entries are consecutive: */
		/* NOTE(review): assumes 8-byte table entries — verify per arch */
		if (prev_offset && reloc->offset != prev_offset + 8)
			break;

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec &&
		    reloc->addend == pfunc->offset)
			break;

		dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!dest_insn->func || dest_insn->func->pfunc != pfunc)
			break;

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			return -1;
		}

		alt->insn = dest_insn;
		list_add_tail(&alt->list, &insn->alts);
		prev_offset = reloc->offset;
	}

	/* prev_offset == 0 means not even the first entry was usable: */
	if (!prev_offset) {
		WARN_FUNC("can't find switch jump table",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}
  1613. /*
  1614. * find_jump_table() - Given a dynamic jump, find the switch jump table
  1615. * associated with it.
  1616. */
static struct reloc *find_jump_table(struct objtool_file *file,
				     struct symbol *func,
				     struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;

	/*
	 * Backward search using the @first_jump_src links, these help avoid
	 * much of the 'in between' code. Which avoids us getting confused by
	 * it.
	 */
	for (;
	     insn && insn->func && insn->func->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		/* A second dynamic jump means we've left @insn's dispatch code: */
		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
			break;

		/* Ask the arch whether this looks like a switch-table load: */
		table_reloc = arch_find_switch_table(file, insn);
		if (!table_reloc)
			continue;

		/* The first table entry must land inside the same function: */
		dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
		if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func)
			continue;

		return table_reloc;
	}

	return NULL;
}
  1649. /*
  1650. * First pass: Mark the head of each jump table so that in the next pass,
  1651. * we know when a given jump table ends and the next one starts.
  1652. */
static void mark_func_jump_tables(struct objtool_file *file,
				  struct symbol *func)
{
	struct instruction *insn, *last = NULL;
	struct reloc *reloc;

	func_for_each_insn(file, func, insn) {
		/* 'last' tracks the furthest point already covered: */
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			last = insn->jump_dest;
		}

		/* Only dynamic jumps can use a jump table: */
		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		reloc = find_jump_table(file, func, insn);
		if (reloc) {
			/* Marks where the next table begins, for add_jump_table(): */
			reloc->jump_table_start = true;
			insn->jump_table = reloc;
		}
	}
}
  1682. static int add_func_jump_tables(struct objtool_file *file,
  1683. struct symbol *func)
  1684. {
  1685. struct instruction *insn;
  1686. int ret;
  1687. func_for_each_insn(file, func, insn) {
  1688. if (!insn->jump_table)
  1689. continue;
  1690. ret = add_jump_table(file, insn, insn->jump_table);
  1691. if (ret)
  1692. return ret;
  1693. }
  1694. return 0;
  1695. }
  1696. /*
  1697. * For some switch statements, gcc generates a jump table in the .rodata
  1698. * section which contains a list of addresses within the function to jump to.
  1699. * This finds these jump tables and adds them to the insn->alts lists.
  1700. */
  1701. static int add_jump_table_alts(struct objtool_file *file)
  1702. {
  1703. struct section *sec;
  1704. struct symbol *func;
  1705. int ret;
  1706. if (!file->rodata)
  1707. return 0;
  1708. for_each_sec(file, sec) {
  1709. list_for_each_entry(func, &sec->symbol_list, list) {
  1710. if (func->type != STT_FUNC)
  1711. continue;
  1712. mark_func_jump_tables(file, func);
  1713. ret = add_func_jump_tables(file, func);
  1714. if (ret)
  1715. return ret;
  1716. }
  1717. }
  1718. return 0;
  1719. }
  1720. static void set_func_state(struct cfi_state *state)
  1721. {
  1722. state->cfa = initial_func_cfi.cfa;
  1723. memcpy(&state->regs, &initial_func_cfi.regs,
  1724. CFI_NUM_REGS * sizeof(struct cfi_reg));
  1725. state->stack_size = initial_func_cfi.cfa.offset;
  1726. }
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec, *relocsec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	relocsec = sec->reloc;
	if (!relocsec) {
		WARN("missing .rela.discard.unwind_hints section");
		return -1;
	}

	/* The section must be a whole number of struct unwind_hint entries: */
	if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
		WARN("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		/* Each entry has one reloc pointing at the hinted insn: */
		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			WARN("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		/* SAVE/RESTORE hints set flags only; no CFI state attached: */
		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		/*
		 * These checks are deliberately NOT else-if: REGS_PARTIAL and
		 * ENTRY hints fall through to the CFI decoding below.
		 */
		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			if (sym && sym->bind == STB_GLOBAL) {
				if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
					WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR",
						  insn->sec, insn->offset);
				}

				/* Global REGS_PARTIAL site == exception entry point: */
				insn->entry = 1;
			}
		}

		if (hint->type == UNWIND_HINT_TYPE_ENTRY) {
			/* ENTRY is CALL plus the entry flag: */
			hint->type = UNWIND_HINT_TYPE_CALL;
			insn->entry = 1;
		}

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			/* Use the shared, pre-computed function-entry CFI: */
			insn->cfi = &func_cfi;
			continue;
		}

		/* Start from the insn's existing CFI, if any, else init_cfi: */
		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			WARN_FUNC("unsupported unwind_hint sp base reg %d",
				  insn->sec, insn->offset, hint->sp_reg);
			return -1;
		}

		/* Hint fields are stored in the object's byte order: */
		cfi.cfa.offset = bswap_if_needed(hint->sp_offset);
		cfi.type = hint->type;
		cfi.end = hint->end;

		/* Deduplicate via the global CFI hash: */
		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}
  1802. static int read_noendbr_hints(struct objtool_file *file)
  1803. {
  1804. struct section *sec;
  1805. struct instruction *insn;
  1806. struct reloc *reloc;
  1807. sec = find_section_by_name(file->elf, ".rela.discard.noendbr");
  1808. if (!sec)
  1809. return 0;
  1810. list_for_each_entry(reloc, &sec->reloc_list, list) {
  1811. insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend);
  1812. if (!insn) {
  1813. WARN("bad .discard.noendbr entry");
  1814. return -1;
  1815. }
  1816. insn->noendbr = 1;
  1817. }
  1818. return 0;
  1819. }
  1820. static int read_retpoline_hints(struct objtool_file *file)
  1821. {
  1822. struct section *sec;
  1823. struct instruction *insn;
  1824. struct reloc *reloc;
  1825. sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
  1826. if (!sec)
  1827. return 0;
  1828. list_for_each_entry(reloc, &sec->reloc_list, list) {
  1829. if (reloc->sym->type != STT_SECTION) {
  1830. WARN("unexpected relocation symbol type in %s", sec->name);
  1831. return -1;
  1832. }
  1833. insn = find_insn(file, reloc->sym->sec, reloc->addend);
  1834. if (!insn) {
  1835. WARN("bad .discard.retpoline_safe entry");
  1836. return -1;
  1837. }
  1838. if (insn->type != INSN_JUMP_DYNAMIC &&
  1839. insn->type != INSN_CALL_DYNAMIC &&
  1840. insn->type != INSN_RETURN &&
  1841. insn->type != INSN_NOP) {
  1842. WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop",
  1843. insn->sec, insn->offset);
  1844. return -1;
  1845. }
  1846. insn->retpoline_safe = true;
  1847. }
  1848. return 0;
  1849. }
  1850. static int read_instr_hints(struct objtool_file *file)
  1851. {
  1852. struct section *sec;
  1853. struct instruction *insn;
  1854. struct reloc *reloc;
  1855. sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
  1856. if (!sec)
  1857. return 0;
  1858. list_for_each_entry(reloc, &sec->reloc_list, list) {
  1859. if (reloc->sym->type != STT_SECTION) {
  1860. WARN("unexpected relocation symbol type in %s", sec->name);
  1861. return -1;
  1862. }
  1863. insn = find_insn(file, reloc->sym->sec, reloc->addend);
  1864. if (!insn) {
  1865. WARN("bad .discard.instr_end entry");
  1866. return -1;
  1867. }
  1868. insn->instr--;
  1869. }
  1870. sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
  1871. if (!sec)
  1872. return 0;
  1873. list_for_each_entry(reloc, &sec->reloc_list, list) {
  1874. if (reloc->sym->type != STT_SECTION) {
  1875. WARN("unexpected relocation symbol type in %s", sec->name);
  1876. return -1;
  1877. }
  1878. insn = find_insn(file, reloc->sym->sec, reloc->addend);
  1879. if (!insn) {
  1880. WARN("bad .discard.instr_begin entry");
  1881. return -1;
  1882. }
  1883. insn->instr++;
  1884. }
  1885. return 0;
  1886. }
static int read_intra_function_calls(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		unsigned long dest_off;

		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s",
			     sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.intra_function_call entry");
			return -1;
		}

		/* The annotation is only valid on a direct CALL: */
		if (insn->type != INSN_CALL) {
			WARN_FUNC("intra_function_call not a direct call",
				  insn->sec, insn->offset);
			return -1;
		}

		/*
		 * Treat intra-function CALLs as JMPs, but with a stack_op.
		 * See add_call_destinations(), which strips stack_ops from
		 * normal CALLs.
		 */
		insn->type = INSN_JUMP_UNCONDITIONAL;

		/* Resolve the jump destination within the same section: */
		dest_off = arch_jump_destination(insn);
		insn->jump_dest = find_insn(file, insn->sec, dest_off);
		if (!insn->jump_dest) {
			WARN_FUNC("can't find call dest at %s+0x%lx",
				  insn->sec, insn->offset,
				  insn->sec->name, dest_off);
			return -1;
		}
	}

	return 0;
}
  1929. /*
  1930. * Return true if name matches an instrumentation function, where calls to that
  1931. * function from noinstr code can safely be removed, but compilers won't do so.
  1932. */
  1933. static bool is_profiling_func(const char *name)
  1934. {
  1935. /*
  1936. * Many compilers cannot disable KCOV with a function attribute.
  1937. */
  1938. if (!strncmp(name, "__sanitizer_cov_", 16))
  1939. return true;
  1940. /*
  1941. * Some compilers currently do not remove __tsan_func_entry/exit nor
  1942. * __tsan_atomic_signal_fence (used for barrier instrumentation) with
  1943. * the __no_sanitize_thread attribute, remove them. Once the kernel's
  1944. * minimum Clang version is 14.0, this can be removed.
  1945. */
  1946. if (!strncmp(name, "__tsan_func_", 12) ||
  1947. !strcmp(name, "__tsan_atomic_signal_fence"))
  1948. return true;
  1949. return false;
  1950. }
  1951. static int classify_symbols(struct objtool_file *file)
  1952. {
  1953. struct section *sec;
  1954. struct symbol *func;
  1955. for_each_sec(file, sec) {
  1956. list_for_each_entry(func, &sec->symbol_list, list) {
  1957. if (func->bind != STB_GLOBAL)
  1958. continue;
  1959. if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
  1960. strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
  1961. func->static_call_tramp = true;
  1962. if (arch_is_retpoline(func))
  1963. func->retpoline_thunk = true;
  1964. if (arch_is_rethunk(func))
  1965. func->return_thunk = true;
  1966. if (arch_is_embedded_insn(func))
  1967. func->embedded_insn = true;
  1968. if (!strcmp(func->name, "__fentry__"))
  1969. func->fentry = true;
  1970. if (is_profiling_func(func->name))
  1971. func->profiling_func = true;
  1972. }
  1973. }
  1974. return 0;
  1975. }
  1976. static void mark_rodata(struct objtool_file *file)
  1977. {
  1978. struct section *sec;
  1979. bool found = false;
  1980. /*
  1981. * Search for the following rodata sections, each of which can
  1982. * potentially contain jump tables:
  1983. *
  1984. * - .rodata: can contain GCC switch tables
  1985. * - .rodata.<func>: same, if -fdata-sections is being used
  1986. * - .rodata..c_jump_table: contains C annotated jump tables
  1987. *
  1988. * .rodata.str1.* sections are ignored; they don't contain jump tables.
  1989. */
  1990. for_each_sec(file, sec) {
  1991. if (!strncmp(sec->name, ".rodata", 7) &&
  1992. !strstr(sec->name, ".str1.")) {
  1993. sec->rodata = true;
  1994. found = true;
  1995. }
  1996. }
  1997. file->rodata = found;
  1998. }
/*
 * Top-level decode pass: build the instruction list and all the metadata
 * (alternatives, jump/call destinations, jump tables, hints) the validation
 * passes need.  The call order below is significant; see the comments.
 */
static int decode_sections(struct objtool_file *file)
{
	int ret;

	mark_rodata(file);

	ret = init_pv_ops(file);
	if (ret)
		return ret;

	ret = decode_instructions(file);
	if (ret)
		return ret;

	add_ignores(file);
	add_uaccess_safe(file);

	ret = add_ignore_alternatives(file);
	if (ret)
		return ret;

	/*
	 * Must be before read_unwind_hints() since that needs insn->noendbr.
	 */
	ret = read_noendbr_hints(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	ret = classify_symbols(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	ret = add_special_section_alts(file);
	if (ret)
		return ret;

	ret = add_jump_destinations(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	ret = read_intra_function_calls(file);
	if (ret)
		return ret;

	ret = add_call_destinations(file);
	if (ret)
		return ret;

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	ret = add_dead_ends(file);
	if (ret)
		return ret;

	ret = add_jump_table_alts(file);
	if (ret)
		return ret;

	ret = read_unwind_hints(file);
	if (ret)
		return ret;

	ret = read_retpoline_hints(file);
	if (ret)
		return ret;

	ret = read_instr_hints(file);
	if (ret)
		return ret;

	return 0;
}
  2067. static bool is_special_call(struct instruction *insn)
  2068. {
  2069. if (insn->type == INSN_CALL) {
  2070. struct symbol *dest = insn->call_dest;
  2071. if (!dest)
  2072. return false;
  2073. if (dest->fentry || dest->embedded_insn)
  2074. return true;
  2075. }
  2076. return false;
  2077. }
  2078. static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
  2079. {
  2080. struct cfi_state *cfi = &state->cfi;
  2081. int i;
  2082. if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
  2083. return true;
  2084. if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
  2085. return true;
  2086. if (cfi->stack_size != initial_func_cfi.cfa.offset)
  2087. return true;
  2088. for (i = 0; i < CFI_NUM_REGS; i++) {
  2089. if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
  2090. cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
  2091. return true;
  2092. }
  2093. return false;
  2094. }
  2095. static bool check_reg_frame_pos(const struct cfi_reg *reg,
  2096. int expected_offset)
  2097. {
  2098. return reg->base == CFI_CFA &&
  2099. reg->offset == expected_offset;
  2100. }
  2101. static bool has_valid_stack_frame(struct insn_state *state)
  2102. {
  2103. struct cfi_state *cfi = &state->cfi;
  2104. if (cfi->cfa.base == CFI_BP &&
  2105. check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
  2106. check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
  2107. return true;
  2108. if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
  2109. return true;
  2110. return false;
  2111. }
  2112. static int update_cfi_state_regs(struct instruction *insn,
  2113. struct cfi_state *cfi,
  2114. struct stack_op *op)
  2115. {
  2116. struct cfi_reg *cfa = &cfi->cfa;
  2117. if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
  2118. return 0;
  2119. /* push */
  2120. if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
  2121. cfa->offset += 8;
  2122. /* pop */
  2123. if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
  2124. cfa->offset -= 8;
  2125. /* add immediate to sp */
  2126. if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
  2127. op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
  2128. cfa->offset -= op->src.offset;
  2129. return 0;
  2130. }
  2131. static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
  2132. {
  2133. if (arch_callee_saved_reg(reg) &&
  2134. cfi->regs[reg].base == CFI_UNDEFINED) {
  2135. cfi->regs[reg].base = base;
  2136. cfi->regs[reg].offset = offset;
  2137. }
  2138. }
  2139. static void restore_reg(struct cfi_state *cfi, unsigned char reg)
  2140. {
  2141. cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
  2142. cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
  2143. }
  2144. /*
  2145. * A note about DRAP stack alignment:
  2146. *
  2147. * GCC has the concept of a DRAP register, which is used to help keep track of
  2148. * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
  2149. * register. The typical DRAP pattern is:
  2150. *
  2151. * 4c 8d 54 24 08 lea 0x8(%rsp),%r10
  2152. * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
  2153. * 41 ff 72 f8 pushq -0x8(%r10)
  2154. * 55 push %rbp
  2155. * 48 89 e5 mov %rsp,%rbp
  2156. * (more pushes)
  2157. * 41 52 push %r10
  2158. * ...
  2159. * 41 5a pop %r10
  2160. * (more pops)
  2161. * 5d pop %rbp
  2162. * 49 8d 62 f8 lea -0x8(%r10),%rsp
  2163. * c3 retq
  2164. *
  2165. * There are some variations in the epilogues, like:
  2166. *
  2167. * 5b pop %rbx
  2168. * 41 5a pop %r10
  2169. * 41 5c pop %r12
  2170. * 41 5d pop %r13
  2171. * 41 5e pop %r14
  2172. * c9 leaveq
  2173. * 49 8d 62 f8 lea -0x8(%r10),%rsp
  2174. * c3 retq
  2175. *
  2176. * and:
  2177. *
  2178. * 4c 8b 55 e8 mov -0x18(%rbp),%r10
  2179. * 48 8b 5d e0 mov -0x20(%rbp),%rbx
  2180. * 4c 8b 65 f0 mov -0x10(%rbp),%r12
  2181. * 4c 8b 6d f8 mov -0x8(%rbp),%r13
  2182. * c9 leaveq
  2183. * 49 8d 62 f8 lea -0x8(%r10),%rsp
  2184. * c3 retq
  2185. *
  2186. * Sometimes r13 is used as the DRAP register, in which case it's saved and
  2187. * restored beforehand:
  2188. *
  2189. * 41 55 push %r13
  2190. * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
  2191. * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
  2192. * ...
  2193. * 49 8d 65 f0 lea -0x10(%r13),%rsp
  2194. * 41 5d pop %r13
  2195. * c3 retq
  2196. */
  2197. static int update_cfi_state(struct instruction *insn,
  2198. struct instruction *next_insn,
  2199. struct cfi_state *cfi, struct stack_op *op)
  2200. {
  2201. struct cfi_reg *cfa = &cfi->cfa;
  2202. struct cfi_reg *regs = cfi->regs;
  2203. /* stack operations don't make sense with an undefined CFA */
  2204. if (cfa->base == CFI_UNDEFINED) {
  2205. if (insn->func) {
  2206. WARN_FUNC("undefined stack state", insn->sec, insn->offset);
  2207. return -1;
  2208. }
  2209. return 0;
  2210. }
  2211. if (cfi->type == UNWIND_HINT_TYPE_REGS ||
  2212. cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
  2213. return update_cfi_state_regs(insn, cfi, op);
  2214. switch (op->dest.type) {
  2215. case OP_DEST_REG:
  2216. switch (op->src.type) {
  2217. case OP_SRC_REG:
  2218. if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
  2219. cfa->base == CFI_SP &&
  2220. check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {
  2221. /* mov %rsp, %rbp */
  2222. cfa->base = op->dest.reg;
  2223. cfi->bp_scratch = false;
  2224. }
  2225. else if (op->src.reg == CFI_SP &&
  2226. op->dest.reg == CFI_BP && cfi->drap) {
  2227. /* drap: mov %rsp, %rbp */
  2228. regs[CFI_BP].base = CFI_BP;
  2229. regs[CFI_BP].offset = -cfi->stack_size;
  2230. cfi->bp_scratch = false;
  2231. }
  2232. else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
  2233. /*
  2234. * mov %rsp, %reg
  2235. *
  2236. * This is needed for the rare case where GCC
  2237. * does:
  2238. *
  2239. * mov %rsp, %rax
  2240. * ...
  2241. * mov %rax, %rsp
  2242. */
  2243. cfi->vals[op->dest.reg].base = CFI_CFA;
  2244. cfi->vals[op->dest.reg].offset = -cfi->stack_size;
  2245. }
  2246. else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
  2247. (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
  2248. /*
  2249. * mov %rbp, %rsp
  2250. *
  2251. * Restore the original stack pointer (Clang).
  2252. */
  2253. cfi->stack_size = -cfi->regs[CFI_BP].offset;
  2254. }
  2255. else if (op->dest.reg == cfa->base) {
  2256. /* mov %reg, %rsp */
  2257. if (cfa->base == CFI_SP &&
  2258. cfi->vals[op->src.reg].base == CFI_CFA) {
  2259. /*
  2260. * This is needed for the rare case
  2261. * where GCC does something dumb like:
  2262. *
  2263. * lea 0x8(%rsp), %rcx
  2264. * ...
  2265. * mov %rcx, %rsp
  2266. */
  2267. cfa->offset = -cfi->vals[op->src.reg].offset;
  2268. cfi->stack_size = cfa->offset;
  2269. } else if (cfa->base == CFI_SP &&
  2270. cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
  2271. cfi->vals[op->src.reg].offset == cfa->offset) {
  2272. /*
  2273. * Stack swizzle:
  2274. *
  2275. * 1: mov %rsp, (%[tos])
  2276. * 2: mov %[tos], %rsp
  2277. * ...
  2278. * 3: pop %rsp
  2279. *
  2280. * Where:
  2281. *
  2282. * 1 - places a pointer to the previous
  2283. * stack at the Top-of-Stack of the
  2284. * new stack.
  2285. *
  2286. * 2 - switches to the new stack.
  2287. *
  2288. * 3 - pops the Top-of-Stack to restore
  2289. * the original stack.
  2290. *
  2291. * Note: we set base to SP_INDIRECT
  2292. * here and preserve offset. Therefore
  2293. * when the unwinder reaches ToS it
  2294. * will dereference SP and then add the
  2295. * offset to find the next frame, IOW:
  2296. * (%rsp) + offset.
  2297. */
  2298. cfa->base = CFI_SP_INDIRECT;
  2299. } else {
  2300. cfa->base = CFI_UNDEFINED;
  2301. cfa->offset = 0;
  2302. }
  2303. }
  2304. else if (op->dest.reg == CFI_SP &&
  2305. cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
  2306. cfi->vals[op->src.reg].offset == cfa->offset) {
  2307. /*
  2308. * The same stack swizzle case 2) as above. But
  2309. * because we can't change cfa->base, case 3)
  2310. * will become a regular POP. Pretend we're a
  2311. * PUSH so things don't go unbalanced.
  2312. */
  2313. cfi->stack_size += 8;
  2314. }
  2315. break;
  2316. case OP_SRC_ADD:
  2317. if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
  2318. /* add imm, %rsp */
  2319. cfi->stack_size -= op->src.offset;
  2320. if (cfa->base == CFI_SP)
  2321. cfa->offset -= op->src.offset;
  2322. break;
  2323. }
  2324. if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
  2325. /* lea disp(%rbp), %rsp */
  2326. cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
  2327. break;
  2328. }
  2329. if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
  2330. /* drap: lea disp(%rsp), %drap */
  2331. cfi->drap_reg = op->dest.reg;
  2332. /*
  2333. * lea disp(%rsp), %reg
  2334. *
  2335. * This is needed for the rare case where GCC
  2336. * does something dumb like:
  2337. *
  2338. * lea 0x8(%rsp), %rcx
  2339. * ...
  2340. * mov %rcx, %rsp
  2341. */
  2342. cfi->vals[op->dest.reg].base = CFI_CFA;
  2343. cfi->vals[op->dest.reg].offset = \
  2344. -cfi->stack_size + op->src.offset;
  2345. break;
  2346. }
  2347. if (cfi->drap && op->dest.reg == CFI_SP &&
  2348. op->src.reg == cfi->drap_reg) {
  2349. /* drap: lea disp(%drap), %rsp */
  2350. cfa->base = CFI_SP;
  2351. cfa->offset = cfi->stack_size = -op->src.offset;
  2352. cfi->drap_reg = CFI_UNDEFINED;
  2353. cfi->drap = false;
  2354. break;
  2355. }
  2356. if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
  2357. WARN_FUNC("unsupported stack register modification",
  2358. insn->sec, insn->offset);
  2359. return -1;
  2360. }
  2361. break;
  2362. case OP_SRC_AND:
  2363. if (op->dest.reg != CFI_SP ||
  2364. (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
  2365. (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
  2366. WARN_FUNC("unsupported stack pointer realignment",
  2367. insn->sec, insn->offset);
  2368. return -1;
  2369. }
  2370. if (cfi->drap_reg != CFI_UNDEFINED) {
  2371. /* drap: and imm, %rsp */
  2372. cfa->base = cfi->drap_reg;
  2373. cfa->offset = cfi->stack_size = 0;
  2374. cfi->drap = true;
  2375. }
  2376. /*
  2377. * Older versions of GCC (4.8ish) realign the stack
  2378. * without DRAP, with a frame pointer.
  2379. */
  2380. break;
  2381. case OP_SRC_POP:
  2382. case OP_SRC_POPF:
  2383. if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
  2384. /* pop %rsp; # restore from a stack swizzle */
  2385. cfa->base = CFI_SP;
  2386. break;
  2387. }
  2388. if (!cfi->drap && op->dest.reg == cfa->base) {
  2389. /* pop %rbp */
  2390. cfa->base = CFI_SP;
  2391. }
  2392. if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
  2393. op->dest.reg == cfi->drap_reg &&
  2394. cfi->drap_offset == -cfi->stack_size) {
  2395. /* drap: pop %drap */
  2396. cfa->base = cfi->drap_reg;
  2397. cfa->offset = 0;
  2398. cfi->drap_offset = -1;
  2399. } else if (cfi->stack_size == -regs[op->dest.reg].offset) {
  2400. /* pop %reg */
  2401. restore_reg(cfi, op->dest.reg);
  2402. }
  2403. cfi->stack_size -= 8;
  2404. if (cfa->base == CFI_SP)
  2405. cfa->offset -= 8;
  2406. break;
  2407. case OP_SRC_REG_INDIRECT:
  2408. if (!cfi->drap && op->dest.reg == cfa->base &&
  2409. op->dest.reg == CFI_BP) {
  2410. /* mov disp(%rsp), %rbp */
  2411. cfa->base = CFI_SP;
  2412. cfa->offset = cfi->stack_size;
  2413. }
  2414. if (cfi->drap && op->src.reg == CFI_BP &&
  2415. op->src.offset == cfi->drap_offset) {
  2416. /* drap: mov disp(%rbp), %drap */
  2417. cfa->base = cfi->drap_reg;
  2418. cfa->offset = 0;
  2419. cfi->drap_offset = -1;
  2420. }
  2421. if (cfi->drap && op->src.reg == CFI_BP &&
  2422. op->src.offset == regs[op->dest.reg].offset) {
  2423. /* drap: mov disp(%rbp), %reg */
  2424. restore_reg(cfi, op->dest.reg);
  2425. } else if (op->src.reg == cfa->base &&
  2426. op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
  2427. /* mov disp(%rbp), %reg */
  2428. /* mov disp(%rsp), %reg */
  2429. restore_reg(cfi, op->dest.reg);
  2430. } else if (op->src.reg == CFI_SP &&
  2431. op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {
  2432. /* mov disp(%rsp), %reg */
  2433. restore_reg(cfi, op->dest.reg);
  2434. }
  2435. break;
  2436. default:
  2437. WARN_FUNC("unknown stack-related instruction",
  2438. insn->sec, insn->offset);
  2439. return -1;
  2440. }
  2441. break;
  2442. case OP_DEST_PUSH:
  2443. case OP_DEST_PUSHF:
  2444. cfi->stack_size += 8;
  2445. if (cfa->base == CFI_SP)
  2446. cfa->offset += 8;
  2447. if (op->src.type != OP_SRC_REG)
  2448. break;
  2449. if (cfi->drap) {
  2450. if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
  2451. /* drap: push %drap */
  2452. cfa->base = CFI_BP_INDIRECT;
  2453. cfa->offset = -cfi->stack_size;
  2454. /* save drap so we know when to restore it */
  2455. cfi->drap_offset = -cfi->stack_size;
  2456. } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
  2457. /* drap: push %rbp */
  2458. cfi->stack_size = 0;
  2459. } else {
  2460. /* drap: push %reg */
  2461. save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
  2462. }
  2463. } else {
  2464. /* push %reg */
  2465. save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
  2466. }
  2467. /* detect when asm code uses rbp as a scratch register */
  2468. if (opts.stackval && insn->func && op->src.reg == CFI_BP &&
  2469. cfa->base != CFI_BP)
  2470. cfi->bp_scratch = true;
  2471. break;
  2472. case OP_DEST_REG_INDIRECT:
  2473. if (cfi->drap) {
  2474. if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
  2475. /* drap: mov %drap, disp(%rbp) */
  2476. cfa->base = CFI_BP_INDIRECT;
  2477. cfa->offset = op->dest.offset;
  2478. /* save drap offset so we know when to restore it */
  2479. cfi->drap_offset = op->dest.offset;
  2480. } else {
  2481. /* drap: mov reg, disp(%rbp) */
  2482. save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
  2483. }
  2484. } else if (op->dest.reg == cfa->base) {
  2485. /* mov reg, disp(%rbp) */
  2486. /* mov reg, disp(%rsp) */
  2487. save_reg(cfi, op->src.reg, CFI_CFA,
  2488. op->dest.offset - cfi->cfa.offset);
  2489. } else if (op->dest.reg == CFI_SP) {
  2490. /* mov reg, disp(%rsp) */
  2491. save_reg(cfi, op->src.reg, CFI_CFA,
  2492. op->dest.offset - cfi->stack_size);
  2493. } else if (op->src.reg == CFI_SP && op->dest.offset == 0) {
  2494. /* mov %rsp, (%reg); # setup a stack swizzle. */
  2495. cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
  2496. cfi->vals[op->dest.reg].offset = cfa->offset;
  2497. }
  2498. break;
  2499. case OP_DEST_MEM:
  2500. if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
  2501. WARN_FUNC("unknown stack-related memory operation",
  2502. insn->sec, insn->offset);
  2503. return -1;
  2504. }
  2505. /* pop mem */
  2506. cfi->stack_size -= 8;
  2507. if (cfa->base == CFI_SP)
  2508. cfa->offset -= 8;
  2509. break;
  2510. default:
  2511. WARN_FUNC("unknown stack-related instruction",
  2512. insn->sec, insn->offset);
  2513. return -1;
  2514. }
  2515. return 0;
  2516. }
  2517. /*
  2518. * The stack layouts of alternatives instructions can sometimes diverge when
  2519. * they have stack modifications. That's fine as long as the potential stack
  2520. * layouts don't conflict at any given potential instruction boundary.
  2521. *
  2522. * Flatten the CFIs of the different alternative code streams (both original
  2523. * and replacement) into a single shared CFI array which can be used to detect
  2524. * conflicts and nicely feed a linear array of ORC entries to the unwinder.
  2525. */
  2526. static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
  2527. {
  2528. struct cfi_state **alt_cfi;
  2529. int group_off;
  2530. if (!insn->alt_group)
  2531. return 0;
  2532. if (!insn->cfi) {
  2533. WARN("CFI missing");
  2534. return -1;
  2535. }
  2536. alt_cfi = insn->alt_group->cfi;
  2537. group_off = insn->offset - insn->alt_group->first_insn->offset;
  2538. if (!alt_cfi[group_off]) {
  2539. alt_cfi[group_off] = insn->cfi;
  2540. } else {
  2541. if (cficmp(alt_cfi[group_off], insn->cfi)) {
  2542. WARN_FUNC("stack layout conflict in alternatives",
  2543. insn->sec, insn->offset);
  2544. return -1;
  2545. }
  2546. }
  2547. return 0;
  2548. }
/*
 * Apply all stack operations of @insn to the in-flight CFI state, and track
 * the uaccess (EFLAGS.AC) state across PUSHF/POPF pairs within alternatives.
 *
 * Returns non-zero on error (bad CFI update or PUSHF nesting too deep).
 */
static int handle_insn_ops(struct instruction *insn,
			   struct instruction *next_insn,
			   struct insn_state *state)
{
	struct stack_op *op;

	list_for_each_entry(op, &insn->stack_ops, list) {

		if (update_cfi_state(insn, next_insn, &state->cfi, op))
			return 1;

		/* The PUSHF/POPF tracking below only applies inside alternatives. */
		if (!insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				/* bit 0 == 1 is a sentinel marking the stack bottom */
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				/* next shift would push the sentinel out of the u32 */
				WARN_FUNC("PUSHF stack exhausted",
					  insn->sec, insn->offset);
				return 1;
			}
			/* push the current uaccess flag onto the bit-stack */
			state->uaccess_stack <<= 1;
			state->uaccess_stack |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				/* pop the saved uaccess flag */
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				/* only the sentinel left: the stack is empty again */
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}
/*
 * Compare the CFI state previously recorded on @insn against the incoming
 * state @cfi2 from another branch. On the first mismatching component (CFA,
 * saved registers, type, or DRAP state) a warning is emitted and false is
 * returned; true means the two states fully match.
 */
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  insn->sec, insn->offset,
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);

	} else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		/* Report only the first differing register. */
		for (i = 0; i < CFI_NUM_REGS; i++) {
			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
				    sizeof(struct cfi_reg)))
				continue;

			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  insn->sec, insn->offset,
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
			break;
		}

	} else if (cfi1->type != cfi2->type) {
		WARN_FUNC("stack state mismatch: type1=%d type2=%d",
			  insn->sec, insn->offset, cfi1->type, cfi2->type);

	} else if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  insn->sec, insn->offset,
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);

	} else
		return true;

	return false;
}
  2619. static inline bool func_uaccess_safe(struct symbol *func)
  2620. {
  2621. if (func)
  2622. return func->uaccess_safe;
  2623. return false;
  2624. }
  2625. static inline const char *call_dest_name(struct instruction *insn)
  2626. {
  2627. static char pvname[19];
  2628. struct reloc *rel;
  2629. int idx;
  2630. if (insn->call_dest)
  2631. return insn->call_dest->name;
  2632. rel = insn_reloc(NULL, insn);
  2633. if (rel && !strcmp(rel->sym->name, "pv_ops")) {
  2634. idx = (rel->addend / sizeof(void *));
  2635. snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
  2636. return pvname;
  2637. }
  2638. return "{dynamic}";
  2639. }
/*
 * Check whether an indirect call through the pv_ops[] paravirt table can
 * only reach noinstr-clean targets. The verdict is cached per table slot
 * in file->pv_ops[idx].clean; a warning is emitted for each dirty target.
 */
static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *target;
	struct reloc *rel;
	int idx;

	rel = insn_reloc(file, insn);
	if (!rel || strcmp(rel->sym->name, "pv_ops"))
		return false;

	idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *));

	/* Already vetted this slot. */
	if (file->pv_ops[idx].clean)
		return true;

	/* Assume clean until a non-noinstr target proves otherwise. */
	file->pv_ops[idx].clean = true;

	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
		if (!target->sec->noinstr) {
			WARN("pv_ops[%d]: %s", idx, target->name);
			file->pv_ops[idx].clean = false;
		}
	}

	return file->pv_ops[idx].clean;
}
  2660. static inline bool noinstr_call_dest(struct objtool_file *file,
  2661. struct instruction *insn,
  2662. struct symbol *func)
  2663. {
  2664. /*
  2665. * We can't deal with indirect function calls at present;
  2666. * assume they're instrumented.
  2667. */
  2668. if (!func) {
  2669. if (file->pv_ops)
  2670. return pv_call_dest(file, insn);
  2671. return false;
  2672. }
  2673. /*
  2674. * If the symbol is from a noinstr section; we good.
  2675. */
  2676. if (func->sec->noinstr)
  2677. return true;
  2678. /*
  2679. * The __ubsan_handle_*() calls are like WARN(), they only happen when
  2680. * something 'BAD' happened. At the risk of taking the machine down,
  2681. * let them proceed to get the message out.
  2682. */
  2683. if (!strncmp(func->name, "__ubsan_handle_", 15))
  2684. return true;
  2685. return false;
  2686. }
/*
 * Validate the non-control-flow constraints on a call instruction:
 * noinstr containment, uaccess (AC flag) state and DF state.
 * Returns 1 (warning emitted) on a violation, 0 otherwise.
 */
static int validate_call(struct objtool_file *file,
			 struct instruction *insn,
			 struct insn_state *state)
{
	/* Calls out of .noinstr.text need a noinstr-clean destination. */
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(file, insn, insn->call_dest)) {
		WARN_FUNC("call to %s() leaves .noinstr.text section",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	/* With STAC active, only uaccess-safe functions may be called. */
	if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
		WARN_FUNC("call to %s() with UACCESS enabled",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	/* No calls while the direction flag is set (STD without CLD). */
	if (state->df) {
		WARN_FUNC("call to %s() with DF set",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	return 0;
}
/*
 * A sibling (tail) call must leave the stack frame unmodified, and is
 * otherwise subject to the same rules as a regular call.
 */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (has_modified_stack_frame(insn, state)) {
		WARN_FUNC("sibling call from callable instruction with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	return validate_call(file, insn, state);
}
/*
 * Validate a return instruction: instrumentation, uaccess and DF state,
 * stack frame integrity, and RBP scratch usage must all be consistent
 * with what the caller of @func expects. Returns 1 on a violation.
 */
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_FUNC("return with instrumentation enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	/* A uaccess-safe function must not itself disable uaccess. */
	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->df) {
		WARN_FUNC("return with DF set",
			  insn->sec, insn->offset);
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_FUNC("return with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_FUNC("BP used as a scratch register",
			  insn->sec, insn->offset);
		return 1;
	}

	return 0;
}
  2754. static struct instruction *next_insn_to_validate(struct objtool_file *file,
  2755. struct instruction *insn)
  2756. {
  2757. struct alt_group *alt_group = insn->alt_group;
  2758. /*
  2759. * Simulate the fact that alternatives are patched in-place. When the
  2760. * end of a replacement alt_group is reached, redirect objtool flow to
  2761. * the end of the original alt_group.
  2762. */
  2763. if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
  2764. return next_insn_same_sec(file, alt_group->orig_group->last_insn);
  2765. return next_insn_same_sec(file, insn);
  2766. }
/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps). Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/objtool.txt.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	struct alternative *alt;
	struct instruction *next_insn, *prev_insn = NULL;
	struct section *sec;
	u8 visited;
	int ret;

	sec = insn->sec;

	while (1) {
		next_insn = next_insn_to_validate(file, insn);

		/* Falling out of @func into a different function is an error... */
		if (func && insn->func && func != insn->func->pfunc) {
			/* Ignore KCFI type preambles, which always fall through */
			if (!strncmp(func->name, "__cfi_", 6))
				return 0;

			WARN("%s() falls through to next function %s()",
			     func->name, insn->func->name);
			return 1;
		}

		if (func && insn->ignore) {
			WARN_FUNC("BUG: why am I validating an ignored function?",
				  sec, insn->offset);
			return 1;
		}

		/* One visited bit per uaccess state this branch was entered with. */
		visited = VISITED_BRANCH << state.uaccess;
		if (insn->visited & VISITED_BRANCH_MASK) {
			/* Re-visited without a hint: the CFI states must converge. */
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
				return 1;

			/* Already validated with this exact uaccess state: done. */
			if (insn->visited & visited)
				return 0;
		} else {
			nr_insns_visited++;
		}

		if (state.noinstr)
			state.instr += insn->instr;

		if (insn->hint) {
			if (insn->restore) {
				struct instruction *save_insn, *i;

				/* Search backwards for the matching CFI save hint. */
				i = insn;
				save_insn = NULL;
				sym_for_each_insn_continue_reverse(file, func, i) {
					if (i->save) {
						save_insn = i;
						break;
					}
				}

				if (!save_insn) {
					WARN_FUNC("no corresponding CFI save for CFI restore",
						  sec, insn->offset);
					return 1;
				}

				if (!save_insn->visited) {
					WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
						  sec, insn->offset);
					return 1;
				}

				insn->cfi = save_insn->cfi;
				nr_cfi_reused++;
			}

			/* A hint overrides the computed state. */
			state.cfi = *insn->cfi;
		} else {
			/* XXX track if we actually changed state.cfi */
			if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
				insn->cfi = prev_insn->cfi;
				nr_cfi_reused++;
			} else {
				insn->cfi = cfi_hash_find_or_add(&state.cfi);
			}
		}

		insn->visited |= visited;

		if (propagate_alt_cfi(file, insn))
			return 1;

		/* Recurse into every alternative code stream. */
		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
			bool skip_orig = false;

			list_for_each_entry(alt, &insn->alts, list) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					if (opts.backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		if (handle_insn_ops(insn, next_insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(file, insn, &state);
			if (ret)
				return ret;

			if (opts.stackval && func && !is_special_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_FUNC("call without frame pointer save/setup",
					  sec, insn->offset);
				return 1;
			}

			if (insn->dead_end)
				return 0;

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				/* Follow the taken branch with a copy of the state. */
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					if (opts.backtrace)
						BT_FUNC("(branch)", insn);
					return ret;
				}
			}

			/* Unconditional jumps don't fall through. */
			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_CONTEXT_SWITCH:
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_FUNC("unsupported instruction in callable function",
					  sec, insn->offset);
				return 1;
			}
			return 0;

		case INSN_STAC:
			if (state.uaccess) {
				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!state.uaccess && func) {
				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df) {
				WARN_FUNC("recursive STD", sec, insn->offset);
				return 1;
			}

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func) {
				WARN_FUNC("redundant CLD", sec, insn->offset);
				return 1;
			}

			state.df = false;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next_insn) {
			/* Running off a section end is only OK if unwinding is off. */
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			WARN("%s: unexpected end of section", sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;
	}

	return 0;
}
/*
 * Run branch validation starting from every unwind-hinted instruction that
 * normal control-flow validation didn't reach, within @sec (or the whole
 * file when @sec is NULL). Returns the number of warnings emitted.
 */
static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
	struct instruction *insn;
	struct insn_state state;
	int ret, warnings = 0;

	if (!file->hints)
		return 0;

	init_insn_state(file, &state, sec);

	if (sec) {
		insn = find_insn(file, sec, 0);
		if (!insn)
			return 0;
	} else {
		insn = list_first_entry(&file->insn_list, typeof(*insn), list);
	}

	while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
		if (insn->hint && !insn->visited && !insn->ignore) {
			ret = validate_branch(file, insn->func, insn, state);
			if (ret && opts.backtrace)
				BT_FUNC("<=== (hint)", insn);
			warnings += ret;
		}

		insn = list_next_entry(insn, list);
	}

	return warnings;
}
  2988. /*
  2989. * Validate rethunk entry constraint: must untrain RET before the first RET.
  2990. *
  2991. * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes
  2992. * before an actual RET instruction.
  2993. */
  2994. static int validate_entry(struct objtool_file *file, struct instruction *insn)
  2995. {
  2996. struct instruction *next, *dest;
  2997. int ret, warnings = 0;
  2998. for (;;) {
  2999. next = next_insn_to_validate(file, insn);
  3000. if (insn->visited & VISITED_ENTRY)
  3001. return 0;
  3002. insn->visited |= VISITED_ENTRY;
  3003. if (!insn->ignore_alts && !list_empty(&insn->alts)) {
  3004. struct alternative *alt;
  3005. bool skip_orig = false;
  3006. list_for_each_entry(alt, &insn->alts, list) {
  3007. if (alt->skip_orig)
  3008. skip_orig = true;
  3009. ret = validate_entry(file, alt->insn);
  3010. if (ret) {
  3011. if (opts.backtrace)
  3012. BT_FUNC("(alt)", insn);
  3013. return ret;
  3014. }
  3015. }
  3016. if (skip_orig)
  3017. return 0;
  3018. }
  3019. switch (insn->type) {
  3020. case INSN_CALL_DYNAMIC:
  3021. case INSN_JUMP_DYNAMIC:
  3022. case INSN_JUMP_DYNAMIC_CONDITIONAL:
  3023. WARN_FUNC("early indirect call", insn->sec, insn->offset);
  3024. return 1;
  3025. case INSN_JUMP_UNCONDITIONAL:
  3026. case INSN_JUMP_CONDITIONAL:
  3027. if (!is_sibling_call(insn)) {
  3028. if (!insn->jump_dest) {
  3029. WARN_FUNC("unresolved jump target after linking?!?",
  3030. insn->sec, insn->offset);
  3031. return -1;
  3032. }
  3033. ret = validate_entry(file, insn->jump_dest);
  3034. if (ret) {
  3035. if (opts.backtrace) {
  3036. BT_FUNC("(branch%s)", insn,
  3037. insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
  3038. }
  3039. return ret;
  3040. }
  3041. if (insn->type == INSN_JUMP_UNCONDITIONAL)
  3042. return 0;
  3043. break;
  3044. }
  3045. /* fallthrough */
  3046. case INSN_CALL:
  3047. dest = find_insn(file, insn->call_dest->sec,
  3048. insn->call_dest->offset);
  3049. if (!dest) {
  3050. WARN("Unresolved function after linking!?: %s",
  3051. insn->call_dest->name);
  3052. return -1;
  3053. }
  3054. ret = validate_entry(file, dest);
  3055. if (ret) {
  3056. if (opts.backtrace)
  3057. BT_FUNC("(call)", insn);
  3058. return ret;
  3059. }
  3060. /*
  3061. * If a call returns without error, it must have seen UNTRAIN_RET.
  3062. * Therefore any non-error return is a success.
  3063. */
  3064. return 0;
  3065. case INSN_RETURN:
  3066. WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset);
  3067. return 1;
  3068. case INSN_NOP:
  3069. if (insn->retpoline_safe)
  3070. return 0;
  3071. break;
  3072. default:
  3073. break;
  3074. }
  3075. if (!next) {
  3076. WARN_FUNC("teh end!", insn->sec, insn->offset);
  3077. return -1;
  3078. }
  3079. insn = next;
  3080. }
  3081. return warnings;
  3082. }
/*
 * Validate that all branches starting at 'insn->entry' encounter UNRET_END
 * before RET.
 */
static int validate_unret(struct objtool_file *file)
{
	struct instruction *insn;
	int ret, warnings = 0;

	for_each_insn(file, insn) {
		if (!insn->entry)
			continue;

		ret = validate_entry(file, insn);
		if (ret < 0) {
			/* hard error: abort immediately */
			WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset);
			return ret;
		}
		/* positive return values are accumulated warnings */
		warnings += ret;
	}

	return warnings;
}
/*
 * In RETPOLINE/RETHUNK builds, warn about any indirect jump/call or 'naked'
 * return that hasn't been annotated retpoline-safe. Returns the number of
 * warnings emitted.
 */
static int validate_retpoline(struct objtool_file *file)
{
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN)
			continue;

		if (insn->retpoline_safe)
			continue;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(insn->sec->name, ".init.text") && !opts.module)
			continue;

		if (insn->type == INSN_RETURN) {
			if (opts.rethunk) {
				WARN_FUNC("'naked' return found in RETHUNK build",
					  insn->sec, insn->offset);
			} else
				continue;
		} else {
			WARN_FUNC("indirect %s found in RETPOLINE build",
				  insn->sec, insn->offset,
				  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
		}

		warnings++;
	}

	return warnings;
}
  3137. static bool is_kasan_insn(struct instruction *insn)
  3138. {
  3139. return (insn->type == INSN_CALL &&
  3140. !strcmp(insn->call_dest->name, "__asan_handle_no_return"));
  3141. }
  3142. static bool is_ubsan_insn(struct instruction *insn)
  3143. {
  3144. return (insn->type == INSN_CALL &&
  3145. !strcmp(insn->call_dest->name,
  3146. "__ubsan_handle_builtin_unreachable"));
  3147. }
/*
 * Decide whether an instruction never reached by control-flow validation
 * should be exempt from the "unreachable instruction" warning (NOPs, traps,
 * alternative replacements, dropped weak-symbol code, KASAN/UBSAN padding).
 */
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	int i;
	struct instruction *prev_insn;

	if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
		return true;

	/*
	 * Ignore alternative replacement instructions. This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	/*
	 * Whole archive runs might encounter dead code from weak symbols.
	 * This is where the linker will have dropped the weak symbol in
	 * favour of a regular symbol, but leaves the code in place.
	 *
	 * In this case we'll find a piece of code (whole function) that is not
	 * covered by a !section symbol. Ignore them.
	 */
	if (opts.link && !insn->func) {
		int size = find_symbol_hole_containing(insn->sec, insn->offset);
		unsigned long end = insn->offset + size;

		if (!size) /* not a hole */
			return false;

		if (size < 0) /* hole until the end */
			return true;

		sec_for_each_insn_continue(file, insn) {
			/*
			 * If we reach a visited instruction at or before the
			 * end of the hole, ignore the unreachable.
			 */
			if (insn->visited)
				return true;

			if (insn->offset >= end)
				break;

			/*
			 * If this hole jumps to a .cold function, mark it ignore too.
			 */
			if (insn->jump_dest && insn->jump_dest->func &&
			    strstr(insn->jump_dest->func->name, ".cold")) {
				struct instruction *dest = insn->jump_dest;

				func_for_each_insn(file, dest->func, dest)
					dest->ignore = true;
			}
		}

		return false;
	}

	if (!insn->func)
		return false;

	if (insn->func->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable(). The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = list_prev_entry(insn, list);
	if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			/* only follow intra-function jumps */
			if (insn->jump_dest &&
			    insn->jump_dest->func == insn->func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		/* stop at the end of the enclosing function */
		if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
			break;

		insn = list_next_entry(insn, list);
	}

	return false;
}
/*
 * Validate a single function symbol by running validate_branch() from its
 * first instruction. Aliases and sub-functions are validated via their
 * canonical/parent symbol only. Returns the validate_branch() result.
 */
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	/* Only validate the canonical copy of aliased/child symbols. */
	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->ignore || insn->visited)
		return 0;

	/* uaccess-safe functions start with uaccess considered enabled */
	state->uaccess = sym->uaccess_safe;

	ret = validate_branch(file, insn->func, insn, *state);
	if (ret && opts.backtrace)
		BT_FUNC("<=== (sym)", insn);
	return ret;
}
  3258. static int validate_section(struct objtool_file *file, struct section *sec)
  3259. {
  3260. struct insn_state state;
  3261. struct symbol *func;
  3262. int warnings = 0;
  3263. list_for_each_entry(func, &sec->symbol_list, list) {
  3264. if (func->type != STT_FUNC)
  3265. continue;
  3266. init_insn_state(file, &state, sec);
  3267. set_func_state(&state.cfi);
  3268. warnings += validate_symbol(file, sec, func, &state);
  3269. }
  3270. return warnings;
  3271. }
  3272. static int validate_noinstr_sections(struct objtool_file *file)
  3273. {
  3274. struct section *sec;
  3275. int warnings = 0;
  3276. sec = find_section_by_name(file->elf, ".noinstr.text");
  3277. if (sec) {
  3278. warnings += validate_section(file, sec);
  3279. warnings += validate_unwind_hints(file, sec);
  3280. }
  3281. sec = find_section_by_name(file->elf, ".entry.text");
  3282. if (sec) {
  3283. warnings += validate_section(file, sec);
  3284. warnings += validate_unwind_hints(file, sec);
  3285. }
  3286. return warnings;
  3287. }
  3288. static int validate_functions(struct objtool_file *file)
  3289. {
  3290. struct section *sec;
  3291. int warnings = 0;
  3292. for_each_sec(file, sec) {
  3293. if (!(sec->sh.sh_flags & SHF_EXECINSTR))
  3294. continue;
  3295. warnings += validate_section(file, sec);
  3296. }
  3297. return warnings;
  3298. }
  3299. static void mark_endbr_used(struct instruction *insn)
  3300. {
  3301. if (!list_empty(&insn->call_node))
  3302. list_del_init(&insn->call_node);
  3303. }
  3304. static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
  3305. {
  3306. struct instruction *dest;
  3307. struct reloc *reloc;
  3308. unsigned long off;
  3309. int warnings = 0;
  3310. /*
  3311. * Looking for function pointer load relocations. Ignore
  3312. * direct/indirect branches:
  3313. */
  3314. switch (insn->type) {
  3315. case INSN_CALL:
  3316. case INSN_CALL_DYNAMIC:
  3317. case INSN_JUMP_CONDITIONAL:
  3318. case INSN_JUMP_UNCONDITIONAL:
  3319. case INSN_JUMP_DYNAMIC:
  3320. case INSN_JUMP_DYNAMIC_CONDITIONAL:
  3321. case INSN_RETURN:
  3322. case INSN_NOP:
  3323. return 0;
  3324. default:
  3325. break;
  3326. }
  3327. for (reloc = insn_reloc(file, insn);
  3328. reloc;
  3329. reloc = find_reloc_by_dest_range(file->elf, insn->sec,
  3330. reloc->offset + 1,
  3331. (insn->offset + insn->len) - (reloc->offset + 1))) {
  3332. /*
  3333. * static_call_update() references the trampoline, which
  3334. * doesn't have (or need) ENDBR. Skip warning in that case.
  3335. */
  3336. if (reloc->sym->static_call_tramp)
  3337. continue;
  3338. off = reloc->sym->offset;
  3339. if (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32)
  3340. off += arch_dest_reloc_offset(reloc->addend);
  3341. else
  3342. off += reloc->addend;
  3343. dest = find_insn(file, reloc->sym->sec, off);
  3344. if (!dest)
  3345. continue;
  3346. if (dest->type == INSN_ENDBR) {
  3347. mark_endbr_used(dest);
  3348. continue;
  3349. }
  3350. if (dest->func && dest->func == insn->func) {
  3351. /*
  3352. * Anything from->to self is either _THIS_IP_ or
  3353. * IRET-to-self.
  3354. *
  3355. * There is no sane way to annotate _THIS_IP_ since the
  3356. * compiler treats the relocation as a constant and is
  3357. * happy to fold in offsets, skewing any annotation we
  3358. * do, leading to vast amounts of false-positives.
  3359. *
  3360. * There's also compiler generated _THIS_IP_ through
  3361. * KCOV and such which we have no hope of annotating.
  3362. *
  3363. * As such, blanket accept self-references without
  3364. * issue.
  3365. */
  3366. continue;
  3367. }
  3368. if (dest->noendbr)
  3369. continue;
  3370. WARN_FUNC("relocation to !ENDBR: %s",
  3371. insn->sec, insn->offset,
  3372. offstr(dest->sec, dest->offset));
  3373. warnings++;
  3374. }
  3375. return warnings;
  3376. }
  3377. static int validate_ibt_data_reloc(struct objtool_file *file,
  3378. struct reloc *reloc)
  3379. {
  3380. struct instruction *dest;
  3381. dest = find_insn(file, reloc->sym->sec,
  3382. reloc->sym->offset + reloc->addend);
  3383. if (!dest)
  3384. return 0;
  3385. if (dest->type == INSN_ENDBR) {
  3386. mark_endbr_used(dest);
  3387. return 0;
  3388. }
  3389. if (dest->noendbr)
  3390. return 0;
  3391. WARN_FUNC("data relocation to !ENDBR: %s",
  3392. reloc->sec->base, reloc->offset,
  3393. offstr(dest->sec, dest->offset));
  3394. return 1;
  3395. }
  3396. /*
  3397. * Validate IBT rules and remove used ENDBR instructions from the seal list.
  3398. * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
  3399. * NOPs) later, in create_ibt_endbr_seal_sections().
  3400. */
  3401. static int validate_ibt(struct objtool_file *file)
  3402. {
  3403. struct section *sec;
  3404. struct reloc *reloc;
  3405. struct instruction *insn;
  3406. int warnings = 0;
  3407. for_each_insn(file, insn)
  3408. warnings += validate_ibt_insn(file, insn);
  3409. for_each_sec(file, sec) {
  3410. /* Already done by validate_ibt_insn() */
  3411. if (sec->sh.sh_flags & SHF_EXECINSTR)
  3412. continue;
  3413. if (!sec->reloc)
  3414. continue;
  3415. /*
  3416. * These sections can reference text addresses, but not with
  3417. * the intent to indirect branch to them.
  3418. */
  3419. if ((!strncmp(sec->name, ".discard", 8) &&
  3420. strcmp(sec->name, ".discard.ibt_endbr_noseal")) ||
  3421. !strncmp(sec->name, ".debug", 6) ||
  3422. !strcmp(sec->name, ".altinstructions") ||
  3423. !strcmp(sec->name, ".ibt_endbr_seal") ||
  3424. !strcmp(sec->name, ".orc_unwind_ip") ||
  3425. !strcmp(sec->name, ".parainstructions") ||
  3426. !strcmp(sec->name, ".retpoline_sites") ||
  3427. !strcmp(sec->name, ".smp_locks") ||
  3428. !strcmp(sec->name, ".static_call_sites") ||
  3429. !strcmp(sec->name, "_error_injection_whitelist") ||
  3430. !strcmp(sec->name, "_kprobe_blacklist") ||
  3431. !strcmp(sec->name, "__bug_table") ||
  3432. !strcmp(sec->name, "__ex_table") ||
  3433. !strcmp(sec->name, "__jump_table") ||
  3434. !strcmp(sec->name, "__mcount_loc") ||
  3435. !strcmp(sec->name, ".kcfi_traps") ||
  3436. strstr(sec->name, "__patchable_function_entries"))
  3437. continue;
  3438. list_for_each_entry(reloc, &sec->reloc->reloc_list, list)
  3439. warnings += validate_ibt_data_reloc(file, reloc);
  3440. }
  3441. return warnings;
  3442. }
  3443. static int validate_sls(struct objtool_file *file)
  3444. {
  3445. struct instruction *insn, *next_insn;
  3446. int warnings = 0;
  3447. for_each_insn(file, insn) {
  3448. next_insn = next_insn_same_sec(file, insn);
  3449. if (insn->retpoline_safe)
  3450. continue;
  3451. switch (insn->type) {
  3452. case INSN_RETURN:
  3453. if (!next_insn || next_insn->type != INSN_TRAP) {
  3454. WARN_FUNC("missing int3 after ret",
  3455. insn->sec, insn->offset);
  3456. warnings++;
  3457. }
  3458. break;
  3459. case INSN_JUMP_DYNAMIC:
  3460. if (!next_insn || next_insn->type != INSN_TRAP) {
  3461. WARN_FUNC("missing int3 after indirect jump",
  3462. insn->sec, insn->offset);
  3463. warnings++;
  3464. }
  3465. break;
  3466. default:
  3467. break;
  3468. }
  3469. }
  3470. return warnings;
  3471. }
  3472. static int validate_reachable_instructions(struct objtool_file *file)
  3473. {
  3474. struct instruction *insn;
  3475. if (file->ignore_unreachables)
  3476. return 0;
  3477. for_each_insn(file, insn) {
  3478. if (insn->visited || ignore_unreachable_insn(file, insn))
  3479. continue;
  3480. WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
  3481. return 1;
  3482. }
  3483. return 0;
  3484. }
  3485. int check(struct objtool_file *file)
  3486. {
  3487. int ret, warnings = 0;
  3488. arch_initial_func_cfi_state(&initial_func_cfi);
  3489. init_cfi_state(&init_cfi);
  3490. init_cfi_state(&func_cfi);
  3491. set_func_state(&func_cfi);
  3492. if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
  3493. goto out;
  3494. cfi_hash_add(&init_cfi);
  3495. cfi_hash_add(&func_cfi);
  3496. ret = decode_sections(file);
  3497. if (ret < 0)
  3498. goto out;
  3499. warnings += ret;
  3500. if (list_empty(&file->insn_list))
  3501. goto out;
  3502. if (opts.retpoline) {
  3503. ret = validate_retpoline(file);
  3504. if (ret < 0)
  3505. return ret;
  3506. warnings += ret;
  3507. }
  3508. if (opts.stackval || opts.orc || opts.uaccess) {
  3509. ret = validate_functions(file);
  3510. if (ret < 0)
  3511. goto out;
  3512. warnings += ret;
  3513. ret = validate_unwind_hints(file, NULL);
  3514. if (ret < 0)
  3515. goto out;
  3516. warnings += ret;
  3517. if (!warnings) {
  3518. ret = validate_reachable_instructions(file);
  3519. if (ret < 0)
  3520. goto out;
  3521. warnings += ret;
  3522. }
  3523. } else if (opts.noinstr) {
  3524. ret = validate_noinstr_sections(file);
  3525. if (ret < 0)
  3526. goto out;
  3527. warnings += ret;
  3528. }
  3529. if (opts.unret) {
  3530. /*
  3531. * Must be after validate_branch() and friends, it plays
  3532. * further games with insn->visited.
  3533. */
  3534. ret = validate_unret(file);
  3535. if (ret < 0)
  3536. return ret;
  3537. warnings += ret;
  3538. }
  3539. if (opts.ibt) {
  3540. ret = validate_ibt(file);
  3541. if (ret < 0)
  3542. goto out;
  3543. warnings += ret;
  3544. }
  3545. if (opts.sls) {
  3546. ret = validate_sls(file);
  3547. if (ret < 0)
  3548. goto out;
  3549. warnings += ret;
  3550. }
  3551. if (opts.static_call) {
  3552. ret = create_static_call_sections(file);
  3553. if (ret < 0)
  3554. goto out;
  3555. warnings += ret;
  3556. }
  3557. if (opts.retpoline) {
  3558. ret = create_retpoline_sites_sections(file);
  3559. if (ret < 0)
  3560. goto out;
  3561. warnings += ret;
  3562. }
  3563. if (opts.rethunk) {
  3564. ret = create_return_sites_sections(file);
  3565. if (ret < 0)
  3566. goto out;
  3567. warnings += ret;
  3568. }
  3569. if (opts.mcount) {
  3570. ret = create_mcount_loc_sections(file);
  3571. if (ret < 0)
  3572. goto out;
  3573. warnings += ret;
  3574. }
  3575. if (opts.ibt) {
  3576. ret = create_ibt_endbr_seal_sections(file);
  3577. if (ret < 0)
  3578. goto out;
  3579. warnings += ret;
  3580. }
  3581. if (opts.orc && !list_empty(&file->insn_list)) {
  3582. ret = orc_create(file);
  3583. if (ret < 0)
  3584. goto out;
  3585. warnings += ret;
  3586. }
  3587. if (opts.stats) {
  3588. printf("nr_insns_visited: %ld\n", nr_insns_visited);
  3589. printf("nr_cfi: %ld\n", nr_cfi);
  3590. printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
  3591. printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
  3592. }
  3593. out:
  3594. /*
  3595. * For now, don't fail the kernel build on fatal warnings. These
  3596. * errors are still fairly common due to the growing matrix of
  3597. * supported toolchains and their recent pace of change.
  3598. */
  3599. return 0;
  3600. }