btf_dump.c

  1. // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
  2. /*
  3. * BTF-to-C type converter.
  4. *
  5. * Copyright (c) 2019 Facebook
  6. */
  7. #include <stdbool.h>
  8. #include <stddef.h>
  9. #include <stdlib.h>
  10. #include <string.h>
  11. #include <ctype.h>
  12. #include <endian.h>
  13. #include <errno.h>
  14. #include <linux/err.h>
  15. #include <linux/btf.h>
  16. #include <linux/kernel.h>
  17. #include "btf.h"
  18. #include "hashmap.h"
  19. #include "libbpf.h"
  20. #include "libbpf_internal.h"
  21. static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
  22. static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1;
  23. static const char *pfx(int lvl)
  24. {
  25. return lvl >= PREFIX_CNT ? PREFIXES : &PREFIXES[PREFIX_CNT - lvl];
  26. }
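/*
 * Illustrative note (not part of the original source): pfx(2) yields "\t\t";
 * indentation levels deeper than the 13 tabs stored in PREFIXES are clamped
 * to the full PREFIXES string.
 */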
  27. enum btf_dump_type_order_state {
  28. NOT_ORDERED,
  29. ORDERING,
  30. ORDERED,
  31. };
  32. enum btf_dump_type_emit_state {
  33. NOT_EMITTED,
  34. EMITTING,
  35. EMITTED,
  36. };
  37. /* per-type auxiliary state */
  38. struct btf_dump_type_aux_state {
  39. /* topological sorting state */
  40. enum btf_dump_type_order_state order_state: 2;
  41. /* emitting state used to determine the need for forward declaration */
  42. enum btf_dump_type_emit_state emit_state: 2;
  43. /* whether forward declaration was already emitted */
  44. __u8 fwd_emitted: 1;
  45. /* whether unique non-duplicate name was already assigned */
  46. __u8 name_resolved: 1;
  47. /* whether type is referenced from any other type */
  48. __u8 referenced: 1;
  49. };
  50. /* indent string length; one indent string is added for each indent level */
  51. #define BTF_DATA_INDENT_STR_LEN 32
  52. /*
  53. * Common internal data for BTF type data dump operations.
  54. */
  55. struct btf_dump_data {
  56. const void *data_end; /* end of valid data to show */
  57. bool compact;
  58. bool skip_names;
  59. bool emit_zeroes;
  60. __u8 indent_lvl; /* base indent level */
  61. char indent_str[BTF_DATA_INDENT_STR_LEN];
  62. /* below are used during iteration */
  63. int depth;
  64. bool is_array_member;
  65. bool is_array_terminated;
  66. bool is_array_char;
  67. };
  68. struct btf_dump {
  69. const struct btf *btf;
  70. btf_dump_printf_fn_t printf_fn;
  71. void *cb_ctx;
  72. int ptr_sz;
  73. bool strip_mods;
  74. bool skip_anon_defs;
  75. int last_id;
  76. /* per-type auxiliary state */
  77. struct btf_dump_type_aux_state *type_states;
  78. size_t type_states_cap;
  79. /* per-type optional cached unique name, must be freed, if present */
  80. const char **cached_names;
  81. size_t cached_names_cap;
  82. /* topo-sorted list of dependent type definitions */
  83. __u32 *emit_queue;
  84. int emit_queue_cap;
  85. int emit_queue_cnt;
  86. /*
  87. * stack of type declarations (e.g., chain of modifiers, arrays,
  88. * funcs, etc)
  89. */
  90. __u32 *decl_stack;
  91. int decl_stack_cap;
  92. int decl_stack_cnt;
  93. /* maps struct/union/enum name to a number of name occurrences */
  94. struct hashmap *type_names;
  95. /*
  96. * maps typedef identifiers and enum value names to a number of such
  97. * name occurrences
  98. */
  99. struct hashmap *ident_names;
  100. /*
  101. * data for typed display; allocated if needed.
  102. */
  103. struct btf_dump_data *typed_dump;
  104. };
  105. static size_t str_hash_fn(const void *key, void *ctx)
  106. {
  107. return str_hash(key);
  108. }
  109. static bool str_equal_fn(const void *a, const void *b, void *ctx)
  110. {
  111. return strcmp(a, b) == 0;
  112. }
  113. static const char *btf_name_of(const struct btf_dump *d, __u32 name_off)
  114. {
  115. return btf__name_by_offset(d->btf, name_off);
  116. }
  117. static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
  118. {
  119. va_list args;
  120. va_start(args, fmt);
  121. d->printf_fn(d->cb_ctx, fmt, args);
  122. va_end(args);
  123. }
  124. static int btf_dump_mark_referenced(struct btf_dump *d);
  125. static int btf_dump_resize(struct btf_dump *d);
  126. struct btf_dump *btf_dump__new(const struct btf *btf,
  127. btf_dump_printf_fn_t printf_fn,
  128. void *ctx,
  129. const struct btf_dump_opts *opts)
  130. {
  131. struct btf_dump *d;
  132. int err;
  133. if (!OPTS_VALID(opts, btf_dump_opts))
  134. return libbpf_err_ptr(-EINVAL);
  135. if (!printf_fn)
  136. return libbpf_err_ptr(-EINVAL);
  137. d = calloc(1, sizeof(struct btf_dump));
  138. if (!d)
  139. return libbpf_err_ptr(-ENOMEM);
  140. d->btf = btf;
  141. d->printf_fn = printf_fn;
  142. d->cb_ctx = ctx;
  143. d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *);
  144. d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
  145. if (IS_ERR(d->type_names)) {
  146. err = PTR_ERR(d->type_names);
  147. d->type_names = NULL;
  148. goto err;
  149. }
  150. d->ident_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
  151. if (IS_ERR(d->ident_names)) {
  152. err = PTR_ERR(d->ident_names);
  153. d->ident_names = NULL;
  154. goto err;
  155. }
  156. err = btf_dump_resize(d);
  157. if (err)
  158. goto err;
  159. return d;
  160. err:
  161. btf_dump__free(d);
  162. return libbpf_err_ptr(err);
  163. }
  164. static int btf_dump_resize(struct btf_dump *d)
  165. {
  166. int err, last_id = btf__type_cnt(d->btf) - 1;
  167. if (last_id <= d->last_id)
  168. return 0;
  169. if (libbpf_ensure_mem((void **)&d->type_states, &d->type_states_cap,
  170. sizeof(*d->type_states), last_id + 1))
  171. return -ENOMEM;
  172. if (libbpf_ensure_mem((void **)&d->cached_names, &d->cached_names_cap,
  173. sizeof(*d->cached_names), last_id + 1))
  174. return -ENOMEM;
  175. if (d->last_id == 0) {
  176. /* VOID is special */
  177. d->type_states[0].order_state = ORDERED;
  178. d->type_states[0].emit_state = EMITTED;
  179. }
  180. /* eagerly determine referenced types for anon enums */
  181. err = btf_dump_mark_referenced(d);
  182. if (err)
  183. return err;
  184. d->last_id = last_id;
  185. return 0;
  186. }
  187. static void btf_dump_free_names(struct hashmap *map)
  188. {
  189. size_t bkt;
  190. struct hashmap_entry *cur;
  191. hashmap__for_each_entry(map, cur, bkt)
  192. free((void *)cur->key);
  193. hashmap__free(map);
  194. }
  195. void btf_dump__free(struct btf_dump *d)
  196. {
  197. int i;
  198. if (IS_ERR_OR_NULL(d))
  199. return;
  200. free(d->type_states);
  201. if (d->cached_names) {
  202. /* any set cached name is owned by us and should be freed */
  203. for (i = 0; i <= d->last_id; i++) {
  204. if (d->cached_names[i])
  205. free((void *)d->cached_names[i]);
  206. }
  207. }
  208. free(d->cached_names);
  209. free(d->emit_queue);
  210. free(d->decl_stack);
  211. btf_dump_free_names(d->type_names);
  212. btf_dump_free_names(d->ident_names);
  213. free(d);
  214. }
  215. static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr);
  216. static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id);
  217. /*
  218. * Dump BTF type in compilable C syntax, including all the dependent types
  219. * necessary for compilation. If some of the dependent types were already
  220. * emitted as part of a previous btf_dump__dump_type() invocation for another
  221. * type, they won't be emitted again. This API allows callers to filter out
  222. * BTF types according to user-defined criteria and emit only a minimal
  223. * subset of types necessary to compile everything. Full struct/union
  224. * definitions will still be emitted, even if the only usage is through a
  225. * pointer and could be satisfied with just a forward declaration.
  226. *
  227. * Dumping is done in two high-level passes:
  228. * 1. Topologically sort type definitions to satisfy C rules of compilation.
  229. * 2. Emit type definitions in C syntax.
  230. *
  231. * Returns 0 on success; <0, otherwise.
  232. */
  233. int btf_dump__dump_type(struct btf_dump *d, __u32 id)
  234. {
  235. int err, i;
  236. if (id >= btf__type_cnt(d->btf))
  237. return libbpf_err(-EINVAL);
  238. err = btf_dump_resize(d);
  239. if (err)
  240. return libbpf_err(err);
  241. d->emit_queue_cnt = 0;
  242. err = btf_dump_order_type(d, id, false);
  243. if (err < 0)
  244. return libbpf_err(err);
  245. for (i = 0; i < d->emit_queue_cnt; i++)
  246. btf_dump_emit_type(d, d->emit_queue[i], 0 /*top-level*/);
  247. return 0;
  248. }
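/*
 * Illustrative usage sketch (not part of this file) of the API above,
 * assuming a BTF object obtained elsewhere (e.g., via btf__parse()) and
 * libbpf 1.0 error-reporting conventions; error handling is simplified.
 *
 *	static void print_cb(void *ctx, const char *fmt, va_list args)
 *	{
 *		vprintf(fmt, args);
 *	}
 *
 *	struct btf_dump *d = btf_dump__new(btf, print_cb, NULL, NULL);
 *	__u32 i;
 *
 *	if (!d)
 *		return -errno;
 *	for (i = 1; i < btf__type_cnt(btf); i++)
 *		btf_dump__dump_type(d, i);
 *	btf_dump__free(d);
 */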
  249. /*
  250. * Mark all types that are referenced from any other type. This is used to
  251. * determine top-level anonymous enums that need to be emitted as
  252. * independent type declarations.
  253. * Anonymous enums come in two flavors: either embedded in a struct's field
  254. * definition, in which case they have to be declared inline as part of the
  255. * field type declaration; or as a top-level anonymous enum, typically used
  256. * for declaring global constants. It's impossible to distinguish between the
  257. * two without knowing whether a given enum type was referenced from any other
  258. * type: a top-level anonymous enum won't be referenced by anything, while an
  259. * embedded one will.
  260. */
  261. static int btf_dump_mark_referenced(struct btf_dump *d)
  262. {
  263. int i, j, n = btf__type_cnt(d->btf);
  264. const struct btf_type *t;
  265. __u16 vlen;
  266. for (i = d->last_id + 1; i < n; i++) {
  267. t = btf__type_by_id(d->btf, i);
  268. vlen = btf_vlen(t);
  269. switch (btf_kind(t)) {
  270. case BTF_KIND_INT:
  271. case BTF_KIND_ENUM:
  272. case BTF_KIND_ENUM64:
  273. case BTF_KIND_FWD:
  274. case BTF_KIND_FLOAT:
  275. break;
  276. case BTF_KIND_VOLATILE:
  277. case BTF_KIND_CONST:
  278. case BTF_KIND_RESTRICT:
  279. case BTF_KIND_PTR:
  280. case BTF_KIND_TYPEDEF:
  281. case BTF_KIND_FUNC:
  282. case BTF_KIND_VAR:
  283. case BTF_KIND_DECL_TAG:
  284. case BTF_KIND_TYPE_TAG:
  285. d->type_states[t->type].referenced = 1;
  286. break;
  287. case BTF_KIND_ARRAY: {
  288. const struct btf_array *a = btf_array(t);
  289. d->type_states[a->index_type].referenced = 1;
  290. d->type_states[a->type].referenced = 1;
  291. break;
  292. }
  293. case BTF_KIND_STRUCT:
  294. case BTF_KIND_UNION: {
  295. const struct btf_member *m = btf_members(t);
  296. for (j = 0; j < vlen; j++, m++)
  297. d->type_states[m->type].referenced = 1;
  298. break;
  299. }
  300. case BTF_KIND_FUNC_PROTO: {
  301. const struct btf_param *p = btf_params(t);
  302. for (j = 0; j < vlen; j++, p++)
  303. d->type_states[p->type].referenced = 1;
  304. break;
  305. }
  306. case BTF_KIND_DATASEC: {
  307. const struct btf_var_secinfo *v = btf_var_secinfos(t);
  308. for (j = 0; j < vlen; j++, v++)
  309. d->type_states[v->type].referenced = 1;
  310. break;
  311. }
  312. default:
  313. return -EINVAL;
  314. }
  315. }
  316. return 0;
  317. }
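/*
 * For illustration (not part of the original source): in
 *
 *	enum { GLOBAL_CONST = 1 };               (top-level anonymous enum)
 *	struct s { enum { EMBEDDED = 2 } f; };   (embedded anonymous enum)
 *
 * only the second anonymous enum is referenced (from struct s's member), so
 * the first one will be treated as an independent top-level declaration.
 */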
  318. static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id)
  319. {
  320. __u32 *new_queue;
  321. size_t new_cap;
  322. if (d->emit_queue_cnt >= d->emit_queue_cap) {
  323. new_cap = max(16, d->emit_queue_cap * 3 / 2);
  324. new_queue = libbpf_reallocarray(d->emit_queue, new_cap, sizeof(new_queue[0]));
  325. if (!new_queue)
  326. return -ENOMEM;
  327. d->emit_queue = new_queue;
  328. d->emit_queue_cap = new_cap;
  329. }
  330. d->emit_queue[d->emit_queue_cnt++] = id;
  331. return 0;
  332. }
  333. /*
  334. * Determine order of emitting dependent types and specified type to satisfy
  335. * C compilation rules. This is done through topological sorting with an
  336. * additional complication which comes from C rules. The main idea for C is
  337. * that if some type is "embedded" into a struct/union, its size needs to be
  338. * known at the time of definition of containing type. E.g., for:
  339. *
  340. * struct A {};
  341. * struct B { struct A x; }
  342. *
  343. * struct A *HAS* to be defined before struct B, because it's "embedded",
  344. * i.e., it is part of struct B layout. But in the following case:
  345. *
  346. * struct A;
  347. * struct B { struct A *x; }
  348. * struct A {};
  349. *
  350. * it's enough to just have a forward declaration of struct A at the time of
  351. * struct B definition, as struct B has a pointer to struct A, so the size of
  352. * field x is known without knowing struct A size: it's sizeof(void *).
  353. *
  354. * Unfortunately, there are some trickier cases we need to handle, e.g.:
  355. *
  356. * struct A {}; // if this was forward-declaration: compilation error
  357. * struct B {
  358. * struct { // anonymous struct
  359. * struct A y;
  360. * } *x;
  361. * };
  362. *
  363. * In this case, struct B's field x is a pointer, so its size is known
  364. * regardless of the size of (anonymous) struct it points to. But because this
  365. * struct is anonymous and thus defined inline inside struct B, *and* it
  366. * embeds struct A, compiler requires full definition of struct A to be known
  367. * before struct B can be defined. This creates a transitive dependency
  368. * between struct A and struct B. If struct A was forward-declared before
  369. * struct B definition and fully defined after struct B definition, that would
  370. * trigger compilation error.
  371. *
  372. * All this means that while we are doing topological sorting on BTF type
  373. * graph, we need to determine relationships between different types (graph
  374. * nodes):
  375. * - weak link (relationship) between X and Y, if Y *CAN* be
  376. * forward-declared at the point of X definition;
  377. * - strong link, if Y *HAS* to be fully-defined before X can be defined.
  378. *
  379. * The rule is as follows. Given a chain of BTF types from X to Y, if there is
  380. * BTF_KIND_PTR type in the chain and at least one non-anonymous type
  381. * Z (excluding X, including Y), then link is weak. Otherwise, it's strong.
  382. * Weak/strong relationship is determined recursively during DFS traversal and
  383. * is returned as a result from btf_dump_order_type().
  384. *
  385. * btf_dump_order_type() tries to avoid unnecessary forward declarations, but
  386. * it does not guarantee that no extraneous forward declarations will be
  387. * emitted.
  388. *
  389. * To avoid extra work, the algorithm marks some BTF types as ORDERED once
  390. * it's done with them, but not all of them (e.g., VOLATILE, CONST, RESTRICT,
  391. * ARRAY, FUNC_PROTO), as weak/strong semantics for those depends on the
  392. * entire graph path, so depending on where one arrived at such a BTF type
  393. * from, it might cause weak or strong ordering. For types like STRUCT/UNION/INT/ENUM,
  394. * once they are processed, there is no need to do it again, so they are
  395. * marked as ORDERED. We can mark PTR as ORDERED as well, as it semi-forces
  396. * weak link, unless subsequent referenced STRUCT/UNION/ENUM is anonymous. But
  397. * in any case, once those are processed, no need to do it again, as the
  398. * result won't change.
  399. *
  400. * Returns:
  401. * - 1, if type is part of strong link (so there is strong topological
  402. * ordering requirements);
  403. * - 0, if type is part of weak link (so can be satisfied through forward
  404. * declaration);
  405. * - <0, on error (e.g., unsatisfiable type loop detected).
  406. */
  407. static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
  408. {
  409. /*
  410. * Order state is used to detect strong link cycles, but only for BTF
  411. * kinds that are or could be an independent definition (i.e.,
  412. * stand-alone fwd decl, enum, typedef, struct, union). Ptrs, arrays,
  413. * func_protos, modifiers are just means to get to these definitions.
  414. * Int/void don't need definitions, they are assumed to be always
  415. * properly defined. We also ignore datasec, var, and funcs for now.
  416. * So for all non-defining kinds, we never even set ordering state,
  417. * for defining kinds we set ORDERING and subsequently ORDERED if it
  418. * forms a strong link.
  419. */
  420. struct btf_dump_type_aux_state *tstate = &d->type_states[id];
  421. const struct btf_type *t;
  422. __u16 vlen;
  423. int err, i;
  424. /* return true, letting typedefs know that it's ok to be emitted */
  425. if (tstate->order_state == ORDERED)
  426. return 1;
  427. t = btf__type_by_id(d->btf, id);
  428. if (tstate->order_state == ORDERING) {
  429. /* type loop, but resolvable through fwd declaration */
  430. if (btf_is_composite(t) && through_ptr && t->name_off != 0)
  431. return 0;
  432. pr_warn("unsatisfiable type cycle, id:[%u]\n", id);
  433. return -ELOOP;
  434. }
  435. switch (btf_kind(t)) {
  436. case BTF_KIND_INT:
  437. case BTF_KIND_FLOAT:
  438. tstate->order_state = ORDERED;
  439. return 0;
  440. case BTF_KIND_PTR:
  441. err = btf_dump_order_type(d, t->type, true);
  442. tstate->order_state = ORDERED;
  443. return err;
  444. case BTF_KIND_ARRAY:
  445. return btf_dump_order_type(d, btf_array(t)->type, false);
  446. case BTF_KIND_STRUCT:
  447. case BTF_KIND_UNION: {
  448. const struct btf_member *m = btf_members(t);
  449. /*
  450. * struct/union is part of strong link, only if it's embedded
  451. * (so no ptr in a path) or it's anonymous (so has to be
  452. * defined inline, even if declared through ptr)
  453. */
  454. if (through_ptr && t->name_off != 0)
  455. return 0;
  456. tstate->order_state = ORDERING;
  457. vlen = btf_vlen(t);
  458. for (i = 0; i < vlen; i++, m++) {
  459. err = btf_dump_order_type(d, m->type, false);
  460. if (err < 0)
  461. return err;
  462. }
  463. if (t->name_off != 0) {
  464. err = btf_dump_add_emit_queue_id(d, id);
  465. if (err < 0)
  466. return err;
  467. }
  468. tstate->order_state = ORDERED;
  469. return 1;
  470. }
  471. case BTF_KIND_ENUM:
  472. case BTF_KIND_ENUM64:
  473. case BTF_KIND_FWD:
  474. /*
  475. * non-anonymous or non-referenced enums are top-level
  476. * declarations and should be emitted. Same logic can be
  477. * applied to FWDs, it won't hurt anyway.
  478. */
  479. if (t->name_off != 0 || !tstate->referenced) {
  480. err = btf_dump_add_emit_queue_id(d, id);
  481. if (err)
  482. return err;
  483. }
  484. tstate->order_state = ORDERED;
  485. return 1;
  486. case BTF_KIND_TYPEDEF: {
  487. int is_strong;
  488. is_strong = btf_dump_order_type(d, t->type, through_ptr);
  489. if (is_strong < 0)
  490. return is_strong;
  491. /* typedef is similar to struct/union w.r.t. fwd-decls */
  492. if (through_ptr && !is_strong)
  493. return 0;
  494. /* typedef is always a named definition */
  495. err = btf_dump_add_emit_queue_id(d, id);
  496. if (err)
  497. return err;
  498. d->type_states[id].order_state = ORDERED;
  499. return 1;
  500. }
  501. case BTF_KIND_VOLATILE:
  502. case BTF_KIND_CONST:
  503. case BTF_KIND_RESTRICT:
  504. case BTF_KIND_TYPE_TAG:
  505. return btf_dump_order_type(d, t->type, through_ptr);
  506. case BTF_KIND_FUNC_PROTO: {
  507. const struct btf_param *p = btf_params(t);
  508. bool is_strong;
  509. err = btf_dump_order_type(d, t->type, through_ptr);
  510. if (err < 0)
  511. return err;
  512. is_strong = err > 0;
  513. vlen = btf_vlen(t);
  514. for (i = 0; i < vlen; i++, p++) {
  515. err = btf_dump_order_type(d, p->type, through_ptr);
  516. if (err < 0)
  517. return err;
  518. if (err > 0)
  519. is_strong = true;
  520. }
  521. return is_strong;
  522. }
  523. case BTF_KIND_FUNC:
  524. case BTF_KIND_VAR:
  525. case BTF_KIND_DATASEC:
  526. case BTF_KIND_DECL_TAG:
  527. d->type_states[id].order_state = ORDERED;
  528. return 0;
  529. default:
  530. return -EINVAL;
  531. }
  532. }
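/*
 * Illustrative example (not part of the original source) of the resulting
 * order: given
 *
 *	struct A {};
 *	struct B { struct A x; };
 *
 * ordering struct B puts struct A into the emit queue before struct B
 * (strong link). For
 *
 *	struct B { struct A *x; };
 *
 * only struct B is queued, and struct A is later satisfied with a forward
 * declaration during emission (weak link through a pointer).
 */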
  533. static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
  534. const struct btf_type *t);
  535. static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
  536. const struct btf_type *t);
  537. static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id,
  538. const struct btf_type *t, int lvl);
  539. static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
  540. const struct btf_type *t);
  541. static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
  542. const struct btf_type *t, int lvl);
  543. static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
  544. const struct btf_type *t);
  545. static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id,
  546. const struct btf_type *t, int lvl);
  547. /* a local view into a shared stack */
  548. struct id_stack {
  549. const __u32 *ids;
  550. int cnt;
  551. };
  552. static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
  553. const char *fname, int lvl);
  554. static void btf_dump_emit_type_chain(struct btf_dump *d,
  555. struct id_stack *decl_stack,
  556. const char *fname, int lvl);
  557. static const char *btf_dump_type_name(struct btf_dump *d, __u32 id);
  558. static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id);
  559. static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
  560. const char *orig_name);
  561. static bool btf_dump_is_blacklisted(struct btf_dump *d, __u32 id)
  562. {
  563. const struct btf_type *t = btf__type_by_id(d->btf, id);
  564. /* __builtin_va_list is a compiler built-in, which causes compilation
  565. * errors when compiling with a different compiler than the one used to
  566. * compile the original code (e.g., GCC to compile the kernel, Clang to
  567. * consume the generated C header from BTF). As it is a built-in, it should
  568. * already be properly defined inside the compiler.
  569. */
  570. if (t->name_off == 0)
  571. return false;
  572. return strcmp(btf_name_of(d, t->name_off), "__builtin_va_list") == 0;
  573. }
  574. /*
  575. * Emit C-syntax definitions of types from chains of BTF types.
  576. *
  577. * High-level handling of determining necessary forward declarations is done
  578. * by btf_dump_emit_type() itself, but all the nitty-gritty details of emitting type
  579. * declarations/definitions in C syntax are handled by a combo of
  580. * btf_dump_emit_type_decl()/btf_dump_emit_type_chain() w/ delegation to
  581. * corresponding btf_dump_emit_*_{def,fwd}() functions.
  582. *
  583. * We also keep track of "containing struct/union type ID" to determine when
  584. * we reference it from inside and thus can avoid emitting unnecessary forward
  585. * declaration.
  586. *
  587. * This algorithm is designed in such a way that even if some error occurs
  588. * (either technical, e.g., out of memory, or logical, i.e., malformed BTF
  589. * that doesn't fully comply with C rules), the algorithm will try to proceed
  590. * and produce as much meaningful output as possible.
  591. */
  592. static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
  593. {
  594. struct btf_dump_type_aux_state *tstate = &d->type_states[id];
  595. bool top_level_def = cont_id == 0;
  596. const struct btf_type *t;
  597. __u16 kind;
  598. if (tstate->emit_state == EMITTED)
  599. return;
  600. t = btf__type_by_id(d->btf, id);
  601. kind = btf_kind(t);
  602. if (tstate->emit_state == EMITTING) {
  603. if (tstate->fwd_emitted)
  604. return;
  605. switch (kind) {
  606. case BTF_KIND_STRUCT:
  607. case BTF_KIND_UNION:
  608. /*
  609. * if we are referencing a struct/union that we are
  610. * part of - then no need for fwd declaration
  611. */
  612. if (id == cont_id)
  613. return;
  614. if (t->name_off == 0) {
  615. pr_warn("anonymous struct/union loop, id:[%u]\n",
  616. id);
  617. return;
  618. }
  619. btf_dump_emit_struct_fwd(d, id, t);
  620. btf_dump_printf(d, ";\n\n");
  621. tstate->fwd_emitted = 1;
  622. break;
  623. case BTF_KIND_TYPEDEF:
  624. /*
  625. * for typedef fwd_emitted means typedef definition
  626. * was emitted, but it can be used only for "weak"
  627. * references through pointer only, not for embedding
  628. */
  629. if (!btf_dump_is_blacklisted(d, id)) {
  630. btf_dump_emit_typedef_def(d, id, t, 0);
  631. btf_dump_printf(d, ";\n\n");
  632. }
  633. tstate->fwd_emitted = 1;
  634. break;
  635. default:
  636. break;
  637. }
  638. return;
  639. }
  640. switch (kind) {
  641. case BTF_KIND_INT:
  642. /* Emit type alias definitions if necessary */
  643. btf_dump_emit_missing_aliases(d, id, t);
  644. tstate->emit_state = EMITTED;
  645. break;
  646. case BTF_KIND_ENUM:
  647. case BTF_KIND_ENUM64:
  648. if (top_level_def) {
  649. btf_dump_emit_enum_def(d, id, t, 0);
  650. btf_dump_printf(d, ";\n\n");
  651. }
  652. tstate->emit_state = EMITTED;
  653. break;
  654. case BTF_KIND_PTR:
  655. case BTF_KIND_VOLATILE:
  656. case BTF_KIND_CONST:
  657. case BTF_KIND_RESTRICT:
  658. case BTF_KIND_TYPE_TAG:
  659. btf_dump_emit_type(d, t->type, cont_id);
  660. break;
  661. case BTF_KIND_ARRAY:
  662. btf_dump_emit_type(d, btf_array(t)->type, cont_id);
  663. break;
  664. case BTF_KIND_FWD:
  665. btf_dump_emit_fwd_def(d, id, t);
  666. btf_dump_printf(d, ";\n\n");
  667. tstate->emit_state = EMITTED;
  668. break;
  669. case BTF_KIND_TYPEDEF:
  670. tstate->emit_state = EMITTING;
  671. btf_dump_emit_type(d, t->type, id);
  672. /*
  673. * typedef can serve as both a definition and a forward
  674. * declaration; at this stage someone depends on
  675. * typedef as a forward declaration (refers to it
  676. * through pointer), so unless we already did it,
  677. * emit typedef as a forward declaration
  678. */
  679. if (!tstate->fwd_emitted && !btf_dump_is_blacklisted(d, id)) {
  680. btf_dump_emit_typedef_def(d, id, t, 0);
  681. btf_dump_printf(d, ";\n\n");
  682. }
  683. tstate->emit_state = EMITTED;
  684. break;
  685. case BTF_KIND_STRUCT:
  686. case BTF_KIND_UNION:
  687. tstate->emit_state = EMITTING;
  688. /* if it's a top-level struct/union definition or struct/union
  689. * is anonymous, then in C we'll be emitting all fields and
  690. * their types (as opposed to just `struct X`), so we need to
  691. * make sure that all types, referenced from struct/union
  692. * members have necessary forward-declarations, where
  693. * applicable
  694. */
  695. if (top_level_def || t->name_off == 0) {
  696. const struct btf_member *m = btf_members(t);
  697. __u16 vlen = btf_vlen(t);
  698. int i, new_cont_id;
  699. new_cont_id = t->name_off == 0 ? cont_id : id;
  700. for (i = 0; i < vlen; i++, m++)
  701. btf_dump_emit_type(d, m->type, new_cont_id);
  702. } else if (!tstate->fwd_emitted && id != cont_id) {
  703. btf_dump_emit_struct_fwd(d, id, t);
  704. btf_dump_printf(d, ";\n\n");
  705. tstate->fwd_emitted = 1;
  706. }
  707. if (top_level_def) {
  708. btf_dump_emit_struct_def(d, id, t, 0);
  709. btf_dump_printf(d, ";\n\n");
  710. tstate->emit_state = EMITTED;
  711. } else {
  712. tstate->emit_state = NOT_EMITTED;
  713. }
  714. break;
  715. case BTF_KIND_FUNC_PROTO: {
  716. const struct btf_param *p = btf_params(t);
  717. __u16 n = btf_vlen(t);
  718. int i;
  719. btf_dump_emit_type(d, t->type, cont_id);
  720. for (i = 0; i < n; i++, p++)
  721. btf_dump_emit_type(d, p->type, cont_id);
  722. break;
  723. }
  724. default:
  725. break;
  726. }
  727. }
  728. static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
  729. const struct btf_type *t)
  730. {
  731. const struct btf_member *m;
  732. int max_align = 1, align, i, bit_sz;
  733. __u16 vlen;
  734. m = btf_members(t);
  735. vlen = btf_vlen(t);
  736. /* all non-bitfield fields have to be naturally aligned */
  737. for (i = 0; i < vlen; i++, m++) {
  738. align = btf__align_of(btf, m->type);
  739. bit_sz = btf_member_bitfield_size(t, i);
  740. if (align && bit_sz == 0 && m->offset % (8 * align) != 0)
  741. return true;
  742. max_align = max(align, max_align);
  743. }
  744. /* size of a non-packed struct has to be a multiple of its alignment */
  745. if (t->size % max_align != 0)
  746. return true;
  747. /*
  748. * if original struct was marked as packed, but its layout is
  749. * naturally aligned, we'll detect that it's not packed
  750. */
  751. return false;
  752. }
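/*
 * For example (illustrative only): for
 *
 *	struct p { char c; int x; } __attribute__((packed));
 *
 * member x sits at bit offset 8 while int's natural alignment is 4 bytes
 * (8 % 32 != 0), so the check above reports the struct as packed.
 */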
  753. static void btf_dump_emit_bit_padding(const struct btf_dump *d,
  754. int cur_off, int next_off, int next_align,
  755. bool in_bitfield, int lvl)
  756. {
  757. const struct {
  758. const char *name;
  759. int bits;
  760. } pads[] = {
  761. {"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8}
  762. };
  763. int new_off, pad_bits, bits, i;
  764. const char *pad_type;
  765. if (cur_off >= next_off)
  766. return; /* no gap */
  767. /* For filling out padding we want to take advantage of
  768. * natural alignment rules to minimize unnecessary explicit
  769. * padding. First, we find the largest type (among long, int,
  770. * short, or char) that can be used to force naturally aligned
  771. * boundary. Once determined, we'll use such type to fill in
  772. * the remaining padding gap. In some cases we can rely on
  773. * compiler filling some gaps, but sometimes we need to force
  774. * alignment to close natural alignment with markers like
  775. * `long: 0` (this is always the case for bitfields). Note
  776. * that even if struct itself has, let's say 4-byte alignment
  777. * (i.e., it only uses up to int-aligned types), using `long:
  778. * X;` explicit padding doesn't actually change struct's
  779. * overall alignment requirements, but compiler does take into
  780. * account that type's (long, in this example) natural
  781. * alignment requirements when adding implicit padding. We use
  782. * this fact heavily and don't worry about ruining correct
  783. * struct alignment requirement.
  784. */
  785. for (i = 0; i < ARRAY_SIZE(pads); i++) {
  786. pad_bits = pads[i].bits;
  787. pad_type = pads[i].name;
  788. new_off = roundup(cur_off, pad_bits);
  789. if (new_off <= next_off)
  790. break;
  791. }
  792. if (new_off > cur_off && new_off <= next_off) {
  793. /* We need explicit `<type>: 0` aligning mark if next
  794. * field is right on alignment offset and its
  795. * alignment requirement is less strict than <type>'s
  796. * alignment (so compiler won't naturally align to the
  797. * offset we expect), or if subsequent `<type>: X`,
  798. * will actually completely fit in the remaining hole,
  799. * making compiler basically ignore `<type>: X`
  800. * completely.
  801. */
  802. if (in_bitfield ||
  803. (new_off == next_off && roundup(cur_off, next_align * 8) != new_off) ||
  804. (new_off != next_off && next_off - new_off <= new_off - cur_off))
  805. /* but for bitfields we'll emit explicit bit count */
  806. btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type,
  807. in_bitfield ? new_off - cur_off : 0);
  808. cur_off = new_off;
  809. }
  810. /* Now we know we start at naturally aligned offset for a chosen
  811. * padding type (long, int, short, or char), and so the rest is just
  812. * a straightforward filling of remaining padding gap with full
  813. * `<type>: sizeof(<type>);` markers, except for the last one, which
  814. * might need smaller than sizeof(<type>) padding.
  815. */
  816. while (cur_off != next_off) {
  817. bits = min(next_off - cur_off, pad_bits);
  818. if (bits == pad_bits) {
  819. btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits);
  820. cur_off += bits;
  821. continue;
  822. }
  823. /* For the remainder padding that doesn't cover entire
  824. * pad_type bit length, we pick the smallest necessary type.
  825. * This is pure aesthetics, we could have just used `long`,
  826. * but having the smallest necessary one better communicates
  827. * the scale of the padding gap.
  828. */
  829. for (i = ARRAY_SIZE(pads) - 1; i >= 0; i--) {
  830. pad_type = pads[i].name;
  831. pad_bits = pads[i].bits;
  832. if (pad_bits < bits)
  833. continue;
  834. btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, bits);
  835. cur_off += bits;
  836. break;
  837. }
  838. }
  839. }
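/*
 * For example (illustrative only, assuming 8-byte pointers): if BTF describes
 *
 *	struct s { int a; int b; };   (with b at byte offset 8, i.e. a 4-byte hole)
 *
 * then before emitting b this function prints "long: 0;", forcing the next
 * member to the following long-aligned (8-byte) offset and reproducing the
 * original layout.
 */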
  840. static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
  841. const struct btf_type *t)
  842. {
  843. btf_dump_printf(d, "%s%s%s",
  844. btf_is_struct(t) ? "struct" : "union",
  845. t->name_off ? " " : "",
  846. btf_dump_type_name(d, id));
  847. }
  848. static void btf_dump_emit_struct_def(struct btf_dump *d,
  849. __u32 id,
  850. const struct btf_type *t,
  851. int lvl)
  852. {
  853. const struct btf_member *m = btf_members(t);
  854. bool is_struct = btf_is_struct(t);
  855. bool packed, prev_bitfield = false;
  856. int align, i, off = 0;
  857. __u16 vlen = btf_vlen(t);
  858. align = btf__align_of(d->btf, id);
  859. packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0;
  860. btf_dump_printf(d, "%s%s%s {",
  861. is_struct ? "struct" : "union",
  862. t->name_off ? " " : "",
  863. btf_dump_type_name(d, id));
  864. for (i = 0; i < vlen; i++, m++) {
  865. const char *fname;
  866. int m_off, m_sz, m_align;
  867. bool in_bitfield;
  868. fname = btf_name_of(d, m->name_off);
  869. m_sz = btf_member_bitfield_size(t, i);
  870. m_off = btf_member_bit_offset(t, i);
  871. m_align = packed ? 1 : btf__align_of(d->btf, m->type);
  872. in_bitfield = prev_bitfield && m_sz != 0;
  873. btf_dump_emit_bit_padding(d, off, m_off, m_align, in_bitfield, lvl + 1);
  874. btf_dump_printf(d, "\n%s", pfx(lvl + 1));
  875. btf_dump_emit_type_decl(d, m->type, fname, lvl + 1);
  876. if (m_sz) {
  877. btf_dump_printf(d, ": %d", m_sz);
  878. off = m_off + m_sz;
  879. prev_bitfield = true;
  880. } else {
  881. m_sz = max((__s64)0, btf__resolve_size(d->btf, m->type));
  882. off = m_off + m_sz * 8;
  883. prev_bitfield = false;
  884. }
  885. btf_dump_printf(d, ";");
  886. }
  887. /* pad at the end, if necessary */
  888. if (is_struct)
  889. btf_dump_emit_bit_padding(d, off, t->size * 8, align, false, lvl + 1);
  890. /*
  891. * Keep `struct empty {}` on a single line,
  892. * only print newline when there are regular or padding fields.
  893. */
  894. if (vlen || t->size) {
  895. btf_dump_printf(d, "\n");
  896. btf_dump_printf(d, "%s}", pfx(lvl));
  897. } else {
  898. btf_dump_printf(d, "}");
  899. }
  900. if (packed)
  901. btf_dump_printf(d, " __attribute__((packed))");
  902. }
  903. static const char *missing_base_types[][2] = {
  904. /*
  905. * GCC emits typedefs to its internal __PolyX_t types when compiling Arm
  906. * SIMD intrinsics. Alias them to standard base types.
  907. */
  908. { "__Poly8_t", "unsigned char" },
  909. { "__Poly16_t", "unsigned short" },
  910. { "__Poly64_t", "unsigned long long" },
  911. { "__Poly128_t", "unsigned __int128" },
  912. };
  913. static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
  914. const struct btf_type *t)
  915. {
  916. const char *name = btf_dump_type_name(d, id);
  917. int i;
  918. for (i = 0; i < ARRAY_SIZE(missing_base_types); i++) {
  919. if (strcmp(name, missing_base_types[i][0]) == 0) {
  920. btf_dump_printf(d, "typedef %s %s;\n\n",
  921. missing_base_types[i][1], name);
  922. break;
  923. }
  924. }
  925. }
  926. static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
  927. const struct btf_type *t)
  928. {
  929. btf_dump_printf(d, "enum %s", btf_dump_type_name(d, id));
  930. }
  931. static void btf_dump_emit_enum32_val(struct btf_dump *d,
  932. const struct btf_type *t,
  933. int lvl, __u16 vlen)
  934. {
  935. const struct btf_enum *v = btf_enum(t);
  936. bool is_signed = btf_kflag(t);
  937. const char *fmt_str;
  938. const char *name;
  939. size_t dup_cnt;
  940. int i;
  941. for (i = 0; i < vlen; i++, v++) {
  942. name = btf_name_of(d, v->name_off);
  943. /* enumerators share namespace with typedef idents */
  944. dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
  945. if (dup_cnt > 1) {
  946. fmt_str = is_signed ? "\n%s%s___%zd = %d," : "\n%s%s___%zd = %u,";
  947. btf_dump_printf(d, fmt_str, pfx(lvl + 1), name, dup_cnt, v->val);
  948. } else {
  949. fmt_str = is_signed ? "\n%s%s = %d," : "\n%s%s = %u,";
  950. btf_dump_printf(d, fmt_str, pfx(lvl + 1), name, v->val);
  951. }
  952. }
  953. }
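/*
 * Illustrative note (not part of the original source): enumerator names share
 * a namespace with typedef identifiers, so if two unrelated enums both define
 * a VAL enumerator, the second occurrence is emitted as "VAL___2" to keep the
 * generated header compilable.
 */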
  954. static void btf_dump_emit_enum64_val(struct btf_dump *d,
  955. const struct btf_type *t,
  956. int lvl, __u16 vlen)
  957. {
  958. const struct btf_enum64 *v = btf_enum64(t);
  959. bool is_signed = btf_kflag(t);
  960. const char *fmt_str;
  961. const char *name;
  962. size_t dup_cnt;
  963. __u64 val;
  964. int i;
  965. for (i = 0; i < vlen; i++, v++) {
  966. name = btf_name_of(d, v->name_off);
  967. dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
  968. val = btf_enum64_value(v);
  969. if (dup_cnt > 1) {
  970. fmt_str = is_signed ? "\n%s%s___%zd = %lldLL,"
  971. : "\n%s%s___%zd = %lluULL,";
  972. btf_dump_printf(d, fmt_str,
  973. pfx(lvl + 1), name, dup_cnt,
  974. (unsigned long long)val);
  975. } else {
  976. fmt_str = is_signed ? "\n%s%s = %lldLL,"
  977. : "\n%s%s = %lluULL,";
  978. btf_dump_printf(d, fmt_str,
  979. pfx(lvl + 1), name,
  980. (unsigned long long)val);
  981. }
  982. }
  983. }
  984. static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
  985. const struct btf_type *t,
  986. int lvl)
  987. {
  988. __u16 vlen = btf_vlen(t);
  989. btf_dump_printf(d, "enum%s%s",
  990. t->name_off ? " " : "",
  991. btf_dump_type_name(d, id));
  992. if (!vlen)
  993. return;
  994. btf_dump_printf(d, " {");
  995. if (btf_is_enum(t))
  996. btf_dump_emit_enum32_val(d, t, lvl, vlen);
  997. else
  998. btf_dump_emit_enum64_val(d, t, lvl, vlen);
  999. btf_dump_printf(d, "\n%s}", pfx(lvl));
  1000. }
  1001. static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
  1002. const struct btf_type *t)
  1003. {
  1004. const char *name = btf_dump_type_name(d, id);
  1005. if (btf_kflag(t))
  1006. btf_dump_printf(d, "union %s", name);
  1007. else
  1008. btf_dump_printf(d, "struct %s", name);
  1009. }
  1010. static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id,
  1011. const struct btf_type *t, int lvl)
  1012. {
  1013. const char *name = btf_dump_ident_name(d, id);
  1014. /*
  1015. * Old GCC versions emit an invalid typedef for __gnuc_va_list
  1016. * pointing to VOID. This generates warnings from btf_dump() and
  1017. * results in an uncompilable header file, so we fix it up here
  1018. * with a valid typedef to __builtin_va_list.
  1019. */
  1020. if (t->type == 0 && strcmp(name, "__gnuc_va_list") == 0) {
  1021. btf_dump_printf(d, "typedef __builtin_va_list __gnuc_va_list");
  1022. return;
  1023. }
  1024. btf_dump_printf(d, "typedef ");
  1025. btf_dump_emit_type_decl(d, t->type, name, lvl);
  1026. }
  1027. static int btf_dump_push_decl_stack_id(struct btf_dump *d, __u32 id)
  1028. {
  1029. __u32 *new_stack;
  1030. size_t new_cap;
  1031. if (d->decl_stack_cnt >= d->decl_stack_cap) {
  1032. new_cap = max(16, d->decl_stack_cap * 3 / 2);
  1033. new_stack = libbpf_reallocarray(d->decl_stack, new_cap, sizeof(new_stack[0]));
  1034. if (!new_stack)
  1035. return -ENOMEM;
  1036. d->decl_stack = new_stack;
  1037. d->decl_stack_cap = new_cap;
  1038. }
  1039. d->decl_stack[d->decl_stack_cnt++] = id;
  1040. return 0;
  1041. }
  1042. /*
  1043. * Emit type declaration (e.g., field type declaration in a struct or argument
  1044. * declaration in function prototype) in correct C syntax.
  1045. *
  1046. * For most types it's trivial, but there are a few quirky type declaration
  1047. * cases worth mentioning:
  1048. * - function prototypes (especially nesting of function prototypes);
  1049. * - arrays;
  1050. * - const/volatile/restrict for pointers vs other types.
  1051. *
  1052. * For a good discussion of *PARSING* C syntax (as a human), see
  1053. * Peter van der Linden's "Expert C Programming: Deep C Secrets",
  1054. * Ch.3 "Unscrambling Declarations in C".
  1055. *
  1056. * It won't help much with BTF-to-C conversion, though, as it's the opposite
  1057. * problem. So we came up with this algorithm, essentially the reverse of van
  1058. * der Linden's parsing algorithm. It goes from the structured BTF
  1059. * representation of a type declaration to valid, compilable C syntax.
  1060. *
  1061. * For instance, consider this C typedef:
  1062. * typedef const int * const * arr_t[10];
  1063. * It will be represented in BTF with this chain of BTF types:
  1064. * [typedef] -> [array] -> [ptr] -> [const] -> [ptr] -> [const] -> [int]
  1065. *
  1066. * Notice how [const] modifier always goes before type it modifies in BTF type
  1067. * graph, but in C syntax, const/volatile/restrict modifiers are written to
  1068. * the right of pointers, but to the left of other types. There are also other
  1069. * quirks, like function pointers, arrays of them, functions returning other
  1070. * functions, etc.
  1071. *
  1072. * We handle that by pushing all the types to a stack, until we hit "terminal"
  1073. * type (int/enum/struct/union/fwd). Then depending on the kind of a type on
  1074. * top of the stack, modifiers are handled differently. Arrays and function
  1075. * pointers also have wildly different syntax, as does the way their nesting
  1076. * is expressed. See the code for the authoritative definition.
  1077. *
  1078. * To avoid allocating new stack for each independent chain of BTF types, we
  1079. * share one bigger stack, with each chain working only on its own local view
  1080. * of a stack frame. Some care is required to "pop" stack frames after
  1081. * processing type declaration chain.
  1082. */
  1083. int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
  1084. const struct btf_dump_emit_type_decl_opts *opts)
  1085. {
  1086. const char *fname;
  1087. int lvl, err;
  1088. if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts))
  1089. return libbpf_err(-EINVAL);
  1090. err = btf_dump_resize(d);
  1091. if (err)
  1092. return libbpf_err(err);
  1093. fname = OPTS_GET(opts, field_name, "");
  1094. lvl = OPTS_GET(opts, indent_level, 0);
  1095. d->strip_mods = OPTS_GET(opts, strip_mods, false);
  1096. btf_dump_emit_type_decl(d, id, fname, lvl);
  1097. d->strip_mods = false;
  1098. return 0;
  1099. }
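/*
 * Illustrative usage sketch (not part of this file), assuming a valid
 * struct btf_dump *d and a type id; emits a field declaration like
 * "const char *name" at one level of indentation:
 *
 *	LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
 *		.field_name = "name",
 *		.indent_level = 1,
 *	);
 *	err = btf_dump__emit_type_decl(d, id, &opts);
 */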
  1100. static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
  1101. const char *fname, int lvl)
  1102. {
  1103. struct id_stack decl_stack;
  1104. const struct btf_type *t;
  1105. int err, stack_start;
  1106. stack_start = d->decl_stack_cnt;
  1107. for (;;) {
  1108. t = btf__type_by_id(d->btf, id);
  1109. if (d->strip_mods && btf_is_mod(t))
  1110. goto skip_mod;
  1111. err = btf_dump_push_decl_stack_id(d, id);
  1112. if (err < 0) {
  1113. /*
  1114. * if we don't have enough memory for entire type decl
  1115. * chain, restore stack, emit warning, and try to
  1116. * proceed nevertheless
  1117. */
  1118. pr_warn("not enough memory for decl stack:%d", err);
  1119. d->decl_stack_cnt = stack_start;
  1120. return;
  1121. }
  1122. skip_mod:
  1123. /* VOID */
  1124. if (id == 0)
  1125. break;
  1126. switch (btf_kind(t)) {
  1127. case BTF_KIND_PTR:
  1128. case BTF_KIND_VOLATILE:
  1129. case BTF_KIND_CONST:
  1130. case BTF_KIND_RESTRICT:
  1131. case BTF_KIND_FUNC_PROTO:
  1132. case BTF_KIND_TYPE_TAG:
  1133. id = t->type;
  1134. break;
  1135. case BTF_KIND_ARRAY:
  1136. id = btf_array(t)->type;
  1137. break;
  1138. case BTF_KIND_INT:
  1139. case BTF_KIND_ENUM:
  1140. case BTF_KIND_ENUM64:
  1141. case BTF_KIND_FWD:
  1142. case BTF_KIND_STRUCT:
  1143. case BTF_KIND_UNION:
  1144. case BTF_KIND_TYPEDEF:
  1145. case BTF_KIND_FLOAT:
  1146. goto done;
  1147. default:
  1148. pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n",
  1149. btf_kind(t), id);
  1150. goto done;
  1151. }
  1152. }
  1153. done:
  1154. /*
  1155. * We might be inside a chain of declarations (e.g., array of function
  1156. * pointers returning anonymous (so inlined) structs, having another
  1157. * array field). Each of those needs its own "stack frame" to handle
  1158. * emitting of declarations. Those stack frames are non-overlapping
  1159. * portions of shared btf_dump->decl_stack. To make it a bit nicer to
  1160. * handle this set of nested stacks, we create a view corresponding to
  1161. * our own "stack frame" and work with it as an independent stack.
  1162. * We'll need to clean up after emit_type_chain() returns, though.
  1163. */
  1164. decl_stack.ids = d->decl_stack + stack_start;
  1165. decl_stack.cnt = d->decl_stack_cnt - stack_start;
  1166. btf_dump_emit_type_chain(d, &decl_stack, fname, lvl);
  1167. /*
  1168. * emit_type_chain() guarantees that it will pop its entire decl_stack
  1169. * frame before returning. But it works with a read-only view into
  1170. * decl_stack, so it doesn't actually pop anything from the
  1171. * perspective of shared btf_dump->decl_stack, per se. We need to
  1172. * reset decl_stack state to how it was before us to avoid it growing
  1173. * all the time.
  1174. */
  1175. d->decl_stack_cnt = stack_start;
  1176. }
  1177. static void btf_dump_emit_mods(struct btf_dump *d, struct id_stack *decl_stack)
  1178. {
  1179. const struct btf_type *t;
  1180. __u32 id;
  1181. while (decl_stack->cnt) {
  1182. id = decl_stack->ids[decl_stack->cnt - 1];
  1183. t = btf__type_by_id(d->btf, id);
  1184. switch (btf_kind(t)) {
  1185. case BTF_KIND_VOLATILE:
  1186. btf_dump_printf(d, "volatile ");
  1187. break;
  1188. case BTF_KIND_CONST:
  1189. btf_dump_printf(d, "const ");
  1190. break;
  1191. case BTF_KIND_RESTRICT:
  1192. btf_dump_printf(d, "restrict ");
  1193. break;
  1194. default:
  1195. return;
  1196. }
  1197. decl_stack->cnt--;
  1198. }
  1199. }
  1200. static void btf_dump_drop_mods(struct btf_dump *d, struct id_stack *decl_stack)
  1201. {
  1202. const struct btf_type *t;
  1203. __u32 id;
  1204. while (decl_stack->cnt) {
  1205. id = decl_stack->ids[decl_stack->cnt - 1];
  1206. t = btf__type_by_id(d->btf, id);
  1207. if (!btf_is_mod(t))
  1208. return;
  1209. decl_stack->cnt--;
  1210. }
  1211. }
  1212. static void btf_dump_emit_name(const struct btf_dump *d,
  1213. const char *name, bool last_was_ptr)
  1214. {
  1215. bool separate = name[0] && !last_was_ptr;
  1216. btf_dump_printf(d, "%s%s", separate ? " " : "", name);
  1217. }
  1218. static void btf_dump_emit_type_chain(struct btf_dump *d,
  1219. struct id_stack *decls,
  1220. const char *fname, int lvl)
  1221. {
  1222. /*
  1223. * last_was_ptr is used to determine if we need to separate pointer
  1224. * asterisk (*) from previous part of type signature with space, so
  1225. * that we get `int ***`, instead of `int * * *`. We default to true
  1226. * for cases where we have single pointer in a chain. E.g., in ptr ->
  1227. * func_proto case. func_proto will start a new emit_type_chain call
  1228. * with just ptr, which should be emitted as (*) or (*<fname>), so we
  1229. * don't want to prepend space for that last pointer.
  1230. */
	bool last_was_ptr = true;
	const struct btf_type *t;
	const char *name;
	__u16 kind;
	__u32 id;

	while (decls->cnt) {
		id = decls->ids[--decls->cnt];
		if (id == 0) {
			/* VOID is a special snowflake */
			btf_dump_emit_mods(d, decls);
			btf_dump_printf(d, "void");
			last_was_ptr = false;
			continue;
		}

		t = btf__type_by_id(d->btf, id);
		kind = btf_kind(t);

		switch (kind) {
		case BTF_KIND_INT:
		case BTF_KIND_FLOAT:
			btf_dump_emit_mods(d, decls);
			name = btf_name_of(d, t->name_off);
			btf_dump_printf(d, "%s", name);
			break;
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			btf_dump_emit_mods(d, decls);
			/* inline anonymous struct/union */
			if (t->name_off == 0 && !d->skip_anon_defs)
				btf_dump_emit_struct_def(d, id, t, lvl);
			else
				btf_dump_emit_struct_fwd(d, id, t);
			break;
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
			btf_dump_emit_mods(d, decls);
			/* inline anonymous enum */
			if (t->name_off == 0 && !d->skip_anon_defs)
				btf_dump_emit_enum_def(d, id, t, lvl);
			else
				btf_dump_emit_enum_fwd(d, id, t);
			break;
		case BTF_KIND_FWD:
			btf_dump_emit_mods(d, decls);
			btf_dump_emit_fwd_def(d, id, t);
			break;
		case BTF_KIND_TYPEDEF:
			btf_dump_emit_mods(d, decls);
			btf_dump_printf(d, "%s", btf_dump_ident_name(d, id));
			break;
		case BTF_KIND_PTR:
			btf_dump_printf(d, "%s", last_was_ptr ? "*" : " *");
			break;
		case BTF_KIND_VOLATILE:
			btf_dump_printf(d, " volatile");
			break;
		case BTF_KIND_CONST:
			btf_dump_printf(d, " const");
			break;
		case BTF_KIND_RESTRICT:
			btf_dump_printf(d, " restrict");
			break;
		case BTF_KIND_TYPE_TAG:
			btf_dump_emit_mods(d, decls);
			name = btf_name_of(d, t->name_off);
			btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name);
			break;
		case BTF_KIND_ARRAY: {
			const struct btf_array *a = btf_array(t);
			const struct btf_type *next_t;
			__u32 next_id;
			bool multidim;

			/*
			 * GCC has a bug
			 * (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=8354)
			 * which causes it to emit extra const/volatile
			 * modifiers for an array, if array's element type has
			 * const/volatile modifiers. Clang doesn't do that.
			 * In general, it doesn't seem very meaningful to have
			 * a const/volatile modifier for array, so we are
			 * going to silently skip them here.
			 */
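			/*
			 * Illustrative example (not part of the original
			 * source): for `const int arr[4]`, GCC may mark the
			 * array type itself as const in addition to its
			 * element type; dropping the array-level modifier
			 * below keeps the output as `const int arr[4]`
			 * instead of emitting `const` twice.
			 */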
			btf_dump_drop_mods(d, decls);

			if (decls->cnt == 0) {
				btf_dump_emit_name(d, fname, last_was_ptr);
				btf_dump_printf(d, "[%u]", a->nelems);
				return;
			}

			next_id = decls->ids[decls->cnt - 1];
			next_t = btf__type_by_id(d->btf, next_id);
			multidim = btf_is_array(next_t);
			/* we need space if we have named non-pointer */
			if (fname[0] && !last_was_ptr)
				btf_dump_printf(d, " ");
			/* no parentheses for multi-dimensional array */
			if (!multidim)
				btf_dump_printf(d, "(");
			btf_dump_emit_type_chain(d, decls, fname, lvl);
			if (!multidim)
				btf_dump_printf(d, ")");
			btf_dump_printf(d, "[%u]", a->nelems);
			return;
		}
		case BTF_KIND_FUNC_PROTO: {
			const struct btf_param *p = btf_params(t);
			__u16 vlen = btf_vlen(t);
			int i;

			/*
			 * GCC emits extra volatile qualifier for
			 * __attribute__((noreturn)) function pointers. Clang
			 * doesn't do it. It's a GCC quirk for backwards
			 * compatibility with code written for GCC <2.5. So,
			 * similarly to extra qualifiers for array, just drop
			 * them, instead of handling them.
			 */
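			/*
			 * Illustrative example (not part of the original
			 * source): for a pointer to a noreturn function such
			 * as `void (*cb)(int)`, GCC-produced type info may
			 * wrap the func_proto in an extra VOLATILE modifier;
			 * dropping it below keeps the output `void (*cb)(int)`.
			 */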
			btf_dump_drop_mods(d, decls);

			if (decls->cnt) {
				btf_dump_printf(d, " (");
				btf_dump_emit_type_chain(d, decls, fname, lvl);
				btf_dump_printf(d, ")");
			} else {
				btf_dump_emit_name(d, fname, last_was_ptr);
			}

			btf_dump_printf(d, "(");
			/*
			 * Clang for BPF target generates func_proto with no
			 * args as a func_proto with a single void arg (e.g.,
			 * `int (*f)(void)` vs just `int (*f)()`). We are
			 * going to pretend there are no args for such case.
			 */
			if (vlen == 1 && p->type == 0) {
				btf_dump_printf(d, ")");
				return;
			}

			for (i = 0; i < vlen; i++, p++) {
				if (i > 0)
					btf_dump_printf(d, ", ");

				/* last arg of type void is vararg */
				if (i == vlen - 1 && p->type == 0) {
					btf_dump_printf(d, "...");
					break;
				}

				name = btf_name_of(d, p->name_off);
				btf_dump_emit_type_decl(d, p->type, name, lvl);
			}

			btf_dump_printf(d, ")");
			return;
		}
		default:
			pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n",
				kind, id);
			return;
		}

		last_was_ptr = kind == BTF_KIND_PTR;
	}

	btf_dump_emit_name(d, fname, last_was_ptr);
}

/* show type name as (type_name) */
static void btf_dump_emit_type_cast(struct btf_dump *d, __u32 id,
				    bool top_level)
{
	const struct btf_type *t;

	/* for array members, we don't bother emitting type name for each
	 * member to avoid the redundancy of
	 * .name = (char[4])[(char)'f',(char)'o',(char)'o',]
	 */
	if (d->typed_dump->is_array_member)
		return;

	/* avoid type name specification for variable/section; it will be done
	 * for the associated variable value(s).
	 */
	t = btf__type_by_id(d->btf, id);
	if (btf_is_var(t) || btf_is_datasec(t))
		return;

	if (top_level)
		btf_dump_printf(d, "(");
	d->skip_anon_defs = true;
	d->strip_mods = true;
	btf_dump_emit_type_decl(d, id, "", 0);
	d->strip_mods = false;
	d->skip_anon_defs = false;
	if (top_level)
		btf_dump_printf(d, ")");
}
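/*
 * Illustrative example (not part of the original source): when dumping typed
 * data, a top-level int holding 1 is rendered as "(int)1", while a struct
 * member additionally gets the ".<name> = " prefix from the caller, e.g.
 * ".refcnt = (int)2"; array members skip the cast entirely as noted above.
 */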
/* return number of duplicates (occurrences) of a given name */
static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
				 const char *orig_name)
{
	char *old_name, *new_name;
	size_t dup_cnt = 0;
	int err;

	new_name = strdup(orig_name);
	if (!new_name)
		return 1;

	hashmap__find(name_map, orig_name, (void **)&dup_cnt);
	dup_cnt++;

	err = hashmap__set(name_map, new_name, (void *)dup_cnt,
			   (const void **)&old_name, NULL);
	if (err)
		free(new_name);

	free(old_name);

	return dup_cnt;
}

static const char *btf_dump_resolve_name(struct btf_dump *d, __u32 id,
					 struct hashmap *name_map)
{
	struct btf_dump_type_aux_state *s = &d->type_states[id];
	const struct btf_type *t = btf__type_by_id(d->btf, id);
	const char *orig_name = btf_name_of(d, t->name_off);
	const char **cached_name = &d->cached_names[id];
	size_t dup_cnt;

	if (t->name_off == 0)
		return "";

	if (s->name_resolved)
		return *cached_name ? *cached_name : orig_name;

	if (btf_is_fwd(t) || (btf_is_enum(t) && btf_vlen(t) == 0)) {
		s->name_resolved = 1;
		return orig_name;
	}

	dup_cnt = btf_dump_name_dups(d, name_map, orig_name);
	if (dup_cnt > 1) {
		const size_t max_len = 256;
		char new_name[max_len];

		snprintf(new_name, max_len, "%s___%zu", orig_name, dup_cnt);
		*cached_name = strdup(new_name);
	}

	s->name_resolved = 1;
	return *cached_name ? *cached_name : orig_name;
}
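/*
 * Illustrative example (not part of the original source): if the input BTF
 * contains two distinct types that are both named "struct foo", the first
 * occurrence keeps its name and the second one is emitted as
 * "struct foo___2", so that generated C output still compiles.
 */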
static const char *btf_dump_type_name(struct btf_dump *d, __u32 id)
{
	return btf_dump_resolve_name(d, id, d->type_names);
}

static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id)
{
	return btf_dump_resolve_name(d, id, d->ident_names);
}

static int btf_dump_dump_type_data(struct btf_dump *d,
				   const char *fname,
				   const struct btf_type *t,
				   __u32 id,
				   const void *data,
				   __u8 bits_offset,
				   __u8 bit_sz);

static const char *btf_dump_data_newline(struct btf_dump *d)
{
	return d->typed_dump->compact || d->typed_dump->depth == 0 ? "" : "\n";
}

static const char *btf_dump_data_delim(struct btf_dump *d)
{
	return d->typed_dump->depth == 0 ? "" : ",";
}

static void btf_dump_data_pfx(struct btf_dump *d)
{
	int i, lvl = d->typed_dump->indent_lvl + d->typed_dump->depth;

	if (d->typed_dump->compact)
		return;

	for (i = 0; i < lvl; i++)
		btf_dump_printf(d, "%s", d->typed_dump->indent_str);
}
/* A macro is used here as btf_type_value[s]() appends format specifiers
 * to the format specifier passed in; these do the work of appending
 * delimiters etc while the caller simply has to specify the type values
 * in the format specifier + value(s).
 */
#define btf_dump_type_values(d, fmt, ...)			\
	btf_dump_printf(d, fmt "%s%s",				\
			##__VA_ARGS__,				\
			btf_dump_data_delim(d),			\
			btf_dump_data_newline(d))
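/*
 * Illustrative example (not part of the original source):
 *	btf_dump_type_values(d, "%d", 5);
 * expands to
 *	btf_dump_printf(d, "%d" "%s%s", 5, btf_dump_data_delim(d),
 *			btf_dump_data_newline(d));
 * which prints "5,\n" for a nested, non-compact dump and just "5" at the
 * top level.
 */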
static int btf_dump_unsupported_data(struct btf_dump *d,
				     const struct btf_type *t,
				     __u32 id)
{
	btf_dump_printf(d, "<unsupported kind:%u>", btf_kind(t));
	return -ENOTSUP;
}

static int btf_dump_get_bitfield_value(struct btf_dump *d,
				       const struct btf_type *t,
				       const void *data,
				       __u8 bits_offset,
				       __u8 bit_sz,
				       __u64 *value)
{
	__u16 left_shift_bits, right_shift_bits;
	const __u8 *bytes = data;
	__u8 nr_copy_bits;
	__u64 num = 0;
	int i;

	/* Maximum supported bitfield size is 64 bits */
	if (t->size > 8) {
		pr_warn("unexpected bitfield size %d\n", t->size);
		return -EINVAL;
	}

	/* Bitfield value retrieval is done in two steps; first relevant bytes are
	 * stored in num, then we left/right shift num to eliminate irrelevant bits.
	 */
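	/* Worked example (illustrative, not part of the original source):
	 * on a little-endian host, extracting a 3-bit field at bits_offset 2
	 * from the byte 0x2c (0b00101100) gives num = 0x2c and
	 * nr_copy_bits = 5, so (num << 59) >> 61 == 0x3.
	 */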
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	for (i = t->size - 1; i >= 0; i--)
		num = num * 256 + bytes[i];
	nr_copy_bits = bit_sz + bits_offset;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	for (i = 0; i < t->size; i++)
		num = num * 256 + bytes[i];
	nr_copy_bits = t->size * 8 - bits_offset;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	left_shift_bits = 64 - nr_copy_bits;
	right_shift_bits = 64 - bit_sz;
	*value = (num << left_shift_bits) >> right_shift_bits;

	return 0;
}

static int btf_dump_bitfield_check_zero(struct btf_dump *d,
					const struct btf_type *t,
					const void *data,
					__u8 bits_offset,
					__u8 bit_sz)
{
	__u64 check_num;
	int err;

	err = btf_dump_get_bitfield_value(d, t, data, bits_offset, bit_sz, &check_num);
	if (err)
		return err;
	if (check_num == 0)
		return -ENODATA;
	return 0;
}

static int btf_dump_bitfield_data(struct btf_dump *d,
				  const struct btf_type *t,
				  const void *data,
				  __u8 bits_offset,
				  __u8 bit_sz)
{
	__u64 print_num;
	int err;

	err = btf_dump_get_bitfield_value(d, t, data, bits_offset, bit_sz, &print_num);
	if (err)
		return err;

	btf_dump_type_values(d, "0x%llx", (unsigned long long)print_num);
	return 0;
}

/* ints, floats and ptrs */
static int btf_dump_base_type_check_zero(struct btf_dump *d,
					 const struct btf_type *t,
					 __u32 id,
					 const void *data)
{
	static __u8 bytecmp[16] = {};
	int nr_bytes;

	/* For pointer types, pointer size is not defined on a per-type basis.
	 * On dump creation however, we store the pointer size.
	 */
	if (btf_kind(t) == BTF_KIND_PTR)
		nr_bytes = d->ptr_sz;
	else
		nr_bytes = t->size;

	if (nr_bytes < 1 || nr_bytes > 16) {
		pr_warn("unexpected size %d for id [%u]\n", nr_bytes, id);
		return -EINVAL;
	}

	if (memcmp(data, bytecmp, nr_bytes) == 0)
		return -ENODATA;
	return 0;
}

static bool ptr_is_aligned(const struct btf *btf, __u32 type_id,
			   const void *data)
{
	int alignment = btf__align_of(btf, type_id);

	if (alignment == 0)
		return false;

	return ((uintptr_t)data) % alignment == 0;
}

static int btf_dump_int_data(struct btf_dump *d,
			     const struct btf_type *t,
			     __u32 type_id,
			     const void *data,
			     __u8 bits_offset)
{
	__u8 encoding = btf_int_encoding(t);
	bool sign = encoding & BTF_INT_SIGNED;
	char buf[16] __attribute__((aligned(16)));
	int sz = t->size;

	if (sz == 0 || sz > sizeof(buf)) {
		pr_warn("unexpected size %d for id [%u]\n", sz, type_id);
		return -EINVAL;
	}

	/* handle packed int data - accesses of integers not aligned on
	 * int boundaries can cause problems on some platforms.
	 */
	if (!ptr_is_aligned(d->btf, type_id, data)) {
		memcpy(buf, data, sz);
		data = buf;
	}

	switch (sz) {
	case 16: {
		const __u64 *ints = data;
		__u64 lsi, msi;

		/* avoid use of __int128 as some 32-bit platforms do not
		 * support it.
		 */
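		/* Illustrative example (not part of the original source): a
		 * 16-byte integer whose high half is 0x1 and low half is 0x2
		 * is printed below as "0x10000000000000002".
		 */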
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		lsi = ints[0];
		msi = ints[1];
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		lsi = ints[1];
		msi = ints[0];
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
		if (msi == 0)
			btf_dump_type_values(d, "0x%llx", (unsigned long long)lsi);
		else
			btf_dump_type_values(d, "0x%llx%016llx", (unsigned long long)msi,
					     (unsigned long long)lsi);
		break;
	}
	case 8:
		if (sign)
			btf_dump_type_values(d, "%lld", *(long long *)data);
		else
			btf_dump_type_values(d, "%llu", *(unsigned long long *)data);
		break;
	case 4:
		if (sign)
			btf_dump_type_values(d, "%d", *(__s32 *)data);
		else
			btf_dump_type_values(d, "%u", *(__u32 *)data);
		break;
	case 2:
		if (sign)
			btf_dump_type_values(d, "%d", *(__s16 *)data);
		else
			btf_dump_type_values(d, "%u", *(__u16 *)data);
		break;
	case 1:
		if (d->typed_dump->is_array_char) {
			/* check for null terminator */
			if (d->typed_dump->is_array_terminated)
				break;
			if (*(char *)data == '\0') {
				d->typed_dump->is_array_terminated = true;
				break;
			}
			if (isprint(*(char *)data)) {
				btf_dump_type_values(d, "'%c'", *(char *)data);
				break;
			}
		}
		if (sign)
			btf_dump_type_values(d, "%d", *(__s8 *)data);
		else
			btf_dump_type_values(d, "%u", *(__u8 *)data);
		break;
	default:
		pr_warn("unexpected sz %d for id [%u]\n", sz, type_id);
		return -EINVAL;
	}
	return 0;
}

union float_data {
	long double ld;
	double d;
	float f;
};

static int btf_dump_float_data(struct btf_dump *d,
			       const struct btf_type *t,
			       __u32 type_id,
			       const void *data)
{
	const union float_data *flp = data;
	union float_data fl;
	int sz = t->size;

	/* handle unaligned data; copy to local union */
	if (!ptr_is_aligned(d->btf, type_id, data)) {
		memcpy(&fl, data, sz);
		flp = &fl;
	}

	switch (sz) {
	case 16:
		btf_dump_type_values(d, "%Lf", flp->ld);
		break;
	case 8:
		btf_dump_type_values(d, "%lf", flp->d);
		break;
	case 4:
		btf_dump_type_values(d, "%f", flp->f);
		break;
	default:
		pr_warn("unexpected size %d for id [%u]\n", sz, type_id);
		return -EINVAL;
	}
	return 0;
}

static int btf_dump_var_data(struct btf_dump *d,
			     const struct btf_type *v,
			     __u32 id,
			     const void *data)
{
	enum btf_func_linkage linkage = btf_var(v)->linkage;
	const struct btf_type *t;
	const char *l;
	__u32 type_id;

	switch (linkage) {
	case BTF_FUNC_STATIC:
		l = "static ";
		break;
	case BTF_FUNC_EXTERN:
		l = "extern ";
		break;
	case BTF_FUNC_GLOBAL:
	default:
		l = "";
		break;
	}

	/* format of output here is [linkage] [type] [varname] = (type)value,
	 * for example "static int cpu_profile_flip = (int)1"
	 */
	btf_dump_printf(d, "%s", l);
	type_id = v->type;
	t = btf__type_by_id(d->btf, type_id);
	btf_dump_emit_type_cast(d, type_id, false);
	btf_dump_printf(d, " %s = ", btf_name_of(d, v->name_off));
	return btf_dump_dump_type_data(d, NULL, t, type_id, data, 0, 0);
}

static int btf_dump_array_data(struct btf_dump *d,
			       const struct btf_type *t,
			       __u32 id,
			       const void *data)
{
	const struct btf_array *array = btf_array(t);
	const struct btf_type *elem_type;
	__u32 i, elem_type_id;
	__s64 elem_size;
	bool is_array_member;

	elem_type_id = array->type;
	elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL);
	elem_size = btf__resolve_size(d->btf, elem_type_id);
	if (elem_size <= 0) {
		pr_warn("unexpected elem size %zd for array type [%u]\n",
			(ssize_t)elem_size, id);
		return -EINVAL;
	}

	if (btf_is_int(elem_type)) {
		/*
		 * BTF_INT_CHAR encoding never seems to be set for
		 * char arrays, so if size is 1 and element is
		 * printable as a char, we'll do that.
		 */
		if (elem_size == 1)
			d->typed_dump->is_array_char = true;
	}
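	/* Illustrative example (not part of the original source): a
	 * `char name[4]` holding "foo" is dumped as ['f','o','o',]; the
	 * terminating '\0' stops further element output via the
	 * is_array_terminated handling in btf_dump_int_data().
	 */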
	/* note that we increment depth before calling btf_dump_printf() below;
	 * this is intentional. btf_dump_data_newline() will not print a
	 * newline for depth 0 (since this leaves us with trailing newlines
	 * at the end of typed display), so depth is incremented first.
	 * For similar reasons, we decrement depth before showing the closing
	 * bracket.
	 */
	d->typed_dump->depth++;
	btf_dump_printf(d, "[%s", btf_dump_data_newline(d));

	/* may be a multidimensional array, so store current "is array member"
	 * status so we can restore it correctly later.
	 */
	is_array_member = d->typed_dump->is_array_member;
	d->typed_dump->is_array_member = true;
	for (i = 0; i < array->nelems; i++, data += elem_size) {
		if (d->typed_dump->is_array_terminated)
			break;
		btf_dump_dump_type_data(d, NULL, elem_type, elem_type_id, data, 0, 0);
	}
	d->typed_dump->is_array_member = is_array_member;
	d->typed_dump->depth--;
	btf_dump_data_pfx(d);
	btf_dump_type_values(d, "]");
	return 0;
}

static int btf_dump_struct_data(struct btf_dump *d,
				const struct btf_type *t,
				__u32 id,
				const void *data)
{
	const struct btf_member *m = btf_members(t);
	__u16 n = btf_vlen(t);
	int i, err = 0;
	/* note that we increment depth before calling btf_dump_printf() below;
	 * this is intentional. btf_dump_data_newline() will not print a
	 * newline for depth 0 (since this leaves us with trailing newlines
	 * at the end of typed display), so depth is incremented first.
	 * For similar reasons, we decrement depth before showing the closing
	 * brace.
	 */
	d->typed_dump->depth++;
	btf_dump_printf(d, "{%s", btf_dump_data_newline(d));

	for (i = 0; i < n; i++, m++) {
		const struct btf_type *mtype;
		const char *mname;
		__u32 moffset;
		__u8 bit_sz;

		mtype = btf__type_by_id(d->btf, m->type);
		mname = btf_name_of(d, m->name_off);
		moffset = btf_member_bit_offset(t, i);
		bit_sz = btf_member_bitfield_size(t, i);
		err = btf_dump_dump_type_data(d, mname, mtype, m->type, data + moffset / 8,
					      moffset % 8, bit_sz);
		if (err < 0)
			return err;
	}
	d->typed_dump->depth--;
	btf_dump_data_pfx(d);
	btf_dump_type_values(d, "}");
	return err;
}

union ptr_data {
	unsigned int p;
	unsigned long long lp;
};

static int btf_dump_ptr_data(struct btf_dump *d,
			     const struct btf_type *t,
			     __u32 id,
			     const void *data)
{
	if (ptr_is_aligned(d->btf, id, data) && d->ptr_sz == sizeof(void *)) {
		btf_dump_type_values(d, "%p", *(void **)data);
	} else {
		union ptr_data pt;

		memcpy(&pt, data, d->ptr_sz);
		if (d->ptr_sz == 4)
			btf_dump_type_values(d, "0x%x", pt.p);
		else
			btf_dump_type_values(d, "0x%llx", pt.lp);
	}
	return 0;
}

static int btf_dump_get_enum_value(struct btf_dump *d,
				   const struct btf_type *t,
				   const void *data,
				   __u32 id,
				   __s64 *value)
{
	bool is_signed = btf_kflag(t);

	if (!ptr_is_aligned(d->btf, id, data)) {
		__u64 val;
		int err;

		err = btf_dump_get_bitfield_value(d, t, data, 0, 0, &val);
		if (err)
			return err;
		*value = (__s64)val;
		return 0;
	}

	switch (t->size) {
	case 8:
		*value = *(__s64 *)data;
		return 0;
	case 4:
		*value = is_signed ? (__s64)*(__s32 *)data : *(__u32 *)data;
		return 0;
	case 2:
		*value = is_signed ? *(__s16 *)data : *(__u16 *)data;
		return 0;
	case 1:
		*value = is_signed ? *(__s8 *)data : *(__u8 *)data;
		return 0;
	default:
		pr_warn("unexpected size %d for enum, id:[%u]\n", t->size, id);
		return -EINVAL;
	}
}

static int btf_dump_enum_data(struct btf_dump *d,
			      const struct btf_type *t,
			      __u32 id,
			      const void *data)
{
	bool is_signed;
	__s64 value;
	int i, err;

	err = btf_dump_get_enum_value(d, t, data, id, &value);
	if (err)
		return err;

	is_signed = btf_kflag(t);
	if (btf_is_enum(t)) {
		const struct btf_enum *e;

		for (i = 0, e = btf_enum(t); i < btf_vlen(t); i++, e++) {
			if (value != e->val)
				continue;
			btf_dump_type_values(d, "%s", btf_name_of(d, e->name_off));
			return 0;
		}

		btf_dump_type_values(d, is_signed ? "%d" : "%u", value);
	} else {
		const struct btf_enum64 *e;

		for (i = 0, e = btf_enum64(t); i < btf_vlen(t); i++, e++) {
			if (value != btf_enum64_value(e))
				continue;
			btf_dump_type_values(d, "%s", btf_name_of(d, e->name_off));
			return 0;
		}

		btf_dump_type_values(d, is_signed ? "%lldLL" : "%lluULL",
				     (unsigned long long)value);
	}
	return 0;
}

static int btf_dump_datasec_data(struct btf_dump *d,
				 const struct btf_type *t,
				 __u32 id,
				 const void *data)
{
	const struct btf_var_secinfo *vsi;
	const struct btf_type *var;
	__u32 i;
	int err;

	btf_dump_type_values(d, "SEC(\"%s\") ", btf_name_of(d, t->name_off));
	for (i = 0, vsi = btf_var_secinfos(t); i < btf_vlen(t); i++, vsi++) {
		var = btf__type_by_id(d->btf, vsi->type);
		err = btf_dump_dump_type_data(d, NULL, var, vsi->type, data + vsi->offset, 0, 0);
		if (err < 0)
			return err;
		btf_dump_printf(d, ";");
	}
	return 0;
}

/* return size of type, or if base type overflows, return -E2BIG. */
static int btf_dump_type_data_check_overflow(struct btf_dump *d,
					     const struct btf_type *t,
					     __u32 id,
					     const void *data,
					     __u8 bits_offset,
					     __u8 bit_sz)
{
	__s64 size;

	if (bit_sz) {
		/* bits_offset is at most 7. bit_sz is at most 64. */
		__u8 nr_bytes = (bits_offset + bit_sz + 7) / 8;

		/* When bit_sz is non-zero, it is called from
		 * btf_dump_struct_data() where it only cares about
		 * a negative error value.
		 * Return nr_bytes in the success case to keep it
		 * consistent with the regular integer case below.
		 */
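		/* Worked example (illustrative, not part of the original
		 * source): a 5-bit field starting at bits_offset 6 spans two
		 * bytes, and indeed nr_bytes = (6 + 5 + 7) / 8 = 2.
		 */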
		return data + nr_bytes > d->typed_dump->data_end ? -E2BIG : nr_bytes;
	}

	size = btf__resolve_size(d->btf, id);
	if (size < 0 || size >= INT_MAX) {
		pr_warn("unexpected size [%zu] for id [%u]\n",
			(size_t)size, id);
		return -EINVAL;
	}

	/* Only do overflow checking for base types; we do not want to
	 * avoid showing part of a struct, union or array, even if we
	 * do not have enough data to show the full object. By
	 * restricting overflow checking to base types we can ensure
	 * that partial display succeeds, while avoiding overflowing
	 * and using bogus data for display.
	 */
	t = skip_mods_and_typedefs(d->btf, id, NULL);
	if (!t) {
		pr_warn("unexpected error skipping mods/typedefs for id [%u]\n",
			id);
		return -EINVAL;
	}

	switch (btf_kind(t)) {
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_PTR:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		if (data + bits_offset / 8 + size > d->typed_dump->data_end)
			return -E2BIG;
		break;
	default:
		break;
	}
	return (int)size;
}

static int btf_dump_type_data_check_zero(struct btf_dump *d,
					 const struct btf_type *t,
					 __u32 id,
					 const void *data,
					 __u8 bits_offset,
					 __u8 bit_sz)
{
	__s64 value;
	int i, err;
	/* toplevel exceptions; we show zero values if
	 * - we ask for them (emit_zeroes);
	 * - we are at top-level so we see "struct empty { }";
	 * - or we are an array member and the array is non-empty and
	 *   not a char array; we don't want to be in a situation where we
	 *   have an integer array 0, 1, 0, 1 and only show non-zero values.
	 *   If the array contains zeroes only, or is a char array starting
	 *   with a '\0', the array-level check_zero() will prevent showing it;
	 *   we are concerned with determining zero value at the array member
	 *   level here.
	 */
	if (d->typed_dump->emit_zeroes || d->typed_dump->depth == 0 ||
	    (d->typed_dump->is_array_member &&
	     !d->typed_dump->is_array_char))
		return 0;

	t = skip_mods_and_typedefs(d->btf, id, NULL);

	switch (btf_kind(t)) {
	case BTF_KIND_INT:
		if (bit_sz)
			return btf_dump_bitfield_check_zero(d, t, data, bits_offset, bit_sz);
		return btf_dump_base_type_check_zero(d, t, id, data);
	case BTF_KIND_FLOAT:
	case BTF_KIND_PTR:
		return btf_dump_base_type_check_zero(d, t, id, data);
	case BTF_KIND_ARRAY: {
		const struct btf_array *array = btf_array(t);
		const struct btf_type *elem_type;
		__u32 elem_type_id, elem_size;
		bool ischar;

		elem_type_id = array->type;
		elem_size = btf__resolve_size(d->btf, elem_type_id);
		elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL);

		ischar = btf_is_int(elem_type) && elem_size == 1;

		/* check all elements; if _any_ element is nonzero, all
		 * of array is displayed. We make an exception however
		 * for char arrays where the first element is 0; these
		 * are considered zeroed also, even if later elements are
		 * non-zero because the string is terminated.
		 */
		for (i = 0; i < array->nelems; i++) {
			if (i == 0 && ischar && *(char *)data == 0)
				return -ENODATA;
			err = btf_dump_type_data_check_zero(d, elem_type,
							    elem_type_id,
							    data +
							    (i * elem_size),
							    bits_offset, 0);
			if (err != -ENODATA)
				return err;
		}
		return -ENODATA;
	}
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);
		__u16 n = btf_vlen(t);

		/* if any struct/union member is non-zero, the struct/union
		 * is considered non-zero and dumped.
		 */
		for (i = 0; i < n; i++, m++) {
			const struct btf_type *mtype;
			__u32 moffset;

			mtype = btf__type_by_id(d->btf, m->type);
			moffset = btf_member_bit_offset(t, i);

			/* btf_int_bits() does not store member bitfield size;
			 * bitfield size needs to be stored here so int display
			 * of member can retrieve it.
			 */
			bit_sz = btf_member_bitfield_size(t, i);
			err = btf_dump_type_data_check_zero(d, mtype, m->type, data + moffset / 8,
							    moffset % 8, bit_sz);
			if (err != -ENODATA)
				return err;
		}
		return -ENODATA;
	}
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		err = btf_dump_get_enum_value(d, t, data, id, &value);
		if (err)
			return err;
		if (value == 0)
			return -ENODATA;
		return 0;
	default:
		return 0;
	}
}

/* returns size of data dumped, or error. */
static int btf_dump_dump_type_data(struct btf_dump *d,
				   const char *fname,
				   const struct btf_type *t,
				   __u32 id,
				   const void *data,
				   __u8 bits_offset,
				   __u8 bit_sz)
{
	int size, err = 0;

	size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset, bit_sz);
	if (size < 0)
		return size;
	err = btf_dump_type_data_check_zero(d, t, id, data, bits_offset, bit_sz);
	if (err) {
		/* zeroed data is expected and not an error, so simply skip
		 * dumping such data. Record other errors however.
		 */
		if (err == -ENODATA)
			return size;
		return err;
	}
	btf_dump_data_pfx(d);

	if (!d->typed_dump->skip_names) {
		if (fname && strlen(fname) > 0)
			btf_dump_printf(d, ".%s = ", fname);
		btf_dump_emit_type_cast(d, id, true);
	}

	t = skip_mods_and_typedefs(d->btf, id, NULL);

	switch (btf_kind(t)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_FWD:
	case BTF_KIND_FUNC:
	case BTF_KIND_FUNC_PROTO:
	case BTF_KIND_DECL_TAG:
		err = btf_dump_unsupported_data(d, t, id);
		break;
	case BTF_KIND_INT:
		if (bit_sz)
			err = btf_dump_bitfield_data(d, t, data, bits_offset, bit_sz);
		else
			err = btf_dump_int_data(d, t, id, data, bits_offset);
		break;
	case BTF_KIND_FLOAT:
		err = btf_dump_float_data(d, t, id, data);
		break;
	case BTF_KIND_PTR:
		err = btf_dump_ptr_data(d, t, id, data);
		break;
	case BTF_KIND_ARRAY:
		err = btf_dump_array_data(d, t, id, data);
		break;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		err = btf_dump_struct_data(d, t, id, data);
		break;
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		/* handle bitfield and int enum values */
		if (bit_sz) {
			__u64 print_num;
			__s64 enum_val;

			err = btf_dump_get_bitfield_value(d, t, data, bits_offset, bit_sz,
							  &print_num);
			if (err)
				break;
			enum_val = (__s64)print_num;
			err = btf_dump_enum_data(d, t, id, &enum_val);
		} else
			err = btf_dump_enum_data(d, t, id, data);
		break;
	case BTF_KIND_VAR:
		err = btf_dump_var_data(d, t, id, data);
		break;
	case BTF_KIND_DATASEC:
		err = btf_dump_datasec_data(d, t, id, data);
		break;
	default:
		pr_warn("unexpected kind [%u] for id [%u]\n",
			BTF_INFO_KIND(t->info), id);
		return -EINVAL;
	}
	if (err < 0)
		return err;
	return size;
}

int btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
			     const void *data, size_t data_sz,
			     const struct btf_dump_type_data_opts *opts)
{
	struct btf_dump_data typed_dump = {};
	const struct btf_type *t;
	int ret;

	if (!OPTS_VALID(opts, btf_dump_type_data_opts))
		return libbpf_err(-EINVAL);

	t = btf__type_by_id(d->btf, id);
	if (!t)
		return libbpf_err(-ENOENT);

	d->typed_dump = &typed_dump;
	d->typed_dump->data_end = data + data_sz;
	d->typed_dump->indent_lvl = OPTS_GET(opts, indent_level, 0);

	/* default indent string is a tab */
	if (!OPTS_GET(opts, indent_str, NULL))
		d->typed_dump->indent_str[0] = '\t';
	else
		libbpf_strlcpy(d->typed_dump->indent_str, opts->indent_str,
			       sizeof(d->typed_dump->indent_str));

	d->typed_dump->compact = OPTS_GET(opts, compact, false);
	d->typed_dump->skip_names = OPTS_GET(opts, skip_names, false);
	d->typed_dump->emit_zeroes = OPTS_GET(opts, emit_zeroes, false);

	ret = btf_dump_dump_type_data(d, NULL, t, id, data, 0, 0);

	d->typed_dump = NULL;

	return libbpf_err(ret);
}
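/*
 * Usage sketch (illustrative only, not part of btf_dump.c): dumping the raw
 * bytes of a struct whose BTF type id is already known. The printf callback,
 * btf handle, struct type and type id below are assumptions of the example,
 * not something this file defines.
 *
 *	static void my_print(void *ctx, const char *fmt, va_list args)
 *	{
 *		vfprintf(stderr, fmt, args);
 *	}
 *
 *	LIBBPF_OPTS(btf_dump_type_data_opts, opts,
 *		.compact = true,	// single-line output, no newlines
 *		.emit_zeroes = true,	// show zero-valued fields too
 *	);
 *	struct btf_dump *dump = btf_dump__new(btf, my_print, NULL, NULL);
 *	struct my_struct val = { .a = 1 };
 *
 *	if (dump)
 *		btf_dump__dump_type_data(dump, my_struct_type_id,
 *					 &val, sizeof(val), &opts);
 *	btf_dump__free(dump);
 */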