// SPDX-License-Identifier: GPL-2.0-only
/*
 * kallsyms.c: in-kernel printing of symbolic oopses and stack traces.
 *
 * Rewritten and vastly simplified by Rusty Russell for in-kernel
 * module loader:
 *   Copyright 2002 Rusty Russell <[email protected]> IBM Corporation
 *
 * ChangeLog:
 *
 * (25/Aug/2004) Paulo Marques <[email protected]>
 *      Changed the compression method from stem compression to "table lookup"
 *      compression (see scripts/kallsyms.c for a more complete description)
 */
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/kdb.h>
#include <linux/err.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>	/* for cond_resched */
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bsearch.h>
#include <linux/btf_ids.h>

#include "kallsyms_internal.h"

/*
 * Expand the compressed symbol data at the given offset in the compressed
 * stream into the resulting uncompressed string. If the uncompressed string
 * is too long (>= maxlen), it will be truncated.
 */
static unsigned int kallsyms_expand_symbol(unsigned int off,
					   char *result, size_t maxlen)
{
	int len, skipped_first = 0;
	const char *tptr;
	const u8 *data;

	/* Get the compressed symbol length from the first symbol byte. */
	data = &kallsyms_names[off];
	len = *data;
	data++;
	off++;

	/* If MSB is 1, it is a "big" symbol, so needs an additional byte. */
	if ((len & 0x80) != 0) {
		len = (len & 0x7F) | (*data << 7);
		data++;
		off++;
	}

	/*
	 * Update the offset to return the offset for the next symbol on
	 * the compressed stream.
	 */
	off += len;

	/*
	 * For every byte on the compressed symbol data, copy the table
	 * entry for that byte.
	 */
	while (len) {
		tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
		data++;
		len--;

		while (*tptr) {
			if (skipped_first) {
				if (maxlen <= 1)
					goto tail;
				*result = *tptr;
				result++;
				maxlen--;
			} else
				skipped_first = 1;
			tptr++;
		}
	}

tail:
	if (maxlen)
		*result = '\0';

	/* Return the offset to the next symbol. */
	return off;
}
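
/*
 * Illustrative example of the encoding handled above (not an additional
 * format definition, just the one already used by scripts/kallsyms): an
 * entry starting with 0x03 is followed by three token bytes, while an entry
 * starting with { 0x85, 0x01 } has (0x85 & 0x7F) | (0x01 << 7) = 133 token
 * bytes. Each token byte indexes kallsyms_token_index/kallsyms_token_table,
 * and the first expanded character is the symbol type, which is why
 * kallsyms_expand_symbol() skips it via skipped_first.
 */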

/*
 * Get symbol type information. This is encoded as a single char at the
 * beginning of the symbol name.
 */
static char kallsyms_get_symbol_type(unsigned int off)
{
	/*
	 * Get just the first code, look it up in the token table,
	 * and return the first char from this token.
	 */
	return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]];
}

/*
 * Find the offset in the compressed stream given an index into the
 * kallsyms array.
 */
static unsigned int get_symbol_offset(unsigned long pos)
{
	const u8 *name;
	int i, len;

	/*
	 * Use the closest marker we have. We have markers every 256 positions,
	 * so that should be close enough.
	 */
	name = &kallsyms_names[kallsyms_markers[pos >> 8]];

	/*
	 * Sequentially scan all the symbols up to the point we're searching
	 * for. Every symbol is stored in a [<len>][<len> bytes of data] format,
	 * so we just need to add the len to the current pointer for every
	 * symbol we wish to skip.
	 */
	for (i = 0; i < (pos & 0xFF); i++) {
		len = *name;

		/*
		 * If MSB is 1, it is a "big" symbol, so we need to look into
		 * the next byte (and skip it, too).
		 */
		if ((len & 0x80) != 0)
			len = ((len & 0x7F) | (name[1] << 7)) + 1;

		name = name + len + 1;
	}

	return name - kallsyms_names;
}
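
/*
 * Example of the marker scheme above: for pos == 0x1234 the scan starts at
 * kallsyms_markers[0x12] (the offset of symbol number 0x1200) and then skips
 * 0x34 length-prefixed entries, so a random lookup never walks more than 255
 * symbols linearly.
 */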

static unsigned long kallsyms_sym_address(int idx)
{
	if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
		return kallsyms_addresses[idx];

	/* values are unsigned offsets if --absolute-percpu is not in effect */
	if (!IS_ENABLED(CONFIG_KALLSYMS_ABSOLUTE_PERCPU))
		return kallsyms_relative_base + (u32)kallsyms_offsets[idx];

	/* ...otherwise, positive offsets are absolute values */
	if (kallsyms_offsets[idx] >= 0)
		return kallsyms_offsets[idx];

	/* ...and negative offsets are relative to kallsyms_relative_base - 1 */
	return kallsyms_relative_base - 1 - kallsyms_offsets[idx];
}
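
/*
 * Worked example of the base-relative scheme: with
 * CONFIG_KALLSYMS_ABSOLUTE_PERCPU, a non-negative offset (e.g. a per-CPU
 * address) is returned unchanged as an absolute value, while an offset of
 * -1 decodes to kallsyms_relative_base - 1 - (-1) == kallsyms_relative_base.
 */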

static bool cleanup_symbol_name(char *s)
{
	char *res;

	if (!IS_ENABLED(CONFIG_LTO_CLANG))
		return false;

	/*
	 * LLVM appends various suffixes for local functions and variables that
	 * must be promoted to global scope as part of LTO. This can break
	 * hooking of static functions with kprobes. '.' is not a valid
	 * character in an identifier in C. Suffixes observed with LLVM LTO:
	 * - foo.llvm.[0-9a-f]+
	 */
	res = strstr(s, ".llvm.");
	if (res) {
		*res = '\0';
		return true;
	}

	return false;
}
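
/* E.g. "foo.llvm.123abc" is truncated to "foo" in place. */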

static int compare_symbol_name(const char *name, char *namebuf)
{
	/* The kallsyms_seqs_of_names is sorted based on names after
	 * cleanup_symbol_name() (see scripts/kallsyms.c) if clang lto is enabled.
	 * To ensure correct bisection in kallsyms_lookup_names(), do
	 * cleanup_symbol_name(namebuf) before comparing name and namebuf.
	 */
	cleanup_symbol_name(namebuf);
	return strcmp(name, namebuf);
}

static unsigned int get_symbol_seq(int index)
{
	unsigned int i, seq = 0;

	for (i = 0; i < 3; i++)
		seq = (seq << 8) | kallsyms_seqs_of_names[3 * index + i];

	return seq;
}
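
/*
 * Each kallsyms_seqs_of_names entry is a 3-byte big-endian index into the
 * address-ordered symbol table, e.g. the bytes { 0x01, 0x02, 0x03 } decode
 * to sequence number 0x010203.
 */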

static int kallsyms_lookup_names(const char *name,
				 unsigned int *start,
				 unsigned int *end)
{
	int ret;
	int low, mid, high;
	unsigned int seq, off;
	char namebuf[KSYM_NAME_LEN];

	low = 0;
	high = kallsyms_num_syms - 1;

	while (low <= high) {
		mid = low + (high - low) / 2;
		seq = get_symbol_seq(mid);
		off = get_symbol_offset(seq);
		kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
		ret = compare_symbol_name(name, namebuf);
		if (ret > 0)
			low = mid + 1;
		else if (ret < 0)
			high = mid - 1;
		else
			break;
	}

	if (low > high)
		return -ESRCH;

	low = mid;
	while (low) {
		seq = get_symbol_seq(low - 1);
		off = get_symbol_offset(seq);
		kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
		if (compare_symbol_name(name, namebuf))
			break;
		low--;
	}
	*start = low;

	if (end) {
		high = mid;
		while (high < kallsyms_num_syms - 1) {
			seq = get_symbol_seq(high + 1);
			off = get_symbol_offset(seq);
			kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
			if (compare_symbol_name(name, namebuf))
				break;
			high++;
		}
		*end = high;
	}

	return 0;
}
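
/*
 * Identically named symbols (e.g. several static functions all called
 * "show") form a contiguous run in the name-sorted order, which is why
 * kallsyms_lookup_names() can report them as a [*start, *end] range after
 * the binary search lands on any one of them.
 */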

/* Lookup the address for this symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name)
{
	int ret;
	unsigned int i;

	/* Skip the search for empty string. */
	if (!*name)
		return 0;

	ret = kallsyms_lookup_names(name, &i, NULL);
	if (!ret)
		return kallsyms_sym_address(get_symbol_seq(i));
	return module_kallsyms_lookup_name(name);
}

/*
 * Iterate over all symbols in vmlinux. For symbols from modules use
 * module_kallsyms_on_each_symbol instead.
 */
int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
				      unsigned long),
			    void *data)
{
	char namebuf[KSYM_NAME_LEN];
	unsigned long i;
	unsigned int off;
	int ret;

	for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
		off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
		ret = fn(data, namebuf, NULL, kallsyms_sym_address(i));
		if (ret != 0)
			return ret;
		cond_resched();
	}
	return 0;
}
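
/*
 * Illustrative (hypothetical) caller, showing the callback contract: return
 * 0 to keep iterating, or a non-zero value to stop and have it propagated:
 *
 *	static int count_sys_syms(void *data, const char *name,
 *				  struct module *mod, unsigned long addr)
 *	{
 *		if (!strncmp(name, "sys_", 4))
 *			(*(unsigned long *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned long n = 0;
 *	kallsyms_on_each_symbol(count_sys_syms, &n);
 */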

int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
				  const char *name, void *data)
{
	int ret;
	unsigned int i, start, end;

	ret = kallsyms_lookup_names(name, &start, &end);
	if (ret)
		return 0;

	for (i = start; !ret && i <= end; i++) {
		ret = fn(data, kallsyms_sym_address(get_symbol_seq(i)));
		cond_resched();
	}

	return ret;
}

static unsigned long get_symbol_pos(unsigned long addr,
				    unsigned long *symbolsize,
				    unsigned long *offset)
{
	unsigned long symbol_start = 0, symbol_end = 0;
	unsigned long i, low, high, mid;

	/* This kernel should never have been booted. */
	if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
		BUG_ON(!kallsyms_addresses);
	else
		BUG_ON(!kallsyms_offsets);

	/* Do a binary search on the sorted kallsyms_addresses array. */
	low = 0;
	high = kallsyms_num_syms;

	while (high - low > 1) {
		mid = low + (high - low) / 2;
		if (kallsyms_sym_address(mid) <= addr)
			low = mid;
		else
			high = mid;
	}

	/*
	 * Search for the first aliased symbol. Aliased
	 * symbols are symbols with the same address.
	 */
	while (low && kallsyms_sym_address(low-1) == kallsyms_sym_address(low))
		--low;

	symbol_start = kallsyms_sym_address(low);

	/* Search for next non-aliased symbol. */
	for (i = low + 1; i < kallsyms_num_syms; i++) {
		if (kallsyms_sym_address(i) > symbol_start) {
			symbol_end = kallsyms_sym_address(i);
			break;
		}
	}

	/* If we found no next symbol, we use the end of the section. */
	if (!symbol_end) {
		if (is_kernel_inittext(addr))
			symbol_end = (unsigned long)_einittext;
		else if (IS_ENABLED(CONFIG_KALLSYMS_ALL))
			symbol_end = (unsigned long)_end;
		else
			symbol_end = (unsigned long)_etext;
	}

	if (symbolsize)
		*symbolsize = symbol_end - symbol_start;
	if (offset)
		*offset = addr - symbol_start;

	return low;
}
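
/*
 * Note that aliased symbols (several names defined at the same address, e.g.
 * via an alias attribute) are common; get_symbol_pos() deliberately returns
 * the first entry of such a run so that the reported size and offset are
 * measured from the shared start address.
 */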

/*
 * Lookup an address but don't bother to find any names.
 */
int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
				unsigned long *offset)
{
	char namebuf[KSYM_NAME_LEN];

	if (is_ksym_addr(addr)) {
		get_symbol_pos(addr, symbolsize, offset);
		return 1;
	}
	return !!module_address_lookup(addr, symbolsize, offset, NULL, NULL, namebuf) ||
	       !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
}

static const char *kallsyms_lookup_buildid(unsigned long addr,
			unsigned long *symbolsize,
			unsigned long *offset, char **modname,
			const unsigned char **modbuildid, char *namebuf)
{
	const char *ret;

	namebuf[KSYM_NAME_LEN - 1] = 0;
	namebuf[0] = 0;

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, symbolsize, offset);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       namebuf, KSYM_NAME_LEN);
		if (modname)
			*modname = NULL;
		if (modbuildid)
			*modbuildid = NULL;

		ret = namebuf;
		goto found;
	}

	/* See if it's in a module or a BPF JITed image. */
	ret = module_address_lookup(addr, symbolsize, offset,
				    modname, modbuildid, namebuf);
	if (!ret)
		ret = bpf_address_lookup(addr, symbolsize,
					 offset, modname, namebuf);

	if (!ret)
		ret = ftrace_mod_address_lookup(addr, symbolsize,
						offset, modname, namebuf);

found:
	cleanup_symbol_name(namebuf);
	return ret;
}

/*
 * Lookup an address
 * - modname is set to NULL if it's in the kernel.
 * - We guarantee that the returned name is valid until we reschedule even if
 *   it resides in a module.
 * - We also guarantee that modname will be valid until rescheduled.
 */
const char *kallsyms_lookup(unsigned long addr,
			    unsigned long *symbolsize,
			    unsigned long *offset,
			    char **modname, char *namebuf)
{
	return kallsyms_lookup_buildid(addr, symbolsize, offset, modname,
				       NULL, namebuf);
}

int lookup_symbol_name(unsigned long addr, char *symname)
{
	int res;

	symname[0] = '\0';
	symname[KSYM_NAME_LEN - 1] = '\0';

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, NULL, NULL);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       symname, KSYM_NAME_LEN);
		goto found;
	}
	/* See if it's in a module. */
	res = lookup_module_symbol_name(addr, symname);
	if (res)
		return res;

found:
	cleanup_symbol_name(symname);
	return 0;
}

int lookup_symbol_attrs(unsigned long addr, unsigned long *size,
			unsigned long *offset, char *modname, char *name)
{
	int res;

	name[0] = '\0';
	name[KSYM_NAME_LEN - 1] = '\0';

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, size, offset);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       name, KSYM_NAME_LEN);
		modname[0] = '\0';
		goto found;
	}
	/* See if it's in a module. */
	res = lookup_module_symbol_attrs(addr, size, offset, modname, name);
	if (res)
		return res;

found:
	cleanup_symbol_name(name);
	return 0;
}

/* Look up a kernel symbol and return it in a text buffer. */
static int __sprint_symbol(char *buffer, unsigned long address,
			   int symbol_offset, int add_offset, int add_buildid)
{
	char *modname;
	const unsigned char *buildid;
	const char *name;
	unsigned long offset, size;
	int len;

	address += symbol_offset;
	name = kallsyms_lookup_buildid(address, &size, &offset, &modname, &buildid,
				       buffer);
	if (!name)
		return sprintf(buffer, "0x%lx", address - symbol_offset);

	if (name != buffer)
		strcpy(buffer, name);
	len = strlen(buffer);
	offset -= symbol_offset;

	if (add_offset)
		len += sprintf(buffer + len, "+%#lx/%#lx", offset, size);

	if (modname) {
		len += sprintf(buffer + len, " [%s", modname);
#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID)
		if (add_buildid && buildid) {
			/* build ID should match length of sprintf */
#if IS_ENABLED(CONFIG_MODULES)
			static_assert(sizeof(typeof_member(struct module, build_id)) == 20);
#endif
			len += sprintf(buffer + len, " %20phN", buildid);
		}
#endif
		len += sprintf(buffer + len, "]");
	}

	return len;
}
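
/*
 * The resulting text is the familiar oops/backtrace format, e.g.
 * "usb_submit_urb+0x2c/0x320 [usbcore]" with add_offset (the offsets here
 * are made up for illustration), or "usb_submit_urb [usbcore]" without it;
 * the module build ID, when requested and available, is appended inside the
 * brackets after the module name.
 */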

/**
 * sprint_symbol - Look up a kernel symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function looks up a kernel symbol with @address and stores its name,
 * offset, size and module name to @buffer if possible. If no symbol was found,
 * just saves its @address as is.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_symbol(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, 0, 1, 0);
}
EXPORT_SYMBOL_GPL(sprint_symbol);

/**
 * sprint_symbol_build_id - Look up a kernel symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function looks up a kernel symbol with @address and stores its name,
 * offset, size, module name and module build ID to @buffer if possible. If no
 * symbol was found, just saves its @address as is.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_symbol_build_id(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, 0, 1, 1);
}
EXPORT_SYMBOL_GPL(sprint_symbol_build_id);

/**
 * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function looks up a kernel symbol with @address and stores its name
 * and module name to @buffer if possible. If no symbol was found, just saves
 * its @address as is.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_symbol_no_offset(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(sprint_symbol_no_offset);

/**
 * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function is for stack backtrace and does the same thing as
 * sprint_symbol() but with modified/decreased @address. If there is a
 * tail-call to the function marked "noreturn", gcc optimized out code after
 * the call so that the stack-saved return address could point outside of the
 * caller. This function ensures that kallsyms will find the original caller
 * by decreasing @address.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_backtrace(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, -1, 1, 0);
}

/**
 * sprint_backtrace_build_id - Look up a backtrace symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function is for stack backtrace and does the same thing as
 * sprint_symbol() but with modified/decreased @address. If there is a
 * tail-call to the function marked "noreturn", gcc optimized out code after
 * the call so that the stack-saved return address could point outside of the
 * caller. This function ensures that kallsyms will find the original caller
 * by decreasing @address. This function also appends the module build ID to
 * the @buffer if @address is within a kernel module.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_backtrace_build_id(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, -1, 1, 1);
}

/* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
struct kallsym_iter {
	loff_t pos;
	loff_t pos_arch_end;
	loff_t pos_mod_end;
	loff_t pos_ftrace_mod_end;
	loff_t pos_bpf_end;
	unsigned long value;
	unsigned int nameoff; /* If iterating in core kernel symbols. */
	char type;
	char name[KSYM_NAME_LEN];
	char module_name[MODULE_NAME_LEN];
	int exported;
	int show_value;
};

int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,
			    char *type, char *name)
{
	return -EINVAL;
}

static int get_ksymbol_arch(struct kallsym_iter *iter)
{
	int ret = arch_get_kallsym(iter->pos - kallsyms_num_syms,
				   &iter->value, &iter->type,
				   iter->name);

	if (ret < 0) {
		iter->pos_arch_end = iter->pos;
		return 0;
	}

	return 1;
}

static int get_ksymbol_mod(struct kallsym_iter *iter)
{
	int ret = module_get_kallsym(iter->pos - iter->pos_arch_end,
				     &iter->value, &iter->type,
				     iter->name, iter->module_name,
				     &iter->exported);
	if (ret < 0) {
		iter->pos_mod_end = iter->pos;
		return 0;
	}

	return 1;
}

/*
 * ftrace_mod_get_kallsym() may also get symbols for pages allocated for ftrace
 * purposes. In that case "__builtin__ftrace" is used as a module name, even
 * though "__builtin__ftrace" is not a module.
 */
static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
{
	int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end,
					 &iter->value, &iter->type,
					 iter->name, iter->module_name,
					 &iter->exported);
	if (ret < 0) {
		iter->pos_ftrace_mod_end = iter->pos;
		return 0;
	}

	return 1;
}

static int get_ksymbol_bpf(struct kallsym_iter *iter)
{
	int ret;

	strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN);
	iter->exported = 0;
	ret = bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
			      &iter->value, &iter->type,
			      iter->name);
	if (ret < 0) {
		iter->pos_bpf_end = iter->pos;
		return 0;
	}

	return 1;
}

/*
 * This uses "__builtin__kprobes" as a module name for symbols for pages
 * allocated for kprobes' purposes, even though "__builtin__kprobes" is not a
 * module.
 */
static int get_ksymbol_kprobe(struct kallsym_iter *iter)
{
	strlcpy(iter->module_name, "__builtin__kprobes", MODULE_NAME_LEN);
	iter->exported = 0;
	return kprobe_get_kallsym(iter->pos - iter->pos_bpf_end,
				  &iter->value, &iter->type,
				  iter->name) < 0 ? 0 : 1;
}

/* Returns space to next name. */
static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
{
	unsigned off = iter->nameoff;

	iter->module_name[0] = '\0';
	iter->value = kallsyms_sym_address(iter->pos);

	iter->type = kallsyms_get_symbol_type(off);

	off = kallsyms_expand_symbol(off, iter->name, ARRAY_SIZE(iter->name));

	return off - iter->nameoff;
}

static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
{
	iter->name[0] = '\0';
	iter->nameoff = get_symbol_offset(new_pos);
	iter->pos = new_pos;
	if (new_pos == 0) {
		iter->pos_arch_end = 0;
		iter->pos_mod_end = 0;
		iter->pos_ftrace_mod_end = 0;
		iter->pos_bpf_end = 0;
	}
}

/*
 * The end position (last + 1) of each additional kallsyms section is recorded
 * in iter->pos_..._end as each section is added, and so can be used to
 * determine which get_ksymbol_...() function to call next.
 */
static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
{
	iter->pos = pos;

	if ((!iter->pos_arch_end || iter->pos_arch_end > pos) &&
	    get_ksymbol_arch(iter))
		return 1;

	if ((!iter->pos_mod_end || iter->pos_mod_end > pos) &&
	    get_ksymbol_mod(iter))
		return 1;

	if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) &&
	    get_ksymbol_ftrace_mod(iter))
		return 1;

	if ((!iter->pos_bpf_end || iter->pos_bpf_end > pos) &&
	    get_ksymbol_bpf(iter))
		return 1;

	return get_ksymbol_kprobe(iter);
}
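
/*
 * The resulting /proc/kallsyms ordering is therefore: core kernel symbols
 * first (via get_ksymbol_core()), then arch-specific, module, ftrace
 * trampoline, BPF JIT and finally kprobe insn page symbols, each section
 * consumed until its get_ksymbol_*() helper runs out of entries.
 */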

/* Returns false if pos at or past end of file. */
static int update_iter(struct kallsym_iter *iter, loff_t pos)
{
	/* Module symbols can be accessed randomly. */
	if (pos >= kallsyms_num_syms)
		return update_iter_mod(iter, pos);

	/* If we're not on the desired position, reset to new position. */
	if (pos != iter->pos)
		reset_iter(iter, pos);

	iter->nameoff += get_ksymbol_core(iter);
	iter->pos++;

	return 1;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	(*pos)++;

	if (!update_iter(m->private, *pos))
		return NULL;
	return p;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	if (!update_iter(m->private, *pos))
		return NULL;
	return m->private;
}

static void s_stop(struct seq_file *m, void *p)
{
}

static int s_show(struct seq_file *m, void *p)
{
	void *value;
	struct kallsym_iter *iter = m->private;

	/* Some debugging symbols have no name. Ignore them. */
	if (!iter->name[0])
		return 0;

	value = iter->show_value ? (void *)iter->value : NULL;

	if (iter->module_name[0]) {
		char type;

		/*
		 * Label it "global" if it is exported,
		 * "local" if not exported.
		 */
		type = iter->exported ? toupper(iter->type) :
					tolower(iter->type);
		seq_printf(m, "%px %c %s\t[%s]\n", value,
			   type, iter->name, iter->module_name);
	} else
		seq_printf(m, "%px %c %s\n", value,
			   iter->type, iter->name);
	return 0;
}

static const struct seq_operations kallsyms_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show
};

#ifdef CONFIG_BPF_SYSCALL

struct bpf_iter__ksym {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct kallsym_iter *, ksym);
};

static int ksym_prog_seq_show(struct seq_file *m, bool in_stop)
{
	struct bpf_iter__ksym ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = m;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.ksym = m ? m->private : NULL;
	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_iter_ksym_seq_show(struct seq_file *m, void *p)
{
	return ksym_prog_seq_show(m, false);
}

static void bpf_iter_ksym_seq_stop(struct seq_file *m, void *p)
{
	if (!p)
		(void) ksym_prog_seq_show(m, true);
	else
		s_stop(m, p);
}

static const struct seq_operations bpf_iter_ksym_ops = {
	.start = s_start,
	.next = s_next,
	.stop = bpf_iter_ksym_seq_stop,
	.show = bpf_iter_ksym_seq_show,
};

static int bpf_iter_ksym_init(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct kallsym_iter *iter = priv_data;

	reset_iter(iter, 0);

	/* cache here as in kallsyms_open() case; use current process
	 * credentials to tell BPF iterators if values should be shown.
	 */
	iter->show_value = kallsyms_show_value(current_cred());

	return 0;
}

DEFINE_BPF_ITER_FUNC(ksym, struct bpf_iter_meta *meta, struct kallsym_iter *ksym)

static const struct bpf_iter_seq_info ksym_iter_seq_info = {
	.seq_ops = &bpf_iter_ksym_ops,
	.init_seq_private = bpf_iter_ksym_init,
	.fini_seq_private = NULL,
	.seq_priv_size = sizeof(struct kallsym_iter),
};

static struct bpf_iter_reg ksym_iter_reg_info = {
	.target = "ksym",
	.feature = BPF_ITER_RESCHED,
	.ctx_arg_info_size = 1,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__ksym, ksym),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info = &ksym_iter_seq_info,
};

BTF_ID_LIST(btf_ksym_iter_id)
BTF_ID(struct, kallsym_iter)

static int __init bpf_ksym_iter_register(void)
{
	ksym_iter_reg_info.ctx_arg_info[0].btf_id = *btf_ksym_iter_id;
	return bpf_iter_reg_target(&ksym_iter_reg_info);
}

late_initcall(bpf_ksym_iter_register);

#endif /* CONFIG_BPF_SYSCALL */

static inline int kallsyms_for_perf(void)
{
#ifdef CONFIG_PERF_EVENTS
	extern int sysctl_perf_event_paranoid;

	if (sysctl_perf_event_paranoid <= 1)
		return 1;
#endif
	return 0;
}

/*
 * We show kallsyms information even to normal users if we've enabled
 * kernel profiling and are explicitly not paranoid (so kptr_restrict
 * is clear, and sysctl_perf_event_paranoid isn't set).
 *
 * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
 * block even that).
 */
bool kallsyms_show_value(const struct cred *cred)
{
	switch (kptr_restrict) {
	case 0:
		if (kallsyms_for_perf())
			return true;
		fallthrough;
	case 1:
		if (security_capable(cred, &init_user_ns, CAP_SYSLOG,
				     CAP_OPT_NOAUDIT) == 0)
			return true;
		fallthrough;
	default:
		return false;
	}
}

static int kallsyms_open(struct inode *inode, struct file *file)
{
	/*
	 * We keep iterator in m->private, since normal case is to
	 * s_start from where we left off, so we avoid calling
	 * get_symbol_offset for every symbol.
	 */
	struct kallsym_iter *iter;
	iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter));
	if (!iter)
		return -ENOMEM;
	reset_iter(iter, 0);

	/*
	 * Instead of checking this on every s_show() call, cache
	 * the result here at open time.
	 */
	iter->show_value = kallsyms_show_value(file->f_cred);
	return 0;
}

#ifdef CONFIG_KGDB_KDB
const char *kdb_walk_kallsyms(loff_t *pos)
{
	static struct kallsym_iter kdb_walk_kallsyms_iter;

	if (*pos == 0) {
		memset(&kdb_walk_kallsyms_iter, 0,
		       sizeof(kdb_walk_kallsyms_iter));
		reset_iter(&kdb_walk_kallsyms_iter, 0);
	}
	while (1) {
		if (!update_iter(&kdb_walk_kallsyms_iter, *pos))
			return NULL;
		++*pos;
		/* Some debugging symbols have no name. Ignore them. */
		if (kdb_walk_kallsyms_iter.name[0])
			return kdb_walk_kallsyms_iter.name;
	}
}
#endif	/* CONFIG_KGDB_KDB */

static const struct proc_ops kallsyms_proc_ops = {
	.proc_open = kallsyms_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = seq_release_private,
};

static int __init kallsyms_init(void)
{
	proc_create("kallsyms", 0444, NULL, &kallsyms_proc_ops);
	return 0;
}
device_initcall(kallsyms_init);