usdt.c

  1. // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
  2. /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
  3. #include <ctype.h>
  4. #include <stdio.h>
  5. #include <stdlib.h>
  6. #include <string.h>
  7. #include <libelf.h>
  8. #include <gelf.h>
  9. #include <unistd.h>
  10. #include <linux/ptrace.h>
  11. #include <linux/kernel.h>
  12. /* s8 will be marked as poison while it's a reg of riscv */
  13. #if defined(__riscv)
  14. #define rv_s8 s8
  15. #endif
  16. #include "bpf.h"
  17. #include "libbpf.h"
  18. #include "libbpf_common.h"
  19. #include "libbpf_internal.h"
  20. #include "hashmap.h"
  21. /* libbpf's USDT support consists of BPF-side state/code and user-space
  22. * state/code working together in concert. BPF-side parts are defined in
  23. * usdt.bpf.h header library. User-space state is encapsulated by struct
  24. * usdt_manager and all the supporting code centered around usdt_manager.
  25. *
  26. * usdt.bpf.h defines two BPF maps that usdt_manager expects: USDT spec map
  27. * and IP-to-spec-ID map, which is an auxiliary map necessary for kernels that
  28. * don't support BPF cookie (see below). These two maps are implicitly
  29. * embedded into user's end BPF object file when user's code included
  30. * usdt.bpf.h. This means that libbpf doesn't do anything special to create
  31. * these USDT support maps. They are created by normal libbpf logic of
  32. * instantiating BPF maps when opening and loading BPF object.
  33. *
  34. * As such, libbpf is basically unaware of the need to do anything
  35. * USDT-related until the very first call to bpf_program__attach_usdt(), which
  36. * can be called by user explicitly or happen automatically during skeleton
  37. * attach (or, equivalently, through generic bpf_program__attach() call). At
  38. * this point, libbpf will instantiate and initialize struct usdt_manager and
  39. * store it in bpf_object. USDT manager is per-BPF object construct, as each
  40. * independent BPF object might or might not have USDT programs, and thus all
  41. * the expected USDT-related state. There is no coordination between two
  42. * bpf_objects when it comes to USDT attachment; they are oblivious of each
  43. * other's existence, and libbpf simply deals with each bpf_object's own
  44. * USDT state.
  45. *
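 * For illustration, an explicit attachment from user space might look
 * roughly like this (the skeleton field, binary path, provider/probe names,
 * and cookie value are made up; error handling is omitted):
 *
 *   LIBBPF_OPTS(bpf_usdt_opts, opts, .usdt_cookie = 0xcafe);
 *   struct bpf_link *link;
 *
 *   link = bpf_program__attach_usdt(skel->progs.my_usdt_prog, -1,
 *                                   "/path/to/binary", "my_usdt_provider",
 *                                   "my_usdt_probe_name", &opts);
 *
 * A pid of -1 means "no PID filter"; alternatively, giving the BPF program
 * a SEC("usdt/...") section lets skeleton auto-attach make the equivalent
 * call implicitly.
 *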
  46. * Quick crash course on USDTs.
  47. *
  48. * From user-space application's point of view, USDT is essentially just
  49. * a slightly special function call that normally has zero overhead, unless it
  50. * is being traced by some external entity (e.g., a BPF-based tool). Here's how
  51. * a typical application can trigger USDT probe:
  52. *
  53. * #include <sys/sdt.h> // provided by systemtap-sdt-devel package
  54. * // folly also provides similar functionality in folly/tracing/StaticTracepoint.h
  55. *
  56. * STAP_PROBE3(my_usdt_provider, my_usdt_probe_name, 123, x, &y);
  57. *
  58. * USDT is identified by its <provider-name>:<probe-name> pair of names. Each
  59. * individual USDT has a fixed number of arguments (3 in the above example)
  60. * and specifies values of each argument as if it was a function call.
  61. *
  62. * USDT call is actually not a function call, but is instead replaced by
  63. * a single NOP instruction (thus zero overhead, effectively). But in addition
  64. * to that, those USDT macros generate special SHT_NOTE ELF records in
  65. * .note.stapsdt ELF section. Here's an example USDT definition as emitted by
  66. * `readelf -n <binary>`:
  67. *
  68. * stapsdt 0x00000089 NT_STAPSDT (SystemTap probe descriptors)
  69. * Provider: test
  70. * Name: usdt12
  71. * Location: 0x0000000000549df3, Base: 0x00000000008effa4, Semaphore: 0x0000000000a4606e
  72. * Arguments: -4@-1204(%rbp) -4@%edi -8@-1216(%rbp) -8@%r8 -4@$5 -8@%r9 8@%rdx 8@%r10 -4@$-9 -2@%cx -2@%ax -1@%sil
  73. *
  74. * In this case we have USDT test:usdt12 with 12 arguments.
  75. *
  76. * Location and base are offsets used to calculate absolute IP address of that
  77. * NOP instruction that kernel can replace with an interrupt instruction to
  78. * trigger instrumentation code (BPF program for all that we care about).
  79. *
  80. * Semaphore above is an optional feature. It records an address of a 2-byte
  81. * refcount variable (normally in '.probes' ELF section) used for signaling if
  82. * there is anything that is attached to USDT. This is useful for user
  83. * applications if, for example, they need to prepare some arguments that are
  84. * passed only to USDTs and preparation is expensive. By checking if USDT is
  85. * "activated", an application can avoid paying those costs unnecessarily.
  86. * Recent enough kernel has built-in support for automatically managing this
  87. * refcount, which libbpf expects and relies on. If USDT is defined without
  88. * associated semaphore, this value will be zero. See selftests for semaphore
  89. * examples.
  90. *
  91. * Arguments is the most interesting part. This USDT specification string is
  92. * providing information about all the USDT arguments and their locations. The
  93. * part before the @ sign defines the byte size of the argument (1, 2, 4, or 8) and
  94. * whether the argument is signed or unsigned (negative size means signed).
  95. * The part after @ sign is assembly-like definition of argument location
  96. * (see [0] for more details). Technically, assembler can provide some pretty
  97. * advanced definitions, but libbpf currently supports the three most common
  98. * cases:
  99. * 1) immediate constant, see 5th and 9th args above (-4@$5 and -4@$-9);
  100. * 2) register value, e.g., 8@%rdx, which means "unsigned 8-byte integer
  101. * whose value is in register %rdx";
  102. * 3) memory dereference addressed by register, e.g., -4@-1204(%rbp), which
  103. * specifies signed 32-bit integer stored at offset -1204 bytes from
  104. * memory address stored in %rbp.
  105. *
  106. * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
  107. *
  108. * During attachment, libbpf parses all the relevant USDT specifications and
  109. * prepares `struct usdt_spec` (USDT spec), which is then provided to BPF-side
  110. * code through spec map. This allows BPF applications to quickly fetch the
  111. * actual value at runtime using simple BPF-side code.
  112. *
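 * As a rough sketch of what that BPF-side code looks like (program and
 * argument names here are made up; usdt.bpf.h is the authoritative API):
 *
 *   #include <bpf/usdt.bpf.h>
 *
 *   SEC("usdt")
 *   int BPF_USDT(handle_my_probe, int x, long y, void *addr)
 *   {
 *           // x, y, addr are fetched according to this call site's USDT spec
 *           return 0;
 *   }
 *
 * Equivalently, the bpf_usdt_arg(ctx, arg_num, &val) and bpf_usdt_arg_cnt(ctx)
 * helpers can be used to fetch arguments one by one.
 *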
  113. * With basics out of the way, let's go over less immediately obvious aspects
  114. * of supporting USDTs.
  115. *
  116. * First, there is no special USDT BPF program type. It is actually just
  117. * a uprobe BPF program (which for kernel, at least currently, is just a kprobe
  118. * program, so BPF_PROG_TYPE_KPROBE program type). With the only difference
  119. * that uprobe is usually attached at the function entry, while USDT will
  120. * normally be somewhere inside the function. But it should always be
  121. * pointing to a NOP instruction, which makes such uprobes the fastest uprobe
  122. * kind.
  123. *
  124. * Second, it's important to realize that such STAP_PROBEn(provider, name, ...)
  125. * macro invocations can end up being inlined many-many times, depending on
  126. * specifics of each individual user application. So single conceptual USDT
  127. * (identified by provider:name pair of identifiers) is, generally speaking,
  128. * multiple uprobe locations (USDT call sites) in different places in user
  129. * application. Further, again due to inlining, each USDT call site might end
  130. * up having the same argument #N located in a different place. In one call
  131. * site it could be a constant, in another it might end up in a register, and in
  132. * yet another it could be some other register or even somewhere on the stack.
  133. *
  134. * As such, "attaching to USDT" means (in general case) attaching the same
  135. * uprobe BPF program to multiple target locations in user application, each
  136. * potentially having a completely different USDT spec associated with it.
  137. * To wire all this up together libbpf allocates a unique integer spec ID for
  138. * each unique USDT spec. Spec IDs are allocated as sequential small integers
  139. * so that they can be used as keys in array BPF map (for performance reasons).
  140. * Spec ID allocation and accounting is a big part of what usdt_manager is
  141. * about. This state has to be maintained per-BPF object and coordinated
  142. * between different USDT attachments within the same BPF object.
  143. *
  144. * Spec ID is the key in spec BPF map, value is the actual USDT spec laid out
  145. * as struct usdt_spec. Each invocation of BPF program at runtime needs to
  146. * know its associated spec ID. It gets it either through BPF cookie, which
  147. * libbpf sets to spec ID during attach time, or, if kernel is too old to
  148. * support BPF cookie, through IP-to-spec-ID map that libbpf maintains in such
  149. * case. The latter means that some modes of operation can't be supported
  150. * without BPF cookie. One such mode is attaching to shared library "generically",
  151. * without specifying target process. In such case, it's impossible to
  152. * calculate absolute IP addresses for IP-to-spec-ID map, and thus such mode
  153. * is not supported without BPF cookie support.
  154. *
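 * Conceptually, the BPF-side spec ID resolution boils down to pseudo-code
 * along these lines (simplified; the real logic lives in usdt.bpf.h):
 *
 *   if (kernel supports BPF cookie) {
 *           spec_id = bpf_get_attach_cookie(ctx);
 *   } else {
 *           long ip = PT_REGS_IP(ctx);
 *           spec_id = *bpf_map_lookup_elem(&__bpf_usdt_ip_to_spec_id, &ip);
 *   }
 *   spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id);
 *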
  155. * Note that libbpf is using BPF cookie functionality for its own internal
  156. * needs, so users themselves can't rely on the BPF cookie feature. To that end,
  157. * libbpf provides conceptually equivalent USDT cookie support. It's still a u64
  158. * user-provided value that can be associated with a USDT attachment. Note that
  159. * this will be the same value for all USDT call sites within the same single
  160. * *logical* USDT attachment. This makes sense because, to the user, attaching to
  161. * a USDT is a single BPF program triggered for a singular USDT probe. The fact
  162. * that this is done at multiple actual locations is a mostly hidden
  163. * implementation detail. This USDT cookie value can be fetched with the
  164. * bpf_usdt_cookie(ctx) API provided by usdt.bpf.h.
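 *
 * As a small example (the cookie value is made up): if the attachment passed
 * .usdt_cookie = 0xcafe via struct bpf_usdt_opts, then every invocation of
 * the attached BPF program, regardless of which call site triggered it, sees
 *
 *   bpf_usdt_cookie(ctx) == 0xcafe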
  165. *
  166. * Lastly, while single USDT can have tons of USDT call sites, it doesn't
  167. * necessarily have that many different USDT specs. It very well might be
  168. * that 1000 USDT call sites only need 5 different USDT specs, because all the
  169. * arguments are typically contained in a small set of registers or stack
  170. * locations. As such, it's wasteful to allocate as many USDT spec IDs as
  171. * there are USDT call sites. So libbpf tries to be frugal and performs
  172. * on-the-fly deduplication during a single USDT attachment to only allocate
  173. * the minimal required amount of unique USDT specs (and thus spec IDs). This
  174. * is trivially achieved by using USDT spec string (Arguments string from USDT
  175. * note) as a lookup key in a hashmap. USDT spec string uniquely defines
  176. * everything about how to fetch USDT arguments, so two USDT call sites
  177. * sharing USDT spec string can safely share the same USDT spec and spec ID.
  178. * Note, this spec string deduplication is happening only during the same USDT
  179. * attachment, so each USDT spec shares the same USDT cookie value. This is
  180. * not generally true for other USDT attachments within the same BPF object,
  181. * as even if USDT spec string is the same, USDT cookie value can be
  182. * different. It was deemed excessive to try to deduplicate across independent
  183. * USDT attachments by taking into account USDT spec string *and* USDT cookie
  184. * value, which would complicate spec ID accounting significantly for little
  185. * gain.
  186. */
  187. #define USDT_BASE_SEC ".stapsdt.base"
  188. #define USDT_SEMA_SEC ".probes"
  189. #define USDT_NOTE_SEC ".note.stapsdt"
  190. #define USDT_NOTE_TYPE 3
  191. #define USDT_NOTE_NAME "stapsdt"
  192. /* should match exactly enum __bpf_usdt_arg_type from usdt.bpf.h */
  193. enum usdt_arg_type {
  194. USDT_ARG_CONST,
  195. USDT_ARG_REG,
  196. USDT_ARG_REG_DEREF,
  197. };
  198. /* should match exactly struct __bpf_usdt_arg_spec from usdt.bpf.h */
  199. struct usdt_arg_spec {
  200. __u64 val_off;
  201. enum usdt_arg_type arg_type;
  202. short reg_off;
  203. bool arg_signed;
  204. char arg_bitshift;
  205. };
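/* For example, on x86-64 the "-4@-1204(%rbp)" fragment from the spec string
 * shown in the big comment above would be parsed by parse_usdt_arg() below
 * into roughly:
 *
 *   arg_type     = USDT_ARG_REG_DEREF
 *   val_off      = -1204 (stored in the __u64 field)
 *   reg_off      = offsetof(struct pt_regs, rbp)
 *   arg_signed   = true
 *   arg_bitshift = 32  (64 - 4 * 8; lets BPF side truncate and sign-extend)
 */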
  206. /* should match BPF_USDT_MAX_ARG_CNT in usdt.bpf.h */
  207. #define USDT_MAX_ARG_CNT 12
  208. /* should match struct __bpf_usdt_spec from usdt.bpf.h */
  209. struct usdt_spec {
  210. struct usdt_arg_spec args[USDT_MAX_ARG_CNT];
  211. __u64 usdt_cookie;
  212. short arg_cnt;
  213. };
  214. struct usdt_note {
  215. const char *provider;
  216. const char *name;
  217. /* USDT args specification string, e.g.:
  218. * "-4@%esi -4@-24(%rbp) -4@%ecx 2@%ax 8@%rdx"
  219. */
  220. const char *args;
  221. long loc_addr;
  222. long base_addr;
  223. long sema_addr;
  224. };
  225. struct usdt_target {
  226. long abs_ip;
  227. long rel_ip;
  228. long sema_off;
  229. struct usdt_spec spec;
  230. const char *spec_str;
  231. };
  232. struct usdt_manager {
  233. struct bpf_map *specs_map;
  234. struct bpf_map *ip_to_spec_id_map;
  235. int *free_spec_ids;
  236. size_t free_spec_cnt;
  237. size_t next_free_spec_id;
  238. bool has_bpf_cookie;
  239. bool has_sema_refcnt;
  240. };
  241. struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
  242. {
  243. static const char *ref_ctr_sysfs_path = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset";
  244. struct usdt_manager *man;
  245. struct bpf_map *specs_map, *ip_to_spec_id_map;
  246. specs_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_specs");
  247. ip_to_spec_id_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_ip_to_spec_id");
  248. if (!specs_map || !ip_to_spec_id_map) {
  249. pr_warn("usdt: failed to find USDT support BPF maps, did you forget to include bpf/usdt.bpf.h?\n");
  250. return ERR_PTR(-ESRCH);
  251. }
  252. man = calloc(1, sizeof(*man));
  253. if (!man)
  254. return ERR_PTR(-ENOMEM);
  255. man->specs_map = specs_map;
  256. man->ip_to_spec_id_map = ip_to_spec_id_map;
  257. /* Detect if BPF cookie is supported for kprobes.
  258. * We don't need IP-to-ID mapping if we can use BPF cookies.
  259. * Added in: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value")
  260. */
  261. man->has_bpf_cookie = kernel_supports(obj, FEAT_BPF_COOKIE);
  262. /* Detect kernel support for automatic refcounting of USDT semaphore.
  263. * If this is not supported, USDTs with semaphores will not be supported.
  264. * Added in: a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
  265. */
  266. man->has_sema_refcnt = faccessat(AT_FDCWD, ref_ctr_sysfs_path, F_OK, AT_EACCESS) == 0;
  267. return man;
  268. }
  269. void usdt_manager_free(struct usdt_manager *man)
  270. {
  271. if (IS_ERR_OR_NULL(man))
  272. return;
  273. free(man->free_spec_ids);
  274. free(man);
  275. }
  276. static int sanity_check_usdt_elf(Elf *elf, const char *path)
  277. {
  278. GElf_Ehdr ehdr;
  279. int endianness;
  280. if (elf_kind(elf) != ELF_K_ELF) {
  281. pr_warn("usdt: unrecognized ELF kind %d for '%s'\n", elf_kind(elf), path);
  282. return -EBADF;
  283. }
  284. switch (gelf_getclass(elf)) {
  285. case ELFCLASS64:
  286. if (sizeof(void *) != 8) {
  287. pr_warn("usdt: attaching to 64-bit ELF binary '%s' is not supported\n", path);
  288. return -EBADF;
  289. }
  290. break;
  291. case ELFCLASS32:
  292. if (sizeof(void *) != 4) {
  293. pr_warn("usdt: attaching to 32-bit ELF binary '%s' is not supported\n", path);
  294. return -EBADF;
  295. }
  296. break;
  297. default:
  298. pr_warn("usdt: unsupported ELF class for '%s'\n", path);
  299. return -EBADF;
  300. }
  301. if (!gelf_getehdr(elf, &ehdr))
  302. return -EINVAL;
  303. if (ehdr.e_type != ET_EXEC && ehdr.e_type != ET_DYN) {
  304. pr_warn("usdt: unsupported type of ELF binary '%s' (%d), only ET_EXEC and ET_DYN are supported\n",
  305. path, ehdr.e_type);
  306. return -EBADF;
  307. }
  308. #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  309. endianness = ELFDATA2LSB;
  310. #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  311. endianness = ELFDATA2MSB;
  312. #else
  313. # error "Unrecognized __BYTE_ORDER__"
  314. #endif
  315. if (endianness != ehdr.e_ident[EI_DATA]) {
  316. pr_warn("usdt: ELF endianness mismatch for '%s'\n", path);
  317. return -EBADF;
  318. }
  319. return 0;
  320. }
  321. static int find_elf_sec_by_name(Elf *elf, const char *sec_name, GElf_Shdr *shdr, Elf_Scn **scn)
  322. {
  323. Elf_Scn *sec = NULL;
  324. size_t shstrndx;
  325. if (elf_getshdrstrndx(elf, &shstrndx))
  326. return -EINVAL;
  327. /* check if ELF is corrupted and avoid calling elf_strptr if yes */
  328. if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL))
  329. return -EINVAL;
  330. while ((sec = elf_nextscn(elf, sec)) != NULL) {
  331. char *name;
  332. if (!gelf_getshdr(sec, shdr))
  333. return -EINVAL;
  334. name = elf_strptr(elf, shstrndx, shdr->sh_name);
  335. if (name && strcmp(sec_name, name) == 0) {
  336. *scn = sec;
  337. return 0;
  338. }
  339. }
  340. return -ENOENT;
  341. }
  342. struct elf_seg {
  343. long start;
  344. long end;
  345. long offset;
  346. bool is_exec;
  347. };
  348. static int cmp_elf_segs(const void *_a, const void *_b)
  349. {
  350. const struct elf_seg *a = _a;
  351. const struct elf_seg *b = _b;
  352. return a->start < b->start ? -1 : 1;
  353. }
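/* Collect all PT_LOAD program headers of the given ELF binary into a sorted
 * array of elf_seg entries (virtual address range, file offset, exec flag).
 * These are later used to translate note-provided virtual addresses into
 * uprobe file offsets.
 */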
  354. static int parse_elf_segs(Elf *elf, const char *path, struct elf_seg **segs, size_t *seg_cnt)
  355. {
  356. GElf_Phdr phdr;
  357. size_t n;
  358. int i, err;
  359. struct elf_seg *seg;
  360. void *tmp;
  361. *seg_cnt = 0;
  362. if (elf_getphdrnum(elf, &n)) {
  363. err = -errno;
  364. return err;
  365. }
  366. for (i = 0; i < n; i++) {
  367. if (!gelf_getphdr(elf, i, &phdr)) {
  368. err = -errno;
  369. return err;
  370. }
  371. pr_debug("usdt: discovered PHDR #%d in '%s': vaddr 0x%lx memsz 0x%lx offset 0x%lx type 0x%lx flags 0x%lx\n",
  372. i, path, (long)phdr.p_vaddr, (long)phdr.p_memsz, (long)phdr.p_offset,
  373. (long)phdr.p_type, (long)phdr.p_flags);
  374. if (phdr.p_type != PT_LOAD)
  375. continue;
  376. tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
  377. if (!tmp)
  378. return -ENOMEM;
  379. *segs = tmp;
  380. seg = *segs + *seg_cnt;
  381. (*seg_cnt)++;
  382. seg->start = phdr.p_vaddr;
  383. seg->end = phdr.p_vaddr + phdr.p_memsz;
  384. seg->offset = phdr.p_offset;
  385. seg->is_exec = phdr.p_flags & PF_X;
  386. }
  387. if (*seg_cnt == 0) {
  388. pr_warn("usdt: failed to find PT_LOAD program headers in '%s'\n", path);
  389. return -ESRCH;
  390. }
  391. qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
  392. return 0;
  393. }
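/* Collect executable memory mappings of 'lib_path' from /proc/<pid>/maps
 * into the same sorted elf_seg representation. These are used to translate
 * a shared library's file offset back into an absolute IP inside the given
 * process when BPF cookies are not available.
 */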
  394. static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs, size_t *seg_cnt)
  395. {
  396. char path[PATH_MAX], line[PATH_MAX], mode[16];
  397. size_t seg_start, seg_end, seg_off;
  398. struct elf_seg *seg;
  399. int tmp_pid, i, err;
  400. FILE *f;
  401. *seg_cnt = 0;
  402. /* Handle containerized binaries only accessible from
  403. * /proc/<pid>/root/<path>. They will be reported as just /<path> in
  404. * /proc/<pid>/maps.
  405. */
  406. if (sscanf(lib_path, "/proc/%d/root%s", &tmp_pid, path) == 2 && pid == tmp_pid)
  407. goto proceed;
  408. if (!realpath(lib_path, path)) {
  409. pr_warn("usdt: failed to get absolute path of '%s' (err %d), using path as is...\n",
  410. lib_path, -errno);
  411. libbpf_strlcpy(path, lib_path, sizeof(path));
  412. }
  413. proceed:
  414. sprintf(line, "/proc/%d/maps", pid);
  415. f = fopen(line, "r");
  416. if (!f) {
  417. err = -errno;
  418. pr_warn("usdt: failed to open '%s' to get base addr of '%s': %d\n",
  419. line, lib_path, err);
  420. return err;
  421. }
  422. /* We need to handle lines with no path at the end:
  423. *
  424. * 7f5c6f5d1000-7f5c6f5d3000 rw-p 001c7000 08:04 21238613 /usr/lib64/libc-2.17.so
  425. * 7f5c6f5d3000-7f5c6f5d8000 rw-p 00000000 00:00 0
  426. * 7f5c6f5d8000-7f5c6f5d9000 r-xp 00000000 103:01 362990598 /data/users/andriin/linux/tools/bpf/usdt/libhello_usdt.so
  427. */
  428. while (fscanf(f, "%zx-%zx %s %zx %*s %*d%[^\n]\n",
  429. &seg_start, &seg_end, mode, &seg_off, line) == 5) {
  430. void *tmp;
  431. /* to handle no path case (see above) we need to capture line
  432. * without skipping any whitespaces. So we need to strip
  433. * leading whitespaces manually here
  434. */
  435. i = 0;
  436. while (isblank(line[i]))
  437. i++;
  438. if (strcmp(line + i, path) != 0)
  439. continue;
  440. pr_debug("usdt: discovered segment for lib '%s': addrs %zx-%zx mode %s offset %zx\n",
  441. path, seg_start, seg_end, mode, seg_off);
  442. /* ignore non-executable sections for shared libs */
  443. if (mode[2] != 'x')
  444. continue;
  445. tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
  446. if (!tmp) {
  447. err = -ENOMEM;
  448. goto err_out;
  449. }
  450. *segs = tmp;
  451. seg = *segs + *seg_cnt;
  452. *seg_cnt += 1;
  453. seg->start = seg_start;
  454. seg->end = seg_end;
  455. seg->offset = seg_off;
  456. seg->is_exec = true;
  457. }
  458. if (*seg_cnt == 0) {
  459. pr_warn("usdt: failed to find '%s' (resolved to '%s') within PID %d memory mappings\n",
  460. lib_path, path, pid);
  461. err = -ESRCH;
  462. goto err_out;
  463. }
  464. qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
  465. err = 0;
  466. err_out:
  467. fclose(f);
  468. return err;
  469. }
  470. static struct elf_seg *find_elf_seg(struct elf_seg *segs, size_t seg_cnt, long virtaddr)
  471. {
  472. struct elf_seg *seg;
  473. int i;
  474. /* for ELF binaries (both executables and shared libraries), we are
  475. * given virtual address (absolute for executables, relative for
  476. * libraries) which should match address range of [seg_start, seg_end)
  477. */
  478. for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
  479. if (seg->start <= virtaddr && virtaddr < seg->end)
  480. return seg;
  481. }
  482. return NULL;
  483. }
  484. static struct elf_seg *find_vma_seg(struct elf_seg *segs, size_t seg_cnt, long offset)
  485. {
  486. struct elf_seg *seg;
  487. int i;
  488. /* for VMA segments from /proc/<pid>/maps file, provided "address" is
  489. * actually a file offset, so it should fall within the logical
  490. * offset-based range of [offset_start, offset_end)
  491. */
  492. for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
  493. if (seg->offset <= offset && offset < seg->offset + (seg->end - seg->start))
  494. return seg;
  495. }
  496. return NULL;
  497. }
  498. static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
  499. const char *data, size_t name_off, size_t desc_off,
  500. struct usdt_note *usdt_note);
  501. static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie);
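/* Scan '.note.stapsdt' notes of the ELF binary, pick the ones matching
 * usdt_provider:usdt_name, and convert each into a usdt_target with resolved
 * absolute IP, uprobe file offset, optional semaphore offset, and parsed
 * USDT spec. Returns the number of matching targets or a negative error.
 */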
  502. static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *path, pid_t pid,
  503. const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie,
  504. struct usdt_target **out_targets, size_t *out_target_cnt)
  505. {
  506. size_t off, name_off, desc_off, seg_cnt = 0, vma_seg_cnt = 0, target_cnt = 0;
  507. struct elf_seg *segs = NULL, *vma_segs = NULL;
  508. struct usdt_target *targets = NULL, *target;
  509. long base_addr = 0;
  510. Elf_Scn *notes_scn, *base_scn;
  511. GElf_Shdr base_shdr, notes_shdr;
  512. GElf_Ehdr ehdr;
  513. GElf_Nhdr nhdr;
  514. Elf_Data *data;
  515. int err;
  516. *out_targets = NULL;
  517. *out_target_cnt = 0;
  518. err = find_elf_sec_by_name(elf, USDT_NOTE_SEC, &notes_shdr, &notes_scn);
  519. if (err) {
  520. pr_warn("usdt: no USDT notes section (%s) found in '%s'\n", USDT_NOTE_SEC, path);
  521. return err;
  522. }
  523. if (notes_shdr.sh_type != SHT_NOTE || !gelf_getehdr(elf, &ehdr)) {
  524. pr_warn("usdt: invalid USDT notes section (%s) in '%s'\n", USDT_NOTE_SEC, path);
  525. return -EINVAL;
  526. }
  527. err = parse_elf_segs(elf, path, &segs, &seg_cnt);
  528. if (err) {
  529. pr_warn("usdt: failed to process ELF program segments for '%s': %d\n", path, err);
  530. goto err_out;
  531. }
  532. /* .stapsdt.base ELF section is optional, but is used for prelink
  533. * offset compensation (see a big comment further below)
  534. */
  535. if (find_elf_sec_by_name(elf, USDT_BASE_SEC, &base_shdr, &base_scn) == 0)
  536. base_addr = base_shdr.sh_addr;
  537. data = elf_getdata(notes_scn, 0);
  538. off = 0;
  539. while ((off = gelf_getnote(data, off, &nhdr, &name_off, &desc_off)) > 0) {
  540. long usdt_abs_ip, usdt_rel_ip, usdt_sema_off = 0;
  541. struct usdt_note note;
  542. struct elf_seg *seg = NULL;
  543. void *tmp;
  544. err = parse_usdt_note(elf, path, &nhdr, data->d_buf, name_off, desc_off, &note);
  545. if (err)
  546. goto err_out;
  547. if (strcmp(note.provider, usdt_provider) != 0 || strcmp(note.name, usdt_name) != 0)
  548. continue;
  549. /* We need to compensate "prelink effect". See [0] for details,
  550. * relevant parts quoted here:
  551. *
  552. * Each SDT probe also expands into a non-allocated ELF note. You can
  553. * find this by looking at SHT_NOTE sections and decoding the format;
  554. * see below for details. Because the note is non-allocated, it means
  555. * there is no runtime cost, and also preserved in both stripped files
  556. * and .debug files.
  557. *
  558. * However, this means that prelink won't adjust the note's contents
  559. * for address offsets. Instead, this is done via the .stapsdt.base
  560. * section. This is a special section that is added to the text. We
  561. * will only ever have one of these sections in a final link and it
  562. * will only ever be one byte long. Nothing about this section itself
  563. * matters, we just use it as a marker to detect prelink address
  564. * adjustments.
  565. *
  566. * Each probe note records the link-time address of the .stapsdt.base
  567. * section alongside the probe PC address. The decoder compares the
  568. * base address stored in the note with the .stapsdt.base section's
  569. * sh_addr. Initially these are the same, but the section header will
  570. * be adjusted by prelink. So the decoder applies the difference to
  571. * the probe PC address to get the correct prelinked PC address; the
  572. * same adjustment is applied to the semaphore address, if any.
  573. *
  574. * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
  575. */
  576. usdt_abs_ip = note.loc_addr;
  577. if (base_addr)
  578. usdt_abs_ip += base_addr - note.base_addr;
  579. /* When attaching uprobes (which is what USDTs basically are)
  580. * kernel expects file offset to be specified, not a relative
  581. * virtual address, so we need to translate virtual address to
  582. * file offset, for both ET_EXEC and ET_DYN binaries.
  583. */
  584. seg = find_elf_seg(segs, seg_cnt, usdt_abs_ip);
  585. if (!seg) {
  586. err = -ESRCH;
  587. pr_warn("usdt: failed to find ELF program segment for '%s:%s' in '%s' at IP 0x%lx\n",
  588. usdt_provider, usdt_name, path, usdt_abs_ip);
  589. goto err_out;
  590. }
  591. if (!seg->is_exec) {
  592. err = -ESRCH;
  593. pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx) for '%s:%s' at IP 0x%lx is not executable\n",
  594. path, seg->start, seg->end, usdt_provider, usdt_name,
  595. usdt_abs_ip);
  596. goto err_out;
  597. }
  598. /* translate from virtual address to file offset */
  599. usdt_rel_ip = usdt_abs_ip - seg->start + seg->offset;
  600. if (ehdr.e_type == ET_DYN && !man->has_bpf_cookie) {
  601. /* If we don't have BPF cookie support but need to
  602. * attach to a shared library, we'll need to know and
  603. * record absolute addresses of attach points due to
  604. * the need to lookup USDT spec by absolute IP of
  605. * triggered uprobe. Doing this resolution is only
  606. * possible when we have a specific PID of the process
  607. * that's using specified shared library. BPF cookie
  608. * removes the absolute address limitation as we don't
  609. * need to do this lookup (we just use BPF cookie as
  610. * an index of USDT spec), so for newer kernels with
  611. * BPF cookie support libbpf supports USDT attachment
  612. * to shared libraries with no PID filter.
  613. */
  614. if (pid < 0) {
  615. pr_warn("usdt: attaching to shared libraries without specific PID is not supported on current kernel\n");
  616. err = -ENOTSUP;
  617. goto err_out;
  618. }
  619. /* vma_segs are lazily initialized only if necessary */
  620. if (vma_seg_cnt == 0) {
  621. err = parse_vma_segs(pid, path, &vma_segs, &vma_seg_cnt);
  622. if (err) {
  623. pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %d\n",
  624. pid, path, err);
  625. goto err_out;
  626. }
  627. }
  628. seg = find_vma_seg(vma_segs, vma_seg_cnt, usdt_rel_ip);
  629. if (!seg) {
  630. err = -ESRCH;
  631. pr_warn("usdt: failed to find shared lib memory segment for '%s:%s' in '%s' at relative IP 0x%lx\n",
  632. usdt_provider, usdt_name, path, usdt_rel_ip);
  633. goto err_out;
  634. }
  635. usdt_abs_ip = seg->start - seg->offset + usdt_rel_ip;
  636. }
  637. pr_debug("usdt: probe for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved abs_ip 0x%lx rel_ip 0x%lx) args '%s' in segment [0x%lx, 0x%lx) at offset 0x%lx\n",
  638. usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ", path,
  639. note.loc_addr, note.base_addr, usdt_abs_ip, usdt_rel_ip, note.args,
  640. seg ? seg->start : 0, seg ? seg->end : 0, seg ? seg->offset : 0);
  641. /* Adjust semaphore address to be a file offset */
  642. if (note.sema_addr) {
  643. if (!man->has_sema_refcnt) {
  644. pr_warn("usdt: kernel doesn't support USDT semaphore refcounting for '%s:%s' in '%s'\n",
  645. usdt_provider, usdt_name, path);
  646. err = -ENOTSUP;
  647. goto err_out;
  648. }
  649. seg = find_elf_seg(segs, seg_cnt, note.sema_addr);
  650. if (!seg) {
  651. err = -ESRCH;
  652. pr_warn("usdt: failed to find ELF loadable segment with semaphore of '%s:%s' in '%s' at 0x%lx\n",
  653. usdt_provider, usdt_name, path, note.sema_addr);
  654. goto err_out;
  655. }
  656. if (seg->is_exec) {
  657. err = -ESRCH;
  658. pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx] for semaphore of '%s:%s' at 0x%lx is executable\n",
  659. path, seg->start, seg->end, usdt_provider, usdt_name,
  660. note.sema_addr);
  661. goto err_out;
  662. }
  663. usdt_sema_off = note.sema_addr - seg->start + seg->offset;
  664. pr_debug("usdt: sema for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved 0x%lx) in segment [0x%lx, 0x%lx] at offset 0x%lx\n",
  665. usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ",
  666. path, note.sema_addr, note.base_addr, usdt_sema_off,
  667. seg->start, seg->end, seg->offset);
  668. }
  669. /* Record adjusted addresses and offsets and parse USDT spec */
  670. tmp = libbpf_reallocarray(targets, target_cnt + 1, sizeof(*targets));
  671. if (!tmp) {
  672. err = -ENOMEM;
  673. goto err_out;
  674. }
  675. targets = tmp;
  676. target = &targets[target_cnt];
  677. memset(target, 0, sizeof(*target));
  678. target->abs_ip = usdt_abs_ip;
  679. target->rel_ip = usdt_rel_ip;
  680. target->sema_off = usdt_sema_off;
  681. * note.args references strings from Elf itself, so they can
  682. * be referenced safely until elf_end() call
  683. */
  684. target->spec_str = note.args;
  685. err = parse_usdt_spec(&target->spec, &note, usdt_cookie);
  686. if (err)
  687. goto err_out;
  688. target_cnt++;
  689. }
  690. *out_targets = targets;
  691. *out_target_cnt = target_cnt;
  692. err = target_cnt;
  693. err_out:
  694. free(segs);
  695. free(vma_segs);
  696. if (err < 0)
  697. free(targets);
  698. return err;
  699. }
  700. struct bpf_link_usdt {
  701. struct bpf_link link;
  702. struct usdt_manager *usdt_man;
  703. size_t spec_cnt;
  704. int *spec_ids;
  705. size_t uprobe_cnt;
  706. struct {
  707. long abs_ip;
  708. struct bpf_link *link;
  709. } *uprobes;
  710. };
  711. static int bpf_link_usdt_detach(struct bpf_link *link)
  712. {
  713. struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);
  714. struct usdt_manager *man = usdt_link->usdt_man;
  715. int i;
  716. for (i = 0; i < usdt_link->uprobe_cnt; i++) {
  717. /* detach underlying uprobe link */
  718. bpf_link__destroy(usdt_link->uprobes[i].link);
  719. /* there is no need to update specs map because it will be
  720. * unconditionally overwritten on subsequent USDT attaches,
  721. * but if BPF cookies are not used we need to remove entry
  722. * from ip_to_spec_id map, otherwise we'll run into false
  723. * conflicting IP errors
  724. */
  725. if (!man->has_bpf_cookie) {
  726. /* not much we can do about errors here */
  727. (void)bpf_map_delete_elem(bpf_map__fd(man->ip_to_spec_id_map),
  728. &usdt_link->uprobes[i].abs_ip);
  729. }
  730. }
  731. /* try to return the list of previously used spec IDs to usdt_manager
  732. * for future reuse for subsequent USDT attaches
  733. */
  734. if (!man->free_spec_ids) {
  735. /* if there were no free spec IDs yet, just transfer our IDs */
  736. man->free_spec_ids = usdt_link->spec_ids;
  737. man->free_spec_cnt = usdt_link->spec_cnt;
  738. usdt_link->spec_ids = NULL;
  739. } else {
  740. /* otherwise concat IDs */
  741. size_t new_cnt = man->free_spec_cnt + usdt_link->spec_cnt;
  742. int *new_free_ids;
  743. new_free_ids = libbpf_reallocarray(man->free_spec_ids, new_cnt,
  744. sizeof(*new_free_ids));
  745. /* If we couldn't resize free_spec_ids, we'll just leak
  746. * a bunch of free IDs; this is very unlikely to happen and if
  747. * system is so exhausted on memory, it's the least of user's
  748. * concerns, probably.
  749. * So just do our best here to return those IDs to usdt_manager.
  750. * Another edge case when we can legitimately get NULL is when
  751. * new_cnt is zero, which can happen in some edge cases, so we
  752. * need to be careful about that.
  753. */
  754. if (new_free_ids || new_cnt == 0) {
  755. memcpy(new_free_ids + man->free_spec_cnt, usdt_link->spec_ids,
  756. usdt_link->spec_cnt * sizeof(*usdt_link->spec_ids));
  757. man->free_spec_ids = new_free_ids;
  758. man->free_spec_cnt = new_cnt;
  759. }
  760. }
  761. return 0;
  762. }
  763. static void bpf_link_usdt_dealloc(struct bpf_link *link)
  764. {
  765. struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);
  766. free(usdt_link->spec_ids);
  767. free(usdt_link->uprobes);
  768. free(usdt_link);
  769. }
  770. static size_t specs_hash_fn(const void *key, void *ctx)
  771. {
  772. const char *s = key;
  773. return str_hash(s);
  774. }
  775. static bool specs_equal_fn(const void *key1, const void *key2, void *ctx)
  776. {
  777. const char *s1 = key1;
  778. const char *s2 = key2;
  779. return strcmp(s1, s2) == 0;
  780. }
  781. static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash,
  782. struct bpf_link_usdt *link, struct usdt_target *target,
  783. int *spec_id, bool *is_new)
  784. {
  785. void *tmp;
  786. int err;
  787. /* check if we already allocated spec ID for this spec string */
  788. if (hashmap__find(specs_hash, target->spec_str, &tmp)) {
  789. *spec_id = (long)tmp;
  790. *is_new = false;
  791. return 0;
  792. }
  793. /* otherwise it's a new ID that needs to be set up in specs map and
  794. * returned back to usdt_manager when USDT link is detached
  795. */
  796. tmp = libbpf_reallocarray(link->spec_ids, link->spec_cnt + 1, sizeof(*link->spec_ids));
  797. if (!tmp)
  798. return -ENOMEM;
  799. link->spec_ids = tmp;
  800. /* get next free spec ID, giving preference to free list, if not empty */
  801. if (man->free_spec_cnt) {
  802. *spec_id = man->free_spec_ids[man->free_spec_cnt - 1];
  803. /* cache spec ID for current spec string for future lookups */
  804. err = hashmap__add(specs_hash, target->spec_str, (void *)(long)*spec_id);
  805. if (err)
  806. return err;
  807. man->free_spec_cnt--;
  808. } else {
  809. /* don't allocate spec ID bigger than what fits in specs map */
  810. if (man->next_free_spec_id >= bpf_map__max_entries(man->specs_map))
  811. return -E2BIG;
  812. *spec_id = man->next_free_spec_id;
  813. /* cache spec ID for current spec string for future lookups */
  814. err = hashmap__add(specs_hash, target->spec_str, (void *)(long)*spec_id);
  815. if (err)
  816. return err;
  817. man->next_free_spec_id++;
  818. }
  819. /* remember new spec ID in the link for later return back to free list on detach */
  820. link->spec_ids[link->spec_cnt] = *spec_id;
  821. link->spec_cnt++;
  822. *is_new = true;
  823. return 0;
  824. }
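/* Attach the given BPF program to every call site of usdt_provider:usdt_name
 * in the binary at 'path'. High-level flow:
 *   1) open and sanity-check the target ELF binary;
 *   2) collect matching USDT targets (call sites + parsed specs);
 *   3) for each target, deduplicate/allocate a spec ID, populate the spec
 *      map (and, on kernels without BPF cookie, the IP-to-spec-ID map);
 *   4) attach a uprobe at the target's file offset, passing the spec ID as
 *      BPF cookie (when supported) and the semaphore offset, if any, as
 *      ref_ctr_offset.
 */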
  825. struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct bpf_program *prog,
  826. pid_t pid, const char *path,
  827. const char *usdt_provider, const char *usdt_name,
  828. __u64 usdt_cookie)
  829. {
  830. int i, fd, err, spec_map_fd, ip_map_fd;
  831. LIBBPF_OPTS(bpf_uprobe_opts, opts);
  832. struct hashmap *specs_hash = NULL;
  833. struct bpf_link_usdt *link = NULL;
  834. struct usdt_target *targets = NULL;
  835. size_t target_cnt;
  836. Elf *elf;
  837. spec_map_fd = bpf_map__fd(man->specs_map);
  838. ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map);
  839. /* TODO: perform path resolution similar to uprobe's */
  840. fd = open(path, O_RDONLY);
  841. if (fd < 0) {
  842. err = -errno;
  843. pr_warn("usdt: failed to open ELF binary '%s': %d\n", path, err);
  844. return libbpf_err_ptr(err);
  845. }
  846. elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
  847. if (!elf) {
  848. err = -EBADF;
  849. pr_warn("usdt: failed to parse ELF binary '%s': %s\n", path, elf_errmsg(-1));
  850. goto err_out;
  851. }
  852. err = sanity_check_usdt_elf(elf, path);
  853. if (err)
  854. goto err_out;
  855. /* normalize PID filter */
  856. if (pid < 0)
  857. pid = -1;
  858. else if (pid == 0)
  859. pid = getpid();
  860. /* discover USDT in given binary, optionally limiting
  861. * activations to a given PID, if pid > 0
  862. */
  863. err = collect_usdt_targets(man, elf, path, pid, usdt_provider, usdt_name,
  864. usdt_cookie, &targets, &target_cnt);
  865. if (err <= 0) {
  866. err = (err == 0) ? -ENOENT : err;
  867. goto err_out;
  868. }
  869. specs_hash = hashmap__new(specs_hash_fn, specs_equal_fn, NULL);
  870. if (IS_ERR(specs_hash)) {
  871. err = PTR_ERR(specs_hash);
  872. goto err_out;
  873. }
  874. link = calloc(1, sizeof(*link));
  875. if (!link) {
  876. err = -ENOMEM;
  877. goto err_out;
  878. }
  879. link->usdt_man = man;
  880. link->link.detach = &bpf_link_usdt_detach;
  881. link->link.dealloc = &bpf_link_usdt_dealloc;
  882. link->uprobes = calloc(target_cnt, sizeof(*link->uprobes));
  883. if (!link->uprobes) {
  884. err = -ENOMEM;
  885. goto err_out;
  886. }
  887. for (i = 0; i < target_cnt; i++) {
  888. struct usdt_target *target = &targets[i];
  889. struct bpf_link *uprobe_link;
  890. bool is_new;
  891. int spec_id;
  892. /* Spec ID can be either reused or newly allocated. If it is
  893. * newly allocated, we'll need to fill out spec map, otherwise
  894. * entire spec should be valid and can be just used by a new
  895. * uprobe. We reuse spec when USDT arg spec is identical. We
  896. * also never share specs between two different USDT
  897. * attachments ("links"), so all the reused specs already
  898. * share USDT cookie value implicitly.
  899. */
  900. err = allocate_spec_id(man, specs_hash, link, target, &spec_id, &is_new);
  901. if (err)
  902. goto err_out;
  903. if (is_new && bpf_map_update_elem(spec_map_fd, &spec_id, &target->spec, BPF_ANY)) {
  904. err = -errno;
  905. pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %d\n",
  906. spec_id, usdt_provider, usdt_name, path, err);
  907. goto err_out;
  908. }
  909. if (!man->has_bpf_cookie &&
  910. bpf_map_update_elem(ip_map_fd, &target->abs_ip, &spec_id, BPF_NOEXIST)) {
  911. err = -errno;
  912. if (err == -EEXIST) {
  913. pr_warn("usdt: IP collision detected for spec #%d for '%s:%s' in '%s'\n",
  914. spec_id, usdt_provider, usdt_name, path);
  915. } else {
  916. pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %d\n",
  917. target->abs_ip, spec_id, usdt_provider, usdt_name,
  918. path, err);
  919. }
  920. goto err_out;
  921. }
  922. opts.ref_ctr_offset = target->sema_off;
  923. opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0;
  924. uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path,
  925. target->rel_ip, &opts);
  926. err = libbpf_get_error(uprobe_link);
  927. if (err) {
  928. pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n",
  929. i, usdt_provider, usdt_name, path, err);
  930. goto err_out;
  931. }
  932. link->uprobes[i].link = uprobe_link;
  933. link->uprobes[i].abs_ip = target->abs_ip;
  934. link->uprobe_cnt++;
  935. }
  936. free(targets);
  937. hashmap__free(specs_hash);
  938. elf_end(elf);
  939. close(fd);
  940. return &link->link;
  941. err_out:
  942. if (link)
  943. bpf_link__destroy(&link->link);
  944. free(targets);
  945. hashmap__free(specs_hash);
  946. if (elf)
  947. elf_end(elf);
  948. close(fd);
  949. return libbpf_err_ptr(err);
  950. }
  951. /* Parse out USDT ELF note from '.note.stapsdt' section.
  952. * Logic inspired by perf's code.
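 *
 * The note's "description" payload parsed here is laid out as three
 * address-sized values followed by three zero-terminated strings:
 *
 *   [loc_addr][base_addr][sema_addr] "provider\0" "name\0" "args\0"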
  953. */
  954. static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
  955. const char *data, size_t name_off, size_t desc_off,
  956. struct usdt_note *note)
  957. {
  958. const char *provider, *name, *args;
  959. long addrs[3];
  960. size_t len;
  961. /* sanity check USDT note name and type first */
  962. if (strncmp(data + name_off, USDT_NOTE_NAME, nhdr->n_namesz) != 0)
  963. return -EINVAL;
  964. if (nhdr->n_type != USDT_NOTE_TYPE)
  965. return -EINVAL;
  966. /* sanity check USDT note contents ("description" in ELF terminology) */
  967. len = nhdr->n_descsz;
  968. data = data + desc_off;
  969. /* +3 is the very minimum required to store three empty strings */
  970. if (len < sizeof(addrs) + 3)
  971. return -EINVAL;
  972. /* get location, base, and semaphore addrs */
  973. memcpy(&addrs, data, sizeof(addrs));
  974. /* parse string fields: provider, name, args */
  975. provider = data + sizeof(addrs);
  976. name = (const char *)memchr(provider, '\0', data + len - provider);
  977. if (!name) /* non-zero-terminated provider */
  978. return -EINVAL;
  979. name++;
  980. if (name >= data + len || *name == '\0') /* missing or empty name */
  981. return -EINVAL;
  982. args = memchr(name, '\0', data + len - name);
  983. if (!args) /* non-zero-terminated name */
  984. return -EINVAL;
  985. ++args;
  986. if (args >= data + len) /* missing arguments spec */
  987. return -EINVAL;
  988. note->provider = provider;
  989. note->name = name;
  990. if (*args == '\0' || *args == ':')
  991. note->args = "";
  992. else
  993. note->args = args;
  994. note->loc_addr = addrs[0];
  995. note->base_addr = addrs[1];
  996. note->sema_addr = addrs[2];
  997. return 0;
  998. }
  999. static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg);
  1000. static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie)
  1001. {
  1002. const char *s;
  1003. int len;
  1004. spec->usdt_cookie = usdt_cookie;
  1005. spec->arg_cnt = 0;
  1006. s = note->args;
  1007. while (s[0]) {
  1008. if (spec->arg_cnt >= USDT_MAX_ARG_CNT) {
  1009. pr_warn("usdt: too many USDT arguments (> %d) for '%s:%s' with args spec '%s'\n",
  1010. USDT_MAX_ARG_CNT, note->provider, note->name, note->args);
  1011. return -E2BIG;
  1012. }
  1013. len = parse_usdt_arg(s, spec->arg_cnt, &spec->args[spec->arg_cnt]);
  1014. if (len < 0)
  1015. return len;
  1016. s += len;
  1017. spec->arg_cnt++;
  1018. }
  1019. return 0;
  1020. }
  1021. /* Architecture-specific logic for parsing USDT argument location specs */
  1022. #if defined(__x86_64__) || defined(__i386__)
  1023. static int calc_pt_regs_off(const char *reg_name)
  1024. {
  1025. static struct {
  1026. const char *names[4];
  1027. size_t pt_regs_off;
  1028. } reg_map[] = {
  1029. #ifdef __x86_64__
  1030. #define reg_off(reg64, reg32) offsetof(struct pt_regs, reg64)
  1031. #else
  1032. #define reg_off(reg64, reg32) offsetof(struct pt_regs, reg32)
  1033. #endif
  1034. { {"rip", "eip", "", ""}, reg_off(rip, eip) },
  1035. { {"rax", "eax", "ax", "al"}, reg_off(rax, eax) },
  1036. { {"rbx", "ebx", "bx", "bl"}, reg_off(rbx, ebx) },
  1037. { {"rcx", "ecx", "cx", "cl"}, reg_off(rcx, ecx) },
  1038. { {"rdx", "edx", "dx", "dl"}, reg_off(rdx, edx) },
  1039. { {"rsi", "esi", "si", "sil"}, reg_off(rsi, esi) },
  1040. { {"rdi", "edi", "di", "dil"}, reg_off(rdi, edi) },
  1041. { {"rbp", "ebp", "bp", "bpl"}, reg_off(rbp, ebp) },
  1042. { {"rsp", "esp", "sp", "spl"}, reg_off(rsp, esp) },
  1043. #undef reg_off
  1044. #ifdef __x86_64__
  1045. { {"r8", "r8d", "r8w", "r8b"}, offsetof(struct pt_regs, r8) },
  1046. { {"r9", "r9d", "r9w", "r9b"}, offsetof(struct pt_regs, r9) },
  1047. { {"r10", "r10d", "r10w", "r10b"}, offsetof(struct pt_regs, r10) },
  1048. { {"r11", "r11d", "r11w", "r11b"}, offsetof(struct pt_regs, r11) },
  1049. { {"r12", "r12d", "r12w", "r12b"}, offsetof(struct pt_regs, r12) },
  1050. { {"r13", "r13d", "r13w", "r13b"}, offsetof(struct pt_regs, r13) },
  1051. { {"r14", "r14d", "r14w", "r14b"}, offsetof(struct pt_regs, r14) },
  1052. { {"r15", "r15d", "r15w", "r15b"}, offsetof(struct pt_regs, r15) },
  1053. #endif
  1054. };
  1055. int i, j;
  1056. for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
  1057. for (j = 0; j < ARRAY_SIZE(reg_map[i].names); j++) {
  1058. if (strcmp(reg_name, reg_map[i].names[j]) == 0)
  1059. return reg_map[i].pt_regs_off;
  1060. }
  1061. }
  1062. pr_warn("usdt: unrecognized register '%s'\n", reg_name);
  1063. return -ENOENT;
  1064. }
  1065. static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
  1066. {
  1067. char *reg_name = NULL;
  1068. int arg_sz, len, reg_off;
  1069. long off;
  1070. if (sscanf(arg_str, " %d @ %ld ( %%%m[^)] ) %n", &arg_sz, &off, &reg_name, &len) == 3) {
  1071. /* Memory dereference case, e.g., -4@-20(%rbp) */
  1072. arg->arg_type = USDT_ARG_REG_DEREF;
  1073. arg->val_off = off;
  1074. reg_off = calc_pt_regs_off(reg_name);
  1075. free(reg_name);
  1076. if (reg_off < 0)
  1077. return reg_off;
  1078. arg->reg_off = reg_off;
  1079. } else if (sscanf(arg_str, " %d @ %%%ms %n", &arg_sz, &reg_name, &len) == 2) {
  1080. /* Register read case, e.g., -4@%eax */
  1081. arg->arg_type = USDT_ARG_REG;
  1082. arg->val_off = 0;
  1083. reg_off = calc_pt_regs_off(reg_name);
  1084. free(reg_name);
  1085. if (reg_off < 0)
  1086. return reg_off;
  1087. arg->reg_off = reg_off;
  1088. } else if (sscanf(arg_str, " %d @ $%ld %n", &arg_sz, &off, &len) == 2) {
  1089. /* Constant value case, e.g., 4@$71 */
  1090. arg->arg_type = USDT_ARG_CONST;
  1091. arg->val_off = off;
  1092. arg->reg_off = 0;
  1093. } else {
  1094. pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
  1095. return -EINVAL;
  1096. }
  1097. arg->arg_signed = arg_sz < 0;
  1098. if (arg_sz < 0)
  1099. arg_sz = -arg_sz;
  1100. switch (arg_sz) {
  1101. case 1: case 2: case 4: case 8:
  1102. arg->arg_bitshift = 64 - arg_sz * 8;
  1103. break;
  1104. default:
  1105. pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
  1106. arg_num, arg_str, arg_sz);
  1107. return -EINVAL;
  1108. }
  1109. return len;
  1110. }
  1111. #elif defined(__s390x__)
  1112. /* Do not support __s390__ for now, since user_pt_regs is broken with -m31. */
  1113. static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
  1114. {
  1115. unsigned int reg;
  1116. int arg_sz, len;
  1117. long off;
  1118. if (sscanf(arg_str, " %d @ %ld ( %%r%u ) %n", &arg_sz, &off, &reg, &len) == 3) {
  1119. /* Memory dereference case, e.g., -2@-28(%r15) */
  1120. arg->arg_type = USDT_ARG_REG_DEREF;
  1121. arg->val_off = off;
  1122. if (reg > 15) {
  1123. pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
  1124. return -EINVAL;
  1125. }
  1126. arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
  1127. } else if (sscanf(arg_str, " %d @ %%r%u %n", &arg_sz, &reg, &len) == 2) {
  1128. /* Register read case, e.g., -8@%r0 */
  1129. arg->arg_type = USDT_ARG_REG;
  1130. arg->val_off = 0;
  1131. if (reg > 15) {
  1132. pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
  1133. return -EINVAL;
  1134. }
  1135. arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
  1136. } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
  1137. /* Constant value case, e.g., 4@71 */
  1138. arg->arg_type = USDT_ARG_CONST;
  1139. arg->val_off = off;
  1140. arg->reg_off = 0;
  1141. } else {
  1142. pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
  1143. return -EINVAL;
  1144. }
  1145. arg->arg_signed = arg_sz < 0;
  1146. if (arg_sz < 0)
  1147. arg_sz = -arg_sz;
  1148. switch (arg_sz) {
  1149. case 1: case 2: case 4: case 8:
  1150. arg->arg_bitshift = 64 - arg_sz * 8;
  1151. break;
  1152. default:
  1153. pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
  1154. arg_num, arg_str, arg_sz);
  1155. return -EINVAL;
  1156. }
  1157. return len;
  1158. }
  1159. #elif defined(__aarch64__)
  1160. static int calc_pt_regs_off(const char *reg_name)
  1161. {
  1162. int reg_num;
  1163. if (sscanf(reg_name, "x%d", &reg_num) == 1) {
  1164. if (reg_num >= 0 && reg_num < 31)
  1165. return offsetof(struct user_pt_regs, regs[reg_num]);
  1166. } else if (strcmp(reg_name, "sp") == 0) {
  1167. return offsetof(struct user_pt_regs, sp);
  1168. }
  1169. pr_warn("usdt: unrecognized register '%s'\n", reg_name);
  1170. return -ENOENT;
  1171. }
  1172. static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
  1173. {
  1174. char reg_name[16];
  1175. int arg_sz, len, reg_off;
  1176. long off;
  1177. if (sscanf(arg_str, " %d @ \[ %15[a-z0-9], %ld ] %n", &arg_sz, reg_name, &off, &len) == 3) {
  1178. /* Memory dereference case, e.g., -4@[sp, 96] */
  1179. arg->arg_type = USDT_ARG_REG_DEREF;
  1180. arg->val_off = off;
  1181. reg_off = calc_pt_regs_off(reg_name);
  1182. if (reg_off < 0)
  1183. return reg_off;
  1184. arg->reg_off = reg_off;
  1185. } else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", &arg_sz, reg_name, &len) == 2) {
  1186. /* Memory dereference case, e.g., -4@[sp] */
  1187. arg->arg_type = USDT_ARG_REG_DEREF;
  1188. arg->val_off = 0;
  1189. reg_off = calc_pt_regs_off(reg_name);
  1190. if (reg_off < 0)
  1191. return reg_off;
  1192. arg->reg_off = reg_off;
  1193. } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
  1194. /* Constant value case, e.g., 4@5 */
  1195. arg->arg_type = USDT_ARG_CONST;
  1196. arg->val_off = off;
  1197. arg->reg_off = 0;
  1198. } else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", &arg_sz, reg_name, &len) == 2) {
  1199. /* Register read case, e.g., -8@x4 */
  1200. arg->arg_type = USDT_ARG_REG;
  1201. arg->val_off = 0;
  1202. reg_off = calc_pt_regs_off(reg_name);
  1203. if (reg_off < 0)
  1204. return reg_off;
  1205. arg->reg_off = reg_off;
  1206. } else {
  1207. pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
  1208. return -EINVAL;
  1209. }
  1210. arg->arg_signed = arg_sz < 0;
  1211. if (arg_sz < 0)
  1212. arg_sz = -arg_sz;
  1213. switch (arg_sz) {
  1214. case 1: case 2: case 4: case 8:
  1215. arg->arg_bitshift = 64 - arg_sz * 8;
  1216. break;
  1217. default:
  1218. pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
  1219. arg_num, arg_str, arg_sz);
  1220. return -EINVAL;
  1221. }
  1222. return len;
  1223. }
  1224. #elif defined(__riscv)
  1225. static int calc_pt_regs_off(const char *reg_name)
  1226. {
  1227. static struct {
  1228. const char *name;
  1229. size_t pt_regs_off;
  1230. } reg_map[] = {
  1231. { "ra", offsetof(struct user_regs_struct, ra) },
  1232. { "sp", offsetof(struct user_regs_struct, sp) },
  1233. { "gp", offsetof(struct user_regs_struct, gp) },
  1234. { "tp", offsetof(struct user_regs_struct, tp) },
  1235. { "a0", offsetof(struct user_regs_struct, a0) },
  1236. { "a1", offsetof(struct user_regs_struct, a1) },
  1237. { "a2", offsetof(struct user_regs_struct, a2) },
  1238. { "a3", offsetof(struct user_regs_struct, a3) },
  1239. { "a4", offsetof(struct user_regs_struct, a4) },
  1240. { "a5", offsetof(struct user_regs_struct, a5) },
  1241. { "a6", offsetof(struct user_regs_struct, a6) },
  1242. { "a7", offsetof(struct user_regs_struct, a7) },
  1243. { "s0", offsetof(struct user_regs_struct, s0) },
  1244. { "s1", offsetof(struct user_regs_struct, s1) },
  1245. { "s2", offsetof(struct user_regs_struct, s2) },
  1246. { "s3", offsetof(struct user_regs_struct, s3) },
  1247. { "s4", offsetof(struct user_regs_struct, s4) },
  1248. { "s5", offsetof(struct user_regs_struct, s5) },
  1249. { "s6", offsetof(struct user_regs_struct, s6) },
  1250. { "s7", offsetof(struct user_regs_struct, s7) },
  1251. { "s8", offsetof(struct user_regs_struct, rv_s8) },
  1252. { "s9", offsetof(struct user_regs_struct, s9) },
  1253. { "s10", offsetof(struct user_regs_struct, s10) },
  1254. { "s11", offsetof(struct user_regs_struct, s11) },
  1255. { "t0", offsetof(struct user_regs_struct, t0) },
  1256. { "t1", offsetof(struct user_regs_struct, t1) },
  1257. { "t2", offsetof(struct user_regs_struct, t2) },
  1258. { "t3", offsetof(struct user_regs_struct, t3) },
  1259. { "t4", offsetof(struct user_regs_struct, t4) },
  1260. { "t5", offsetof(struct user_regs_struct, t5) },
  1261. { "t6", offsetof(struct user_regs_struct, t6) },
  1262. };
  1263. int i;
  1264. for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
  1265. if (strcmp(reg_name, reg_map[i].name) == 0)
  1266. return reg_map[i].pt_regs_off;
  1267. }
  1268. pr_warn("usdt: unrecognized register '%s'\n", reg_name);
  1269. return -ENOENT;
  1270. }
  1271. static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
  1272. {
  1273. char *reg_name = NULL;
  1274. int arg_sz, len, reg_off;
  1275. long off;
  1276. if (sscanf(arg_str, " %d @ %ld ( %m[a-z0-9] ) %n", &arg_sz, &off, &reg_name, &len) == 3) {
  1277. /* Memory dereference case, e.g., -8@-88(s0) */
  1278. arg->arg_type = USDT_ARG_REG_DEREF;
  1279. arg->val_off = off;
  1280. reg_off = calc_pt_regs_off(reg_name);
  1281. free(reg_name);
  1282. if (reg_off < 0)
  1283. return reg_off;
  1284. arg->reg_off = reg_off;
  1285. } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
  1286. /* Constant value case, e.g., 4@5 */
  1287. arg->arg_type = USDT_ARG_CONST;
  1288. arg->val_off = off;
  1289. arg->reg_off = 0;
  1290. } else if (sscanf(arg_str, " %d @ %m[a-z0-9] %n", &arg_sz, &reg_name, &len) == 2) {
  1291. /* Register read case, e.g., -8@a1 */
  1292. arg->arg_type = USDT_ARG_REG;
  1293. arg->val_off = 0;
  1294. reg_off = calc_pt_regs_off(reg_name);
  1295. free(reg_name);
  1296. if (reg_off < 0)
  1297. return reg_off;
  1298. arg->reg_off = reg_off;
  1299. } else {
  1300. pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
  1301. return -EINVAL;
  1302. }
  1303. arg->arg_signed = arg_sz < 0;
  1304. if (arg_sz < 0)
  1305. arg_sz = -arg_sz;
  1306. switch (arg_sz) {
  1307. case 1: case 2: case 4: case 8:
  1308. arg->arg_bitshift = 64 - arg_sz * 8;
  1309. break;
  1310. default:
  1311. pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
  1312. arg_num, arg_str, arg_sz);
  1313. return -EINVAL;
  1314. }
  1315. return len;
  1316. }
  1317. #else
  1318. static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
  1319. {
  1320. pr_warn("usdt: libbpf doesn't support USDTs on current architecture\n");
  1321. return -ENOTSUP;
  1322. }
  1323. #endif