  1. // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
  2. /* Copyright (C) 2020 Facebook */
  3. #include <errno.h>
  4. #include <linux/err.h>
  5. #include <stdbool.h>
  6. #include <stdio.h>
  7. #include <stdlib.h>
  8. #include <string.h>
  9. #include <unistd.h>
  10. #include <bpf/bpf.h>
  11. #include <bpf/hashmap.h>
  12. #include "main.h"
  13. #include "skeleton/pid_iter.h"
  14. #ifdef BPFTOOL_WITHOUT_SKELETONS
/* Stub implementations used when bpftool is built without BPF skeleton
 * support: PID reference tracking relies on the pid_iter skeleton, so
 * building the table reports "not supported" and the emit/delete hooks
 * are no-ops.
 */
int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
{
	return -ENOTSUP;
}
void delete_obj_refs_table(struct hashmap *map) {}
void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix) {}
void emit_obj_refs_json(struct hashmap *map, __u32 id, json_writer_t *json_writer) {}
  22. #else /* BPFTOOL_WITHOUT_SKELETONS */
  23. #include "pid_iter.skel.h"
  24. static void add_ref(struct hashmap *map, struct pid_iter_entry *e)
  25. {
  26. struct hashmap_entry *entry;
  27. struct obj_refs *refs;
  28. struct obj_ref *ref;
  29. int err, i;
  30. void *tmp;
  31. hashmap__for_each_key_entry(map, entry, u32_as_hash_field(e->id)) {
  32. refs = entry->value;
  33. for (i = 0; i < refs->ref_cnt; i++) {
  34. if (refs->refs[i].pid == e->pid)
  35. return;
  36. }
  37. tmp = realloc(refs->refs, (refs->ref_cnt + 1) * sizeof(*ref));
  38. if (!tmp) {
  39. p_err("failed to re-alloc memory for ID %u, PID %d, COMM %s...",
  40. e->id, e->pid, e->comm);
  41. return;
  42. }
  43. refs->refs = tmp;
  44. ref = &refs->refs[refs->ref_cnt];
  45. ref->pid = e->pid;
  46. memcpy(ref->comm, e->comm, sizeof(ref->comm));
  47. refs->ref_cnt++;
  48. return;
  49. }
  50. /* new ref */
  51. refs = calloc(1, sizeof(*refs));
  52. if (!refs) {
  53. p_err("failed to alloc memory for ID %u, PID %d, COMM %s...",
  54. e->id, e->pid, e->comm);
  55. return;
  56. }
  57. refs->refs = malloc(sizeof(*refs->refs));
  58. if (!refs->refs) {
  59. free(refs);
  60. p_err("failed to alloc memory for ID %u, PID %d, COMM %s...",
  61. e->id, e->pid, e->comm);
  62. return;
  63. }
  64. ref = &refs->refs[0];
  65. ref->pid = e->pid;
  66. memcpy(ref->comm, e->comm, sizeof(ref->comm));
  67. refs->ref_cnt = 1;
  68. refs->has_bpf_cookie = e->has_bpf_cookie;
  69. refs->bpf_cookie = e->bpf_cookie;
  70. err = hashmap__append(map, u32_as_hash_field(e->id), refs);
  71. if (err)
  72. p_err("failed to append entry to hashmap for ID %u: %s",
  73. e->id, strerror(errno));
  74. }
/* libbpf print callback that discards every message. Installed around
 * skeleton load so probing for bpf_iter support does not pollute the
 * output with libbpf errors on kernels lacking the feature.
 */
static int __printf(2, 0)
libbpf_print_none(__maybe_unused enum libbpf_print_level level,
		  __maybe_unused const char *format,
		  __maybe_unused va_list args)
{
	return 0;
}
/* Build a map of object-ID -> obj_refs describing which processes hold
 * references to BPF objects of the given @type. Loads and attaches the
 * pid_iter BPF skeleton, reads fixed-size pid_iter_entry records from a
 * bpf_iter session, and folds each into the hashmap via add_ref().
 *
 * Returns 0 on success (including the "kernel has no BPF iterator
 * support" case, which deliberately succeeds with an empty map so
 * callers degrade gracefully), negative errno-style value on error.
 * On success *map is owned by the caller; free with
 * delete_obj_refs_table().
 */
int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
{
	struct pid_iter_entry *e;
	/* largest multiple of the entry size that fits in 4 KiB, so each
	 * read() yields only whole records
	 */
	char buf[4096 / sizeof(*e) * sizeof(*e)];
	struct pid_iter_bpf *skel;
	int err, ret, fd = -1, i;
	libbpf_print_fn_t default_print;

	*map = hashmap__new(hash_fn_for_key_as_id, equal_fn_for_key_as_id, NULL);
	if (IS_ERR(*map)) {
		p_err("failed to create hashmap for PID references");
		return -1;
	}
	set_max_rlimit();

	skel = pid_iter_bpf__open();
	if (!skel) {
		p_err("failed to open PID iterator skeleton");
		return -1;
	}

	/* filter by object type inside the BPF program */
	skel->rodata->obj_type = type;

	/* we don't want output polluted with libbpf errors if bpf_iter is not
	 * supported
	 */
	default_print = libbpf_set_print(libbpf_print_none);
	err = pid_iter_bpf__load(skel);
	libbpf_set_print(default_print);
	if (err) {
		/* too bad, kernel doesn't support BPF iterators yet */
		err = 0;
		goto out;
	}
	err = pid_iter_bpf__attach(skel);
	if (err) {
		/* if we loaded above successfully, attach has to succeed */
		p_err("failed to attach PID iterator: %d", err);
		goto out;
	}

	fd = bpf_iter_create(bpf_link__fd(skel->links.iter));
	if (fd < 0) {
		err = -errno;
		p_err("failed to create PID iterator session: %d", err);
		goto out;
	}

	/* drain the iterator: each read returns zero or more whole entries */
	while (true) {
		ret = read(fd, buf, sizeof(buf));
		if (ret < 0) {
			if (errno == EAGAIN)
				continue;
			err = -errno;
			p_err("failed to read PID iterator output: %d", err);
			goto out;
		}
		if (ret == 0)
			break;
		if (ret % sizeof(*e)) {
			err = -EINVAL;
			p_err("invalid PID iterator output format");
			goto out;
		}
		ret /= sizeof(*e);

		e = (void *)buf;
		for (i = 0; i < ret; i++, e++) {
			add_ref(*map, e);
		}
	}
	err = 0;
out:
	if (fd >= 0)
		close(fd);
	pid_iter_bpf__destroy(skel);
	return err;
}
  153. void delete_obj_refs_table(struct hashmap *map)
  154. {
  155. struct hashmap_entry *entry;
  156. size_t bkt;
  157. if (!map)
  158. return;
  159. hashmap__for_each_entry(map, entry, bkt) {
  160. struct obj_refs *refs = entry->value;
  161. free(refs->refs);
  162. free(refs);
  163. }
  164. hashmap__free(map);
  165. }
  166. void emit_obj_refs_json(struct hashmap *map, __u32 id,
  167. json_writer_t *json_writer)
  168. {
  169. struct hashmap_entry *entry;
  170. if (hashmap__empty(map))
  171. return;
  172. hashmap__for_each_key_entry(map, entry, u32_as_hash_field(id)) {
  173. struct obj_refs *refs = entry->value;
  174. int i;
  175. if (refs->ref_cnt == 0)
  176. break;
  177. if (refs->has_bpf_cookie)
  178. jsonw_lluint_field(json_writer, "bpf_cookie", refs->bpf_cookie);
  179. jsonw_name(json_writer, "pids");
  180. jsonw_start_array(json_writer);
  181. for (i = 0; i < refs->ref_cnt; i++) {
  182. struct obj_ref *ref = &refs->refs[i];
  183. jsonw_start_object(json_writer);
  184. jsonw_int_field(json_writer, "pid", ref->pid);
  185. jsonw_string_field(json_writer, "comm", ref->comm);
  186. jsonw_end_object(json_writer);
  187. }
  188. jsonw_end_array(json_writer);
  189. break;
  190. }
  191. }
  192. void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix)
  193. {
  194. struct hashmap_entry *entry;
  195. if (hashmap__empty(map))
  196. return;
  197. hashmap__for_each_key_entry(map, entry, u32_as_hash_field(id)) {
  198. struct obj_refs *refs = entry->value;
  199. int i;
  200. if (refs->ref_cnt == 0)
  201. break;
  202. if (refs->has_bpf_cookie)
  203. printf("\n\tbpf_cookie %llu", (unsigned long long) refs->bpf_cookie);
  204. printf("%s", prefix);
  205. for (i = 0; i < refs->ref_cnt; i++) {
  206. struct obj_ref *ref = &refs->refs[i];
  207. printf("%s%s(%d)", i == 0 ? "" : ", ", ref->comm, ref->pid);
  208. }
  209. break;
  210. }
  211. }
  212. #endif