  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __LIBPERF_INTERNAL_EVSEL_H
  3. #define __LIBPERF_INTERNAL_EVSEL_H
  4. #include <linux/types.h>
  5. #include <linux/perf_event.h>
  6. #include <stdbool.h>
  7. #include <sys/types.h>
  8. #include <internal/cpumap.h>
  9. struct perf_thread_map;
  10. struct xyarray;
/*
 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
 * more than one entry in the evlist.
 */
struct perf_sample_id {
	/* Linkage into the evlist's sample-ID hash table. */
	struct hlist_node	 node;
	/* The kernel-assigned sample ID (PERF_SAMPLE_ID) this entry maps. */
	u64			 id;
	/* Back-pointer to the event selector that owns this ID. */
	struct perf_evsel	*evsel;
	/*
	 * 'idx' will be used for AUX area sampling. A sample will have AUX area
	 * data that will be queued for decoding, where there are separate
	 * queues for each CPU (per-cpu tracing) or task (per-thread tracing).
	 * The sample ID can be used to lookup 'idx' which is effectively the
	 * queue number.
	 */
	int			 idx;
	/* CPU the sample ID is associated with. */
	struct perf_cpu		 cpu;
	/* Thread the sample ID is associated with. */
	pid_t			 tid;
	/* Guest machine pid and VCPU, valid only if machine_pid is non-zero */
	pid_t			 machine_pid;
	struct perf_cpu		 vcpu;
	/* Holds total ID period value for PERF_SAMPLE_READ processing. */
	u64			 period;
};
/*
 * An event selector: one perf_event_attr plus the per-CPU/per-thread state
 * (fds, mmaps, sample IDs) needed to open and track that event.
 */
struct perf_evsel {
	/* Linkage into the owning evlist's list of events. */
	struct list_head	 node;
	/* The attributes passed to perf_event_open() for this event. */
	struct perf_event_attr	 attr;
	/* CPUs the event is opened on (after map propagation). */
	struct perf_cpu_map	*cpus;
	/* CPUs explicitly requested for this event, if any. */
	struct perf_cpu_map	*own_cpus;
	/* Threads the event is opened on. */
	struct perf_thread_map	*threads;
	/* Per-(cpu, thread) file descriptors from perf_event_open(). */
	struct xyarray		*fd;
	/* Per-(cpu, thread) mmap state. */
	struct xyarray		*mmap;
	/* Per-(cpu, thread) struct perf_sample_id entries. */
	struct xyarray		*sample_id;
	/* Array of 'ids' sample IDs allocated by perf_evsel__alloc_id(). */
	u64			*id;
	/* Number of entries in 'id'. */
	u32			 ids;
	/* Group leader event; presumably points to self if ungrouped — TODO confirm with callers. */
	struct perf_evsel	*leader;
	/* parse modifier helper */
	int			 nr_members;
	/*
	 * system_wide is for events that need to be on every CPU, irrespective
	 * of user requested CPUs or threads. Map propagation will set cpus to
	 * this event's own_cpus, whereby they will contribute to evlist
	 * all_cpus.
	 */
	bool			 system_wide;
	/*
	 * Some events, for example uncore events, require a CPU.
	 * i.e. it cannot be the 'any CPU' value of -1.
	 */
	bool			 requires_cpu;
	/* Index of this evsel, set by perf_evsel__init() (see prototype below). */
	int			 idx;
};
/* Initialize an already-allocated evsel with 'attr'; 'idx' is stored in evsel->idx. */
void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
		      int idx);
/* Allocate the ncpus x nthreads 'fd' xyarray. Returns 0 on success, negative on error. */
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
/* Close every open fd in evsel->fd (does not free the array). */
void perf_evsel__close_fd(struct perf_evsel *evsel);
/* Free the 'fd' xyarray allocated by perf_evsel__alloc_fd(). */
void perf_evsel__free_fd(struct perf_evsel *evsel);
/* Size in bytes of one read() of this event, derived from attr.read_format. */
int perf_evsel__read_size(struct perf_evsel *evsel);
/* Apply an event filter string to every open fd. Returns 0 on success. */
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter);
/* Allocate 'sample_id' and the 'id' array for ncpus x nthreads. Returns 0 on success. */
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
/* Free the sample-ID storage allocated by perf_evsel__alloc_id(). */
void perf_evsel__free_id(struct perf_evsel *evsel);
  72. #endif /* __LIBPERF_INTERNAL_EVSEL_H */