evlist.c

// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <poll.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>
#include "internal.h"

void perf_evlist__init(struct perf_evlist *evlist)
{
        INIT_LIST_HEAD(&evlist->entries);
        evlist->nr_entries = 0;
        fdarray__init(&evlist->pollfd, 64);
        perf_evlist__reset_id_hash(evlist);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
                                          struct perf_evsel *evsel)
{
        /*
         * We already have cpus for evsel (via PMU sysfs) so
         * keep it, if there's no target cpu list defined.
         */
        if (evsel->system_wide) {
                perf_cpu_map__put(evsel->cpus);
                evsel->cpus = perf_cpu_map__new(NULL);
        } else if (!evsel->own_cpus || evlist->has_user_cpus ||
                   (!evsel->requires_cpu && perf_cpu_map__empty(evlist->user_requested_cpus))) {
                perf_cpu_map__put(evsel->cpus);
                evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
        } else if (evsel->cpus != evsel->own_cpus) {
                perf_cpu_map__put(evsel->cpus);
                evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
        }

        if (evsel->system_wide) {
                perf_thread_map__put(evsel->threads);
                evsel->threads = perf_thread_map__new_dummy();
        } else {
                perf_thread_map__put(evsel->threads);
                evsel->threads = perf_thread_map__get(evlist->threads);
        }

        evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        evlist->needs_map_propagation = true;

        perf_evlist__for_each_evsel(evlist, evsel)
                __perf_evlist__propagate_maps(evlist, evsel);
}
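
/*
 * For illustration: a system-wide evsel always ends up with all online CPUs
 * and a dummy thread map; an evsel that brought its own PMU cpu map keeps it
 * unless the user asked for specific CPUs (or the event doesn't require a CPU
 * and no CPU list was requested), in which case the user-requested map wins.
 */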

void perf_evlist__add(struct perf_evlist *evlist,
                      struct perf_evsel *evsel)
{
        evsel->idx = evlist->nr_entries;
        list_add_tail(&evsel->node, &evlist->entries);
        evlist->nr_entries += 1;

        if (evlist->needs_map_propagation)
                __perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__remove(struct perf_evlist *evlist,
                         struct perf_evsel *evsel)
{
        list_del_init(&evsel->node);
        evlist->nr_entries -= 1;
}

struct perf_evlist *perf_evlist__new(void)
{
        struct perf_evlist *evlist = zalloc(sizeof(*evlist));

        if (evlist != NULL)
                perf_evlist__init(evlist);

        return evlist;
}

struct perf_evsel *
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
{
        struct perf_evsel *next;

        if (!prev) {
                next = list_first_entry(&evlist->entries,
                                        struct perf_evsel,
                                        node);
        } else {
                next = list_next_entry(prev, node);
        }

        /* Empty list is noticed here so don't need checking on entry. */
        if (&next->node == &evlist->entries)
                return NULL;

        return next;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *n;

        perf_evlist__for_each_entry_safe(evlist, n, pos) {
                list_del_init(&pos->node);
                perf_evsel__delete(pos);
        }

        evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
        perf_cpu_map__put(evlist->user_requested_cpus);
        perf_cpu_map__put(evlist->all_cpus);
        perf_thread_map__put(evlist->threads);
        evlist->user_requested_cpus = NULL;
        evlist->all_cpus = NULL;
        evlist->threads = NULL;
        fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
        if (evlist == NULL)
                return;

        perf_evlist__munmap(evlist);
        perf_evlist__close(evlist);
        perf_evlist__purge(evlist);
        perf_evlist__exit(evlist);
        free(evlist);
}

void perf_evlist__set_maps(struct perf_evlist *evlist,
                           struct perf_cpu_map *cpus,
                           struct perf_thread_map *threads)
{
        /*
         * Allow for the possibility that one or another of the maps isn't being
         * changed i.e. don't put it. Note we are assuming the maps that are
         * being applied are brand new and evlist is taking ownership of the
         * original reference count of 1. If that is not the case it is up to
         * the caller to increase the reference count.
         */
        if (cpus != evlist->user_requested_cpus) {
                perf_cpu_map__put(evlist->user_requested_cpus);
                evlist->user_requested_cpus = perf_cpu_map__get(cpus);
        }

        if (threads != evlist->threads) {
                perf_thread_map__put(evlist->threads);
                evlist->threads = perf_thread_map__get(threads);
        }

        perf_evlist__propagate_maps(evlist);
}
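
/*
 * For illustration, a typical hand-off of brand-new maps (the case the
 * comment above assumes):
 *
 *      struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
 *      struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *
 *      perf_evlist__set_maps(evlist, cpus, threads);
 *
 * Passing the same pointer again leaves that map untouched; only a changed
 * map is put and replaced.
 */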

int perf_evlist__open(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int err;

        perf_evlist__for_each_entry(evlist, evsel) {
                err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
                if (err < 0)
                        goto out_err;
        }

        return 0;

out_err:
        perf_evlist__close(evlist);
        return err;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        perf_evlist__for_each_entry_reverse(evlist, evsel)
                perf_evsel__close(evsel);
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        perf_evlist__for_each_entry(evlist, evsel)
                perf_evsel__enable(evsel);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        perf_evlist__for_each_entry(evlist, evsel)
                perf_evsel__disable(evsel);
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);

        return first->attr.read_format;
}

#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

static void perf_evlist__id_hash(struct perf_evlist *evlist,
                                 struct perf_evsel *evsel,
                                 int cpu, int thread, u64 id)
{
        int hash;
        struct perf_sample_id *sid = SID(evsel, cpu, thread);

        sid->id = id;
        sid->evsel = evsel;
        hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
        hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
{
        int i;

        for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
                INIT_HLIST_HEAD(&evlist->heads[i]);
}

void perf_evlist__id_add(struct perf_evlist *evlist,
                         struct perf_evsel *evsel,
                         int cpu, int thread, u64 id)
{
        perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
        evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
                           struct perf_evsel *evsel,
                           int cpu, int thread, int fd)
{
        u64 read_data[4] = { 0, };
        int id_idx = 1; /* The first entry is the counter value */
        u64 id;
        int ret;

        ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
        if (!ret)
                goto add;

        if (errno != ENOTTY)
                return -1;

        /* Legacy way to get the event id. All hail to old kernels! */

        /*
         * This way does not work with group format read, so bail
         * out in that case.
         */
        if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
                return -1;

        if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
            read(fd, &read_data, sizeof(read_data)) == -1)
                return -1;

        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                ++id_idx;
        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                ++id_idx;

        id = read_data[id_idx];

add:
        perf_evlist__id_add(evlist, evsel, cpu, thread, id);
        return 0;
}
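
/*
 * Layout of the legacy read() fallback above when the event was opened with
 * PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING:
 *
 *      read_data[0] = counter value
 *      read_data[1] = time enabled    (id_idx becomes 2)
 *      read_data[2] = time running    (id_idx becomes 3)
 *      read_data[3] = event id
 */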

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
        int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
        int nr_threads = perf_thread_map__nr(evlist->threads);
        int nfds = 0;
        struct perf_evsel *evsel;

        perf_evlist__for_each_entry(evlist, evsel) {
                if (evsel->system_wide)
                        nfds += nr_cpus;
                else
                        nfds += nr_cpus * nr_threads;
        }

        if (fdarray__available_entries(&evlist->pollfd) < nfds &&
            fdarray__grow(&evlist->pollfd, nfds) < 0)
                return -ENOMEM;

        return 0;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
                            void *ptr, short revent, enum fdarray_flags flags)
{
        int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);

        if (pos >= 0) {
                evlist->pollfd.priv[pos].ptr = ptr;
                fcntl(fd, F_SETFL, O_NONBLOCK);
        }

        return pos;
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
                                         void *arg __maybe_unused)
{
        struct perf_mmap *map = fda->priv[fd].ptr;

        if (map)
                perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
        return fdarray__filter(&evlist->pollfd, revents_and_mask,
                               perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
        return fdarray__poll(&evlist->pollfd, timeout);
}

static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
{
        int i;
        struct perf_mmap *map;

        map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
        if (!map)
                return NULL;

        for (i = 0; i < evlist->nr_mmaps; i++) {
                struct perf_mmap *prev = i ? &map[i - 1] : NULL;

                /*
                 * When the perf_mmap() call is made we grab one refcount, plus
                 * one extra to let perf_mmap__consume() get the last
                 * events after all real references (perf_mmap__get()) are
                 * dropped.
                 *
                 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
                 * thus does perf_mmap__get() on it.
                 */
                perf_mmap__init(&map[i], prev, overwrite, NULL);
        }

        return map;
}

static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
{
        struct perf_sample_id *sid = SID(evsel, cpu, thread);

        sid->idx = idx;
        sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
        sid->tid = perf_thread_map__pid(evsel->threads, thread);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{
        struct perf_mmap *maps;

        maps = overwrite ? evlist->mmap_ovw : evlist->mmap;

        if (!maps) {
                maps = perf_evlist__alloc_mmap(evlist, overwrite);
                if (!maps)
                        return NULL;

                if (overwrite)
                        evlist->mmap_ovw = maps;
                else
                        evlist->mmap = maps;
        }

        return &maps[idx];
}

#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
                          int output, struct perf_cpu cpu)
{
        return perf_mmap__mmap(map, mp, output, cpu);
}

static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
                                        bool overwrite)
{
        if (overwrite)
                evlist->mmap_ovw_first = map;
        else
                evlist->mmap_first = map;
}

static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
               int idx, struct perf_mmap_param *mp, int cpu_idx,
               int thread, int *_output, int *_output_overwrite, int *nr_mmaps)
{
        struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx);
        struct perf_evsel *evsel;
        int revent;

        perf_evlist__for_each_entry(evlist, evsel) {
                bool overwrite = evsel->attr.write_backward;
                enum fdarray_flags flgs;
                struct perf_mmap *map;
                int *output, fd, cpu;

                if (evsel->system_wide && thread)
                        continue;

                cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
                if (cpu == -1)
                        continue;

                map = ops->get(evlist, overwrite, idx);
                if (map == NULL)
                        return -ENOMEM;

                if (overwrite) {
                        mp->prot = PROT_READ;
                        output = _output_overwrite;
                } else {
                        mp->prot = PROT_READ | PROT_WRITE;
                        output = _output;
                }

                fd = FD(evsel, cpu, thread);

                if (*output == -1) {
                        *output = fd;

                        /*
                         * The last one will be done at perf_mmap__consume(), so that we
                         * make sure we don't prevent tools from consuming every last event in
                         * the ring buffer.
                         *
                         * I.e. we can get the POLLHUP meaning that the fd doesn't exist
                         * anymore, but the last events for it are still in the ring buffer,
                         * waiting to be consumed.
                         *
                         * Tools can choose to ignore this at their own discretion, but the
                         * evlist layer can't just drop it when filtering events in
                         * perf_evlist__filter_pollfd().
                         */
                        refcount_set(&map->refcnt, 2);

                        if (ops->idx)
                                ops->idx(evlist, evsel, mp, idx);

                        /* Debug message used by test scripts */
                        pr_debug("idx %d: mmapping fd %d\n", idx, *output);
                        if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
                                return -1;

                        *nr_mmaps += 1;

                        if (!idx)
                                perf_evlist__set_mmap_first(evlist, map, overwrite);
                } else {
                        /* Debug message used by test scripts */
                        pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output);
                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
                                return -1;

                        perf_mmap__get(map);
                }

                revent = !overwrite ? POLLIN : 0;

                flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
                if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
                        perf_mmap__put(map);
                        return -1;
                }

                if (evsel->attr.read_format & PERF_FORMAT_ID) {
                        if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
                                return -1;
                        perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
                }
        }

        return 0;
}

static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
                struct perf_mmap_param *mp)
{
        int nr_threads = perf_thread_map__nr(evlist->threads);
        int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
        int cpu, thread, idx = 0;
        int nr_mmaps = 0;

        pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n",
                 __func__, nr_cpus, nr_threads);

        /* per-thread mmaps */
        for (thread = 0; thread < nr_threads; thread++, idx++) {
                int output = -1;
                int output_overwrite = -1;

                if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
                                   &output_overwrite, &nr_mmaps))
                        goto out_unmap;
        }

        /* system-wide mmaps i.e. per-cpu */
        for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
                int output = -1;
                int output_overwrite = -1;

                if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
                                   &output_overwrite, &nr_mmaps))
                        goto out_unmap;
        }

        if (nr_mmaps != evlist->nr_mmaps)
                pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

        return 0;

out_unmap:
        perf_evlist__munmap(evlist);
        return -1;
}

static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
             struct perf_mmap_param *mp)
{
        int nr_threads = perf_thread_map__nr(evlist->threads);
        int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
        int nr_mmaps = 0;
        int cpu, thread;

        pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                int output = -1;
                int output_overwrite = -1;

                for (thread = 0; thread < nr_threads; thread++) {
                        if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
                                           thread, &output, &output_overwrite, &nr_mmaps))
                                goto out_unmap;
                }
        }

        if (nr_mmaps != evlist->nr_mmaps)
                pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

        return 0;

out_unmap:
        perf_evlist__munmap(evlist);
        return -1;
}

static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
        int nr_mmaps;

        /* One for each CPU */
        nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
        if (perf_cpu_map__empty(evlist->all_cpus)) {
                /* Plus one for each thread */
                nr_mmaps += perf_thread_map__nr(evlist->threads);
                /* Minus the per-thread CPU (-1) */
                nr_mmaps -= 1;
        }

        return nr_mmaps;
}
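
/*
 * Worked example: a per-CPU session on a 4-CPU system mmaps 4 rings (one per
 * entry in all_cpus); a per-thread session has only the dummy -1 CPU, so the
 * count becomes 1 + nr_threads - 1, i.e. one ring per thread.
 */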

int perf_evlist__mmap_ops(struct perf_evlist *evlist,
                          struct perf_evlist_mmap_ops *ops,
                          struct perf_mmap_param *mp)
{
        const struct perf_cpu_map *cpus = evlist->all_cpus;
        struct perf_evsel *evsel;

        if (!ops || !ops->get || !ops->mmap)
                return -EINVAL;

        mp->mask = evlist->mmap_len - page_size - 1;

        evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

        perf_evlist__for_each_entry(evlist, evsel) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
                    perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
                        return -ENOMEM;
        }

        if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                return -ENOMEM;

        if (perf_cpu_map__empty(cpus))
                return mmap_per_thread(evlist, ops, mp);

        return mmap_per_cpu(evlist, ops, mp);
}

int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
{
        struct perf_mmap_param mp;
        struct perf_evlist_mmap_ops ops = {
                .get  = perf_evlist__mmap_cb_get,
                .mmap = perf_evlist__mmap_cb_mmap,
        };

        evlist->mmap_len = (pages + 1) * page_size;

        return perf_evlist__mmap_ops(evlist, &ops, &mp);
}
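
/*
 * A minimal sampling loop built on the API above, for illustration. This is
 * only a sketch: it assumes an evlist already populated with perf_evlist__add()
 * and maps set via perf_evlist__set_maps(), and it uses the public read
 * helpers from <perf/mmap.h> and the perf_evlist__for_each_mmap() iterator
 * from <perf/evlist.h>:
 *
 *      union perf_event *event;
 *      struct perf_mmap *map;
 *
 *      perf_evlist__open(evlist);
 *      perf_evlist__mmap(evlist, 4);           // 4 data pages per ring buffer
 *      perf_evlist__enable(evlist);
 *      // ... workload runs ...
 *      perf_evlist__disable(evlist);
 *
 *      perf_evlist__for_each_mmap(evlist, map, false) {
 *              if (perf_mmap__read_init(map) < 0)
 *                      continue;
 *              while ((event = perf_mmap__read_event(map)) != NULL) {
 *                      // process event->header.type here
 *                      perf_mmap__consume(map);
 *              }
 *              perf_mmap__read_done(map);
 *      }
 *
 *      perf_evlist__munmap(evlist);
 *      perf_evlist__close(evlist);
 */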

void perf_evlist__munmap(struct perf_evlist *evlist)
{
        int i;

        if (evlist->mmap) {
                for (i = 0; i < evlist->nr_mmaps; i++)
                        perf_mmap__munmap(&evlist->mmap[i]);
        }

        if (evlist->mmap_ovw) {
                for (i = 0; i < evlist->nr_mmaps; i++)
                        perf_mmap__munmap(&evlist->mmap_ovw[i]);
        }

        zfree(&evlist->mmap);
        zfree(&evlist->mmap_ovw);
}

struct perf_mmap*
perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
                       bool overwrite)
{
        if (map)
                return map->next;

        return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}

void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader)
{
        struct perf_evsel *first, *last, *evsel;

        first = list_first_entry(list, struct perf_evsel, node);
        last = list_last_entry(list, struct perf_evsel, node);

        leader->nr_members = last->idx - first->idx + 1;

        __perf_evlist__for_each_entry(list, evsel)
                evsel->leader = leader;
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
        if (evlist->nr_entries) {
                struct perf_evsel *first = list_entry(evlist->entries.next,
                                                      struct perf_evsel, node);

                evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
                __perf_evlist__set_leader(&evlist->entries, first);
        }
}
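
/*
 * For illustration: with three events added in order, perf_evlist__set_leader()
 * makes the first one lead all of them (nr_members == 3, nr_groups == 1) and
 * points every evsel->leader at that first entry.
 */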