perf session: Move the global threads list to perf_session
So that we can process two perf.data files. We still need to add an O_MMAP mode for perf_session so that we can do all the mmap stuff in it.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1260741029-4430-5-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit b3165f4144
parent ec91336973
committed by Ingo Molnar
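The pattern the diff below applies throughout builtin-sched.c is uniform: each trace handler gains a struct perf_session * parameter, and lookups that previously went through the global threads__findnew(pid) now go through perf_session__findnew(session, pid), so the thread table belongs to the session being read. The following is a minimal, self-contained sketch of that idea, not the actual tools/perf code: it models the per-session thread table with a plain linked list, whereas the real perf_session keeps its threads in an rbtree with a last-match cache.

/*
 * Hypothetical, simplified model of the change -- not the tools/perf
 * implementation.  The point is only that the thread table is a member
 * of the session, so two sessions (two perf.data files) can be
 * processed side by side without sharing global state.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

struct thread {
	pid_t		 pid;
	struct thread	*next;
};

struct perf_session {
	struct thread	*threads;	/* per-session, no longer a global list */
	struct thread	*last_match;	/* cache of the most recent lookup */
};

/* Find the thread for 'pid' in this session, creating it on first sight. */
static struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
{
	struct thread *th;

	if (self->last_match && self->last_match->pid == pid)
		return self->last_match;

	for (th = self->threads; th != NULL; th = th->next)
		if (th->pid == pid)
			return self->last_match = th;

	th = calloc(1, sizeof(*th));
	if (th == NULL)
		return NULL;
	th->pid = pid;
	th->next = self->threads;
	self->threads = th;
	return self->last_match = th;
}

int main(void)
{
	/* Two independent sessions, as when comparing two perf.data files. */
	struct perf_session a = { NULL, NULL }, b = { NULL, NULL };

	struct thread *ta = perf_session__findnew(&a, 1234);
	struct thread *tb = perf_session__findnew(&b, 1234);

	/* Same pid, but distinct thread objects owned by distinct sessions. */
	printf("same object? %s\n", ta == tb ? "yes" : "no");
	return 0;
}

With a single global list, reading a second perf.data file would mix its threads with those of the first; keeping the list inside perf_session removes that coupling.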
tools/perf/builtin-sched.c
@@ -730,18 +730,21 @@ struct trace_migrate_task_event {
 
 struct trace_sched_handler {
 	void (*switch_event)(struct trace_switch_event *,
+			     struct perf_session *,
 			     struct event *,
 			     int cpu,
 			     u64 timestamp,
 			     struct thread *thread);
 
 	void (*runtime_event)(struct trace_runtime_event *,
+			      struct perf_session *,
 			      struct event *,
 			      int cpu,
 			      u64 timestamp,
 			      struct thread *thread);
 
 	void (*wakeup_event)(struct trace_wakeup_event *,
+			     struct perf_session *,
 			     struct event *,
 			     int cpu,
 			     u64 timestamp,
@@ -754,6 +757,7 @@ struct trace_sched_handler {
 			   struct thread *thread);
 
 	void (*migrate_task_event)(struct trace_migrate_task_event *,
+				    struct perf_session *session,
 				    struct event *,
 				    int cpu,
 				    u64 timestamp,
@@ -763,6 +767,7 @@ struct trace_sched_handler {
 
 static void
 replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
+		    struct perf_session *session __used,
 		    struct event *event,
 		    int cpu __used,
 		    u64 timestamp __used,
@@ -789,6 +794,7 @@ static u64 cpu_last_switched[MAX_CPUS];
 
 static void
 replay_switch_event(struct trace_switch_event *switch_event,
+		    struct perf_session *session __used,
 		    struct event *event,
 		    int cpu,
 		    u64 timestamp,
@@ -1022,6 +1028,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
 
 static void
 latency_switch_event(struct trace_switch_event *switch_event,
+		     struct perf_session *session,
 		     struct event *event __used,
 		     int cpu,
 		     u64 timestamp,
@@ -1045,8 +1052,8 @@ latency_switch_event(struct trace_switch_event *switch_event,
 		die("hm, delta: %Ld < 0 ?\n", delta);
 
 
-	sched_out = threads__findnew(switch_event->prev_pid);
-	sched_in = threads__findnew(switch_event->next_pid);
+	sched_out = perf_session__findnew(session, switch_event->prev_pid);
+	sched_in = perf_session__findnew(session, switch_event->next_pid);
 
 	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
 	if (!out_events) {
@@ -1074,12 +1081,13 @@ latency_switch_event(struct trace_switch_event *switch_event,
 
 static void
 latency_runtime_event(struct trace_runtime_event *runtime_event,
+		     struct perf_session *session,
 		     struct event *event __used,
 		     int cpu,
 		     u64 timestamp,
 		     struct thread *this_thread __used)
 {
-	struct thread *thread = threads__findnew(runtime_event->pid);
+	struct thread *thread = perf_session__findnew(session, runtime_event->pid);
 	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
 
 	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
@@ -1096,6 +1104,7 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,
 
 static void
 latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
+		     struct perf_session *session,
 		     struct event *__event __used,
 		     int cpu __used,
 		     u64 timestamp,
@@ -1109,7 +1118,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 	if (!wakeup_event->success)
 		return;
 
-	wakee = threads__findnew(wakeup_event->pid);
+	wakee = perf_session__findnew(session, wakeup_event->pid);
 	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(wakee);
@@ -1143,6 +1152,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 
 static void
 latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
+			   struct perf_session *session,
 			   struct event *__event __used,
 			   int cpu __used,
 			   u64 timestamp,
@@ -1158,7 +1168,7 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
 	if (profile_cpu == -1)
 		return;
 
-	migrant = threads__findnew(migrate_task_event->pid);
+	migrant = perf_session__findnew(session, migrate_task_event->pid);
 	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(migrant);
@@ -1353,7 +1363,7 @@ static void sort_lat(void)
 static struct trace_sched_handler *trace_handler;
 
 static void
-process_sched_wakeup_event(void *data,
+process_sched_wakeup_event(void *data, struct perf_session *session,
 			   struct event *event,
 			   int cpu __used,
 			   u64 timestamp __used,
@@ -1370,7 +1380,8 @@ process_sched_wakeup_event(void *data,
 	FILL_FIELD(wakeup_event, cpu, event, data);
 
 	if (trace_handler->wakeup_event)
-		trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
+		trace_handler->wakeup_event(&wakeup_event, session, event,
+					    cpu, timestamp, thread);
 }
 
 /*
@@ -1388,6 +1399,7 @@ static char next_shortname2 = '0';
 
 static void
 map_switch_event(struct trace_switch_event *switch_event,
+		 struct perf_session *session,
 		 struct event *event __used,
 		 int this_cpu,
 		 u64 timestamp,
@@ -1415,8 +1427,8 @@ map_switch_event(struct trace_switch_event *switch_event,
 		die("hm, delta: %Ld < 0 ?\n", delta);
 
 
-	sched_out = threads__findnew(switch_event->prev_pid);
-	sched_in = threads__findnew(switch_event->next_pid);
+	sched_out = perf_session__findnew(session, switch_event->prev_pid);
+	sched_in = perf_session__findnew(session, switch_event->next_pid);
 
 	curr_thread[this_cpu] = sched_in;
 
@@ -1466,7 +1478,7 @@ map_switch_event(struct trace_switch_event *switch_event,
 
 
 static void
-process_sched_switch_event(void *data,
+process_sched_switch_event(void *data, struct perf_session *session,
 			   struct event *event,
 			   int this_cpu,
 			   u64 timestamp __used,
@@ -1493,13 +1505,14 @@ process_sched_switch_event(void *data,
 			nr_context_switch_bugs++;
 	}
 	if (trace_handler->switch_event)
-		trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, thread);
+		trace_handler->switch_event(&switch_event, session, event,
+					    this_cpu, timestamp, thread);
 
 	curr_pid[this_cpu] = switch_event.next_pid;
 }
 
 static void
-process_sched_runtime_event(void *data,
+process_sched_runtime_event(void *data, struct perf_session *session,
 			   struct event *event,
 			   int cpu __used,
 			   u64 timestamp __used,
@@ -1513,7 +1526,7 @@ process_sched_runtime_event(void *data,
 	FILL_FIELD(runtime_event, vruntime, event, data);
 
 	if (trace_handler->runtime_event)
-		trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
+		trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread);
 }
 
 static void
@@ -1533,7 +1546,8 @@ process_sched_fork_event(void *data,
 	FILL_FIELD(fork_event, child_pid, event, data);
 
 	if (trace_handler->fork_event)
-		trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
+		trace_handler->fork_event(&fork_event, event,
+					  cpu, timestamp, thread);
 }
 
 static void
@@ -1547,7 +1561,7 @@ process_sched_exit_event(struct event *event,
 }
 
 static void
-process_sched_migrate_task_event(void *data,
+process_sched_migrate_task_event(void *data, struct perf_session *session,
 				 struct event *event,
 				 int cpu __used,
 				 u64 timestamp __used,
@@ -1563,12 +1577,13 @@ process_sched_migrate_task_event(void *data,
 	FILL_FIELD(migrate_task_event, cpu, event, data);
 
 	if (trace_handler->migrate_task_event)
-		trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);
+		trace_handler->migrate_task_event(&migrate_task_event, session,
+						  event, cpu, timestamp, thread);
 }
 
 static void
-process_raw_event(event_t *raw_event __used, void *data,
-		  int cpu, u64 timestamp, struct thread *thread)
+process_raw_event(event_t *raw_event __used, struct perf_session *session,
+		  void *data, int cpu, u64 timestamp, struct thread *thread)
 {
 	struct event *event;
 	int type;
@@ -1578,23 +1593,22 @@ process_raw_event(event_t *raw_event __used, void *data,
 	event = trace_find_event(type);
 
 	if (!strcmp(event->name, "sched_switch"))
-		process_sched_switch_event(data, event, cpu, timestamp, thread);
+		process_sched_switch_event(data, session, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_stat_runtime"))
-		process_sched_runtime_event(data, event, cpu, timestamp, thread);
+		process_sched_runtime_event(data, session, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_wakeup"))
-		process_sched_wakeup_event(data, event, cpu, timestamp, thread);
+		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_wakeup_new"))
-		process_sched_wakeup_event(data, event, cpu, timestamp, thread);
+		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_process_fork"))
 		process_sched_fork_event(data, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_process_exit"))
 		process_sched_exit_event(event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_migrate_task"))
-		process_sched_migrate_task_event(data, event, cpu, timestamp, thread);
+		process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
 }
 
-static int process_sample_event(event_t *event,
-				struct perf_session *session __used)
+static int process_sample_event(event_t *event, struct perf_session *session)
 {
 	struct sample_data data;
 	struct thread *thread;
@@ -1615,7 +1629,7 @@ static int process_sample_event(event_t *event,
 		  (void *)(long)data.ip,
 		  (long long)data.period);
 
-	thread = threads__findnew(data.pid);
+	thread = perf_session__findnew(session, data.pid);
 	if (thread == NULL) {
 		pr_debug("problem processing %d event, skipping it.\n",
 			 event->header.type);
@@ -1627,7 +1641,7 @@ static int process_sample_event(event_t *event,
 	if (profile_cpu != -1 && profile_cpu != (int)data.cpu)
 		return 0;
 
-	process_raw_event(event, data.raw_data, data.cpu, data.time, thread);
+	process_raw_event(event, session, data.raw_data, data.cpu, data.time, thread);
 
 	return 0;
 }