perf top: Get rid of *_threaded() functions

Those _threaded() functions are needed to make hist tree handling
thread-safe, but AFAICS the only thing they do is force use of
the intermediate 'collapsed' tree.

The same can be achieved by setting sort__need_collapse to 1 in
cmd_top(), so there is no need to keep those _threaded() variants.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1368497347-9628-4-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Author:    Namhyung Kim
Date:      2013-05-14 11:09:01 +09:00
Committer: Arnaldo Carvalho de Melo
Parent:    6f29097f45
Commit:    3a5714f8b5

3 changed files with 19 additions and 52 deletions
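
As the description above notes, the whole thread-safety trick boils down to one
process-wide flag that forces use of the 'collapsed' tree. Below is a minimal,
self-contained sketch of that idea in plain C. It is not the perf code itself:
the struct fields, hists__pick_root() and main() are simplified placeholders for
illustration; only the sort__need_collapse name and the cmd_top() reference come
from the patch description.

/*
 * Sketch: instead of threading a 'bool threaded' argument through every
 * hist routine, a single global flag (sort__need_collapse) makes the code
 * always use the intermediate 'collapsed' tree.
 */
#include <stdbool.h>
#include <stdio.h>

static bool sort__need_collapse;	/* set once, e.g. from cmd_top() */

struct hists {
	int entries_in;			/* stands in for the input tree */
	int entries_collapsed;		/* stands in for the collapsed tree */
};

/*
 * With sort__need_collapse set, resorting always starts from the collapsed
 * tree, which is the same effect the removed 'threaded' flag used to force.
 */
static int *hists__pick_root(struct hists *hists)
{
	return sort__need_collapse ? &hists->entries_collapsed
				   : &hists->entries_in;
}

int main(void)
{
	struct hists h = { .entries_in = 1, .entries_collapsed = 2 };

	sort__need_collapse = true;	/* what cmd_top() sets up front */
	printf("resort root: %d\n", *hists__pick_root(&h));
	return 0;
}

Because the flag is global state set before any hists are populated, every
resort/decay path picks the collapsed tree unconditionally, which is why the
duplicated *_threaded() entry points in the diff below can go away.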

@@ -240,8 +240,7 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
 	return he->stat.period == 0;
 }
 
-static void __hists__decay_entries(struct hists *hists, bool zap_user,
-				   bool zap_kernel, bool threaded)
+void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
 {
 	struct rb_node *next = rb_first(&hists->entries);
 	struct hist_entry *n;
@@ -260,7 +259,7 @@ static void __hists__decay_entries(struct hists *hists, bool zap_user,
 		    !n->used) {
 			rb_erase(&n->rb_node, &hists->entries);
 
-			if (sort__need_collapse || threaded)
+			if (sort__need_collapse)
 				rb_erase(&n->rb_node_in, &hists->entries_collapsed);
 
 			hist_entry__free(n);
@@ -269,17 +268,6 @@ static void __hists__decay_entries(struct hists *hists, bool zap_user,
 	}
 }
 
-void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
-{
-	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
-}
-
-void hists__decay_entries_threaded(struct hists *hists,
-				   bool zap_user, bool zap_kernel)
-{
-	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
-}
-
 /*
  * histogram, sorted on item, collects periods
  */
@@ -613,13 +601,13 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
 	hists__filter_entry_by_symbol(hists, he);
 }
 
-static void __hists__collapse_resort(struct hists *hists, bool threaded)
+void hists__collapse_resort(struct hists *hists)
 {
 	struct rb_root *root;
 	struct rb_node *next;
 	struct hist_entry *n;
 
-	if (!sort__need_collapse && !threaded)
+	if (!sort__need_collapse)
 		return;
 
 	root = hists__get_rotate_entries_in(hists);
@@ -641,16 +629,6 @@ static void __hists__collapse_resort(struct hists *hists, bool threaded)
 	}
 }
 
-void hists__collapse_resort(struct hists *hists)
-{
-	return __hists__collapse_resort(hists, false);
-}
-
-void hists__collapse_resort_threaded(struct hists *hists)
-{
-	return __hists__collapse_resort(hists, true);
-}
-
 /*
  * reverse the map, sort on period.
  */
@@ -737,7 +715,7 @@ static void __hists__insert_output_entry(struct rb_root *entries,
 	rb_insert_color(&he->rb_node, entries);
 }
 
-static void __hists__output_resort(struct hists *hists, bool threaded)
+void hists__output_resort(struct hists *hists)
 {
 	struct rb_root *root;
 	struct rb_node *next;
@@ -746,7 +724,7 @@ static void __hists__output_resort(struct hists *hists, bool threaded)
 
 	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
 
-	if (sort__need_collapse || threaded)
+	if (sort__need_collapse)
 		root = &hists->entries_collapsed;
 	else
 		root = hists->entries_in;
@@ -767,16 +745,6 @@ static void __hists__output_resort(struct hists *hists, bool threaded)
 	}
 }
 
-void hists__output_resort(struct hists *hists)
-{
-	return __hists__output_resort(hists, false);
-}
-
-void hists__output_resort_threaded(struct hists *hists)
-{
-	return __hists__output_resort(hists, true);
-}
-
 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
 				       enum hist_filter filter)
 {