Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (34 commits)
  m68k: rename global variable vmalloc_end to m68k_vmalloc_end
  percpu: add missing per_cpu_ptr_to_phys() definition for UP
  percpu: Fix kdump failure if booted with percpu_alloc=page
  percpu: make misc percpu symbols unique
  percpu: make percpu symbols in ia64 unique
  percpu: make percpu symbols in powerpc unique
  percpu: make percpu symbols in x86 unique
  percpu: make percpu symbols in xen unique
  percpu: make percpu symbols in cpufreq unique
  percpu: make percpu symbols in oprofile unique
  percpu: make percpu symbols in tracer unique
  percpu: make percpu symbols under kernel/ and mm/ unique
  percpu: remove some sparse warnings
  percpu: make alloc_percpu() handle array types
  vmalloc: fix use of non-existent percpu variable in put_cpu_var()
  this_cpu: Use this_cpu_xx in trace_functions_graph.c
  this_cpu: Use this_cpu_xx for ftrace
  this_cpu: Use this_cpu_xx in nmi handling
  this_cpu: Use this_cpu operations in RCU
  this_cpu: Use this_cpu ops for VM statistics
  ...

Fix up trivial (famous last words) global per-cpu naming conflicts in
	arch/x86/kvm/svm.c
	mm/slab.c
This commit is contained in:
Linus Torvalds
2009-12-14 09:58:24 -08:00
79 changed files with 1222 additions and 978 deletions

View File

@@ -20,10 +20,10 @@
 #define BTS_BUFFER_SIZE (1 << 13)
-static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);
+static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
+static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);
-#define this_tracer per_cpu(tracer, smp_processor_id())
+#define this_tracer per_cpu(hwb_tracer, smp_processor_id())
 static int trace_hw_branches_enabled __read_mostly;
 static int trace_hw_branches_suspended __read_mostly;
@@ -32,12 +32,13 @@ static struct trace_array *hw_branch_trace __read_mostly;
 static void bts_trace_init_cpu(int cpu)
 {
-	per_cpu(tracer, cpu) =
-		ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
-				   NULL, (size_t)-1, BTS_KERNEL);
+	per_cpu(hwb_tracer, cpu) =
+		ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
+				   BTS_BUFFER_SIZE, NULL, (size_t)-1,
+				   BTS_KERNEL);
-	if (IS_ERR(per_cpu(tracer, cpu)))
-		per_cpu(tracer, cpu) = NULL;
+	if (IS_ERR(per_cpu(hwb_tracer, cpu)))
+		per_cpu(hwb_tracer, cpu) = NULL;
 }
 static int bts_trace_init(struct trace_array *tr)
@@ -51,7 +52,7 @@ static int bts_trace_init(struct trace_array *tr)
 	for_each_online_cpu(cpu) {
 		bts_trace_init_cpu(cpu);
-		if (likely(per_cpu(tracer, cpu)))
+		if (likely(per_cpu(hwb_tracer, cpu)))
 			trace_hw_branches_enabled = 1;
 	}
 	trace_hw_branches_suspended = 0;
@@ -67,9 +68,9 @@ static void bts_trace_reset(struct trace_array *tr)
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
-		if (likely(per_cpu(tracer, cpu))) {
-			ds_release_bts(per_cpu(tracer, cpu));
-			per_cpu(tracer, cpu) = NULL;
+		if (likely(per_cpu(hwb_tracer, cpu))) {
+			ds_release_bts(per_cpu(hwb_tracer, cpu));
+			per_cpu(hwb_tracer, cpu) = NULL;
 		}
 	}
 	trace_hw_branches_enabled = 0;
@@ -83,8 +84,8 @@ static void bts_trace_start(struct trace_array *tr)
 	get_online_cpus();
 	for_each_online_cpu(cpu)
-		if (likely(per_cpu(tracer, cpu)))
-			ds_resume_bts(per_cpu(tracer, cpu));
+		if (likely(per_cpu(hwb_tracer, cpu)))
+			ds_resume_bts(per_cpu(hwb_tracer, cpu));
 	trace_hw_branches_suspended = 0;
 	put_online_cpus();
 }
@@ -95,8 +96,8 @@ static void bts_trace_stop(struct trace_array *tr)
 	get_online_cpus();
 	for_each_online_cpu(cpu)
-		if (likely(per_cpu(tracer, cpu)))
-			ds_suspend_bts(per_cpu(tracer, cpu));
+		if (likely(per_cpu(hwb_tracer, cpu)))
+			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
 	trace_hw_branches_suspended = 1;
 	put_online_cpus();
 }
@@ -114,16 +115,16 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
 		bts_trace_init_cpu(cpu);
 		if (trace_hw_branches_suspended &&
-		    likely(per_cpu(tracer, cpu)))
-			ds_suspend_bts(per_cpu(tracer, cpu));
+		    likely(per_cpu(hwb_tracer, cpu)))
+			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
 		}
 		break;
 	case CPU_DOWN_PREPARE:
 		/* The notification is sent with interrupts enabled. */
-		if (likely(per_cpu(tracer, cpu))) {
-			ds_release_bts(per_cpu(tracer, cpu));
-			per_cpu(tracer, cpu) = NULL;
+		if (likely(per_cpu(hwb_tracer, cpu))) {
+			ds_release_bts(per_cpu(hwb_tracer, cpu));
+			per_cpu(hwb_tracer, cpu) = NULL;
 		}
 	}
@@ -258,8 +259,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
 	get_online_cpus();
 	for_each_online_cpu(cpu)
-		if (likely(per_cpu(tracer, cpu)))
-			ds_suspend_bts(per_cpu(tracer, cpu));
+		if (likely(per_cpu(hwb_tracer, cpu)))
+			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
 	/*
 	 * We need to collect the trace on the respective cpu since ftrace
 	 * implicitly adds the record for the current cpu.
@@ -268,8 +269,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
 	on_each_cpu(trace_bts_cpu, iter->tr, 1);
 	for_each_online_cpu(cpu)
-		if (likely(per_cpu(tracer, cpu)))
-			ds_resume_bts(per_cpu(tracer, cpu));
+		if (likely(per_cpu(hwb_tracer, cpu)))
+			ds_resume_bts(per_cpu(hwb_tracer, cpu));
 	put_online_cpus();
 }