Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (162 commits)
  tracing/kprobes: unregister_trace_probe needs to be called under mutex
  perf: expose event__process function
  perf events: Fix mmap offset determination
  perf, powerpc: fsl_emb: Restore setting perf_sample_data.period
  perf, powerpc: Convert the FSL driver to use local64_t
  perf tools: Don't keep unreferenced maps when unmaps are detected
  perf session: Invalidate last_match when removing threads from rb_tree
  perf session: Free the ref_reloc_sym memory at the right place
  x86,mmiotrace: Add support for tracing STOS instruction
  perf, sched migration: Librarize task states and event headers helpers
  perf, sched migration: Librarize the GUI class
  perf, sched migration: Make the GUI class client agnostic
  perf, sched migration: Make it vertically scrollable
  perf, sched migration: Parameterize cpu height and spacing
  perf, sched migration: Fix key bindings
  perf, sched migration: Ignore unhandled task states
  perf, sched migration: Handle ignored migrate out events
  perf: New migration tool overview
  tracing: Drop cpparg() macro
  perf: Use tracepoint_synchronize_unregister() to flush any pending tracepoint call
  ...

Fix up trivial conflicts in Makefile and drivers/cpufreq/cpufreq.c
include/asm-generic/local64.h (new file, 96 lines)
@@ -0,0 +1,96 @@
#ifndef _ASM_GENERIC_LOCAL64_H
#define _ASM_GENERIC_LOCAL64_H

#include <linux/percpu.h>
#include <asm/types.h>

/*
 * A signed long type for operations which are atomic for a single CPU.
 * Usually used in combination with per-cpu variables.
 *
 * This is the default implementation, which uses atomic64_t.  Which is
 * rather pointless.  The whole point behind local64_t is that some processors
 * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
 * running on this CPU.  local64_t allows exploitation of such capabilities.
 */

/* Implement in terms of atomics. */

#if BITS_PER_LONG == 64

#include <asm/local.h>

typedef struct {
	local_t a;
} local64_t;

#define LOCAL64_INIT(i)	{ LOCAL_INIT(i) }

#define local64_read(l)		local_read(&(l)->a)
#define local64_set(l,i)	local_set((&(l)->a),(i))
#define local64_inc(l)		local_inc(&(l)->a)
#define local64_dec(l)		local_dec(&(l)->a)
#define local64_add(i,l)	local_add((i),(&(l)->a))
#define local64_sub(i,l)	local_sub((i),(&(l)->a))

#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a))
#define local64_dec_and_test(l) local_dec_and_test(&(l)->a)
#define local64_inc_and_test(l) local_inc_and_test(&(l)->a)
#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a))
#define local64_add_return(i, l) local_add_return((i), (&(l)->a))
#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
#define local64_inc_return(l)	local_inc_return(&(l)->a)

#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
#define local64_xchg(l, n)	local_xchg((&(l)->a), (n))
#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
#define local64_inc_not_zero(l)	local_inc_not_zero(&(l)->a)

/* Non-atomic variants, ie. preemption disabled and won't be touched
 * in interrupt, etc.  Some archs can optimize this case well. */
#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))

#else /* BITS_PER_LONG != 64 */

#include <asm/atomic.h>

/* Don't use typedef: don't want them to be mixed with atomic_t's. */
typedef struct {
	atomic64_t a;
} local64_t;

#define LOCAL64_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local64_read(l)		atomic64_read(&(l)->a)
#define local64_set(l,i)	atomic64_set((&(l)->a),(i))
#define local64_inc(l)		atomic64_inc(&(l)->a)
#define local64_dec(l)		atomic64_dec(&(l)->a)
#define local64_add(i,l)	atomic64_add((i),(&(l)->a))
#define local64_sub(i,l)	atomic64_sub((i),(&(l)->a))

#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a))
#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a)
#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a)
#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a))
#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a))
#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a))
#define local64_inc_return(l)	atomic64_inc_return(&(l)->a)

#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
#define local64_xchg(l, n)	atomic64_xchg((&(l)->a), (n))
#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
#define local64_inc_not_zero(l)	atomic64_inc_not_zero(&(l)->a)

/* Non-atomic variants, ie. preemption disabled and won't be touched
 * in interrupt, etc.  Some archs can optimize this case well. */
#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))

#endif /* BITS_PER_LONG != 64 */

#endif /* _ASM_GENERIC_LOCAL64_H */
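For context (not part of the diff): a minimal sketch of how local64_t is typically paired with per-cpu data, as the header comment above describes. The counter name "hits" and both helpers are illustrative, not from this merge.

/* Sketch: a per-CPU 64-bit event counter built on local64_t. */
#include <linux/percpu.h>
#include <asm/local64.h>

static DEFINE_PER_CPU(local64_t, hits);

/* Writer side: called with preemption off (e.g. from an IRQ handler),
 * so the increment only needs to be atomic against interrupts on this
 * CPU; no cross-CPU LOCK prefix is required on archs with a fast local_t. */
static void record_hit(void)
{
	local64_inc(&__get_cpu_var(hits));
}

/* Reader side: fold the per-CPU counters into one total. */
static u64 total_hits(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local64_read(&per_cpu(hits, cpu));
	return sum;
}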
@@ -156,10 +156,6 @@
 		CPU_KEEP(exit.data)						\
 		MEM_KEEP(init.data)						\
 		MEM_KEEP(exit.data)						\
-		. = ALIGN(8);							\
-		VMLINUX_SYMBOL(__start___markers) = .;				\
-		*(__markers)							\
-		VMLINUX_SYMBOL(__stop___markers) = .;				\
 		. = ALIGN(32);							\
 		VMLINUX_SYMBOL(__start___tracepoints) = .;			\
 		*(__tracepoints)						\
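For context (again outside the diff): the __tracepoints section retained above is where the linker gathers the per-tracepoint instances emitted by DEFINE_TRACE(). A minimal sketch of the tracepoint API of this era; the "sample_event" name and its argument are hypothetical.

/* Sketch: defining and firing a tracepoint (circa 2.6.35 API). */
#include <linux/tracepoint.h>

/* Normally in a header: declares trace_sample_event(). */
DECLARE_TRACE(sample_event,
	TP_PROTO(int value),
	TP_ARGS(value));

/* In exactly one .c file: emits the instance that lands in the
 * __tracepoints section delimited by the linker symbols above. */
DEFINE_TRACE(sample_event);

static void do_work(int value)
{
	/* Compiles to a near-noop unless a probe is registered. */
	trace_sample_event(value);
}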