Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"The main changes in this cycle were:
- a big round of FUTEX_UNLOCK_PI improvements, fixes, cleanups and
general restructuring
- lockdep updates such as new checks for lock_downgrade()
- introduce the new atomic_try_cmpxchg() locking API and use it to
optimize refcount code generation
- ... plus misc fixes, updates and cleanups"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (38 commits)
MAINTAINERS: Add FUTEX SUBSYSTEM
futex: Clarify mark_wake_futex memory barrier usage
futex: Fix small (and harmless looking) inconsistencies
futex: Avoid freeing an active timer
rtmutex: Plug preempt count leak in rt_mutex_futex_unlock()
rtmutex: Fix more prio comparisons
rtmutex: Fix PI chain order integrity
sched,tracing: Update trace_sched_pi_setprio()
sched/rtmutex: Refactor rt_mutex_setprio()
rtmutex: Clean up
sched/deadline/rtmutex: Dont miss the dl_runtime/dl_period update
sched/rtmutex/deadline: Fix a PI crash for deadline tasks
rtmutex: Deboost before waking up the top waiter
locking/ww-mutex: Limit stress test to 2 seconds
locking/atomic: Fix atomic_try_cmpxchg() semantics
lockdep: Fix per-cpu static objects
futex: Drop hb->lock before enqueueing on the rtmutex
futex: Futex_unlock_pi() determinism
futex: Rework futex_lock_pi() to use rt_mutex_*_proxy_lock()
futex,rt_mutex: Restructure rt_mutex_finish_proxy_lock()
...
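
The diff below converts the x86 atomic fallbacks from open-coded cmpxchg() retry loops to the new try_cmpxchg() form, which returns a bool and, on failure, writes the value it actually found back through the caller's 'old' pointer. As a rough illustration only, here is the shape of the two patterns in user-space C11 atomics standing in for the kernel primitives (cmpxchg_int, fetch_add_old and fetch_add_new are made-up names, not kernel APIs):

#include <stdatomic.h>
#include <stdbool.h>

/* cmpxchg()-style helper: returns the value found in *v, which equals
 * 'old' if and only if the exchange happened (mirrors the kernel API). */
static int cmpxchg_int(atomic_int *v, int old, int new)
{
        atomic_compare_exchange_strong(v, &old, new);
        return old;             /* on failure, updated to the observed value */
}

/* Old pattern being removed: compare the returned value by hand. */
static int fetch_add_old(atomic_int *v, int i)
{
        int old, val = atomic_load(v);

        for (;;) {
                old = cmpxchg_int(v, val, val + i);
                if (old == val)
                        break;
                val = old;
        }
        return old;             /* value before the add */
}

/* New pattern: a failed compare updates 'val' through the pointer,
 * so the success flag alone drives the loop. */
static int fetch_add_new(atomic_int *v, int i)
{
        int val = atomic_load(v);

        do {
        } while (!atomic_compare_exchange_weak(v, &val, val + i));
        return val;             /* value before the add */
}

The second form lets the compiler branch on the flag the compare-exchange instruction already set instead of re-comparing the returned value, which is where the refcount code-generation improvement mentioned above comes from.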
@@ -186,6 +186,12 @@ static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
         return cmpxchg(&v->counter, old, new);
 }
 
+#define atomic_try_cmpxchg atomic_try_cmpxchg
+static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+        return try_cmpxchg(&v->counter, old, new);
+}
+
 static inline int atomic_xchg(atomic_t *v, int new)
 {
         return xchg(&v->counter, new);
@@ -201,16 +207,12 @@ static inline void atomic_##op(int i, atomic_t *v) \
 }
 
 #define ATOMIC_FETCH_OP(op, c_op) \
 static inline int atomic_fetch_##op(int i, atomic_t *v) \
 { \
-        int old, val = atomic_read(v); \
-        for (;;) { \
-                old = atomic_cmpxchg(v, val, val c_op i); \
-                if (old == val) \
-                        break; \
-                val = old; \
-        } \
-        return old; \
+        int val = atomic_read(v); \
+        do { \
+        } while (!atomic_try_cmpxchg(v, &val, val c_op i)); \
+        return val; \
 }
 
 #define ATOMIC_OPS(op, c_op) \
@@ -236,16 +238,11 @@ ATOMIC_OPS(xor, ^)
  */
 static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-        int c, old;
-        c = atomic_read(v);
-        for (;;) {
-                if (unlikely(c == (u)))
+        int c = atomic_read(v);
+        do {
+                if (unlikely(c == u))
                         break;
-                old = atomic_cmpxchg((v), c, c + (a));
-                if (likely(old == c))
-                        break;
-                c = old;
-        }
+        } while (!atomic_try_cmpxchg(v, &c, c + a));
         return c;
 }
@@ -176,6 +176,12 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
         return cmpxchg(&v->counter, old, new);
 }
 
+#define atomic64_try_cmpxchg atomic64_try_cmpxchg
+static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, long *old, long new)
+{
+        return try_cmpxchg(&v->counter, old, new);
+}
+
 static inline long atomic64_xchg(atomic64_t *v, long new)
 {
         return xchg(&v->counter, new);
@@ -192,17 +198,12 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
  */
 static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-        long c, old;
-        c = atomic64_read(v);
-        for (;;) {
-                if (unlikely(c == (u)))
-                        break;
-                old = atomic64_cmpxchg((v), c, c + (a));
-                if (likely(old == c))
-                        break;
-                c = old;
-        }
-        return c != (u);
+        long c = atomic64_read(v);
+        do {
+                if (unlikely(c == u))
+                        return false;
+        } while (!atomic64_try_cmpxchg(v, &c, c + a));
+        return true;
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
@@ -216,17 +217,12 @@ static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
  */
 static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
-        long c, old, dec;
-        c = atomic64_read(v);
-        for (;;) {
+        long dec, c = atomic64_read(v);
+        do {
                 dec = c - 1;
                 if (unlikely(dec < 0))
                         break;
-                old = atomic64_cmpxchg((v), c, dec);
-                if (likely(old == c))
-                        break;
-                c = old;
-        }
+        } while (!atomic64_try_cmpxchg(v, &c, dec));
         return dec;
 }
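
As an aside, the atomic64_dec_if_positive() hunk above shows the slightly subtler variant of the conversion: the proposed new value ('dec') has to be recomputed each time around, because a failed try_cmpxchg updates 'c' in place. A minimal user-space sketch of that shape, again with C11 atomics standing in for the kernel primitives (dec_if_positive is a hypothetical name):

#include <stdatomic.h>

/* Analogue of the converted atomic64_dec_if_positive(): a failed
 * compare-exchange refreshes 'c', so 'dec' is recomputed from the
 * new value on every pass. */
static long dec_if_positive(_Atomic long *v)
{
        long dec, c = atomic_load(v);

        do {
                dec = c - 1;
                if (dec < 0)
                        break;          /* would go negative: do not store */
        } while (!atomic_compare_exchange_weak(v, &c, dec));

        return dec;                     /* negative means nothing was stored */
}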
@@ -242,14 +238,10 @@ static inline void atomic64_##op(long i, atomic64_t *v) \
 #define ATOMIC64_FETCH_OP(op, c_op) \
 static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
 { \
-        long old, val = atomic64_read(v); \
-        for (;;) { \
-                old = atomic64_cmpxchg(v, val, val c_op i); \
-                if (old == val) \
-                        break; \
-                val = old; \
-        } \
-        return old; \
+        long val = atomic64_read(v); \
+        do { \
+        } while (!atomic64_try_cmpxchg(v, &val, val c_op i)); \
+        return val; \
 }
 
 #define ATOMIC64_OPS(op, c_op) \
@@ -153,6 +153,76 @@ extern void __add_wrong_size(void)
 #define cmpxchg_local(ptr, old, new) \
         __cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
 
+
+#define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock) \
+({ \
+        bool success; \
+        __typeof__(_ptr) _old = (_pold); \
+        __typeof__(*(_ptr)) __old = *_old; \
+        __typeof__(*(_ptr)) __new = (_new); \
+        switch (size) { \
+        case __X86_CASE_B: \
+        { \
+                volatile u8 *__ptr = (volatile u8 *)(_ptr); \
+                asm volatile(lock "cmpxchgb %[new], %[ptr]" \
+                             CC_SET(z) \
+                             : CC_OUT(z) (success), \
+                               [ptr] "+m" (*__ptr), \
+                               [old] "+a" (__old) \
+                             : [new] "q" (__new) \
+                             : "memory"); \
+                break; \
+        } \
+        case __X86_CASE_W: \
+        { \
+                volatile u16 *__ptr = (volatile u16 *)(_ptr); \
+                asm volatile(lock "cmpxchgw %[new], %[ptr]" \
+                             CC_SET(z) \
+                             : CC_OUT(z) (success), \
+                               [ptr] "+m" (*__ptr), \
+                               [old] "+a" (__old) \
+                             : [new] "r" (__new) \
+                             : "memory"); \
+                break; \
+        } \
+        case __X86_CASE_L: \
+        { \
+                volatile u32 *__ptr = (volatile u32 *)(_ptr); \
+                asm volatile(lock "cmpxchgl %[new], %[ptr]" \
+                             CC_SET(z) \
+                             : CC_OUT(z) (success), \
+                               [ptr] "+m" (*__ptr), \
+                               [old] "+a" (__old) \
+                             : [new] "r" (__new) \
+                             : "memory"); \
+                break; \
+        } \
+        case __X86_CASE_Q: \
+        { \
+                volatile u64 *__ptr = (volatile u64 *)(_ptr); \
+                asm volatile(lock "cmpxchgq %[new], %[ptr]" \
+                             CC_SET(z) \
+                             : CC_OUT(z) (success), \
+                               [ptr] "+m" (*__ptr), \
+                               [old] "+a" (__old) \
+                             : [new] "r" (__new) \
+                             : "memory"); \
+                break; \
+        } \
+        default: \
+                __cmpxchg_wrong_size(); \
+        } \
+        if (unlikely(!success)) \
+                *_old = __old; \
+        likely(success); \
+})
+
+#define __try_cmpxchg(ptr, pold, new, size) \
+        __raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)
+
+#define try_cmpxchg(ptr, pold, new) \
+        __try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr)))
+
 /*
  * xadd() adds "inc" to "*ptr" and atomically returns the previous
  * value of "*ptr".
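
For completeness: the __raw_try_cmpxchg() block above emits LOCK CMPXCHG and exports the resulting zero flag directly to the compiler via CC_SET(z)/CC_OUT(z); when the exchange fails, the value found in memory (left in the accumulator register) is written back through the caller's pointer. A hedged sketch of the kind of caller this is aimed at, an increment-unless-zero loop of the sort used by reference counting, written with C11 atomics rather than the kernel macro (inc_not_zero is a hypothetical helper):

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical caller: take a reference unless the count already hit
 * zero; this is the loop shape the try_cmpxchg() form compiles well. */
static bool inc_not_zero(_Atomic unsigned int *refs)
{
        unsigned int old = atomic_load(refs);

        do {
                if (old == 0)
                        return false;   /* object is already dead */
        } while (!atomic_compare_exchange_weak(refs, &old, old + 1));

        return true;
}

Because the loop condition is the success flag itself, the generated code can branch straight on ZF rather than comparing the returned old value a second time.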