Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - a big round of FUTEX_UNLOCK_PI improvements, fixes, cleanups and
     general restructuring

   - lockdep updates such as new checks for lock_downgrade()

   - introduce the new atomic_try_cmpxchg() locking API and use it to
     optimize refcount code generation

   - ... plus misc fixes, updates and cleanups"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (38 commits)
  MAINTAINERS: Add FUTEX SUBSYSTEM
  futex: Clarify mark_wake_futex memory barrier usage
  futex: Fix small (and harmless looking) inconsistencies
  futex: Avoid freeing an active timer
  rtmutex: Plug preempt count leak in rt_mutex_futex_unlock()
  rtmutex: Fix more prio comparisons
  rtmutex: Fix PI chain order integrity
  sched,tracing: Update trace_sched_pi_setprio()
  sched/rtmutex: Refactor rt_mutex_setprio()
  rtmutex: Clean up
  sched/deadline/rtmutex: Dont miss the dl_runtime/dl_period update
  sched/rtmutex/deadline: Fix a PI crash for deadline tasks
  rtmutex: Deboost before waking up the top waiter
  locking/ww-mutex: Limit stress test to 2 seconds
  locking/atomic: Fix atomic_try_cmpxchg() semantics
  lockdep: Fix per-cpu static objects
  futex: Drop hb->lock before enqueueing on the rtmutex
  futex: Futex_unlock_pi() determinism
  futex: Rework futex_lock_pi() to use rt_mutex_*_proxy_lock()
  futex,rt_mutex: Restructure rt_mutex_finish_proxy_lock()
  ...
 lib/refcount.c | 169
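The lib/refcount.c diff below is the clearest illustration of the new
atomic_try_cmpxchg() API in action: every open-coded cmpxchg retry loop
collapses from a for (;;) with an explicit old/val compare into a plain
do/while. Unlike atomic_cmpxchg(), which returns the previous value and
leaves the caller to re-compare it, the try_cmpxchg() form returns a
boolean and, on failure, writes the freshly observed value back through
its pointer argument, so the extra compare and the "old" temporary
disappear from the source and (on x86, for example) the generated code
can branch directly on the CMPXCHG flags. As a rough user-space model of
the two loop shapes, using C11 <stdatomic.h> rather than the kernel's
atomic_t (the function names here are illustrative, not kernel APIs):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <limits.h>

    /* Old shape: cmpxchg-style, the caller keeps and re-compares 'old'. */
    static bool add_not_zero_cmpxchg(atomic_uint *refs, unsigned int i)
    {
            unsigned int old, new, val = atomic_load_explicit(refs, memory_order_relaxed);

            for (;;) {
                    if (!val)
                            return false;

                    new = val + i;
                    if (new < val)
                            new = UINT_MAX;         /* saturate instead of wrapping */

                    old = val;
                    /* models atomic_cmpxchg_relaxed(): did it change under us? */
                    if (atomic_compare_exchange_strong_explicit(refs, &old, new,
                                    memory_order_relaxed, memory_order_relaxed))
                            break;

                    val = old;                      /* retry from the value we lost to */
            }
            return true;
    }

    /* New shape: try_cmpxchg-style, failure refreshes 'val' in place. */
    static bool add_not_zero_try_cmpxchg(atomic_uint *refs, unsigned int i)
    {
            unsigned int new, val = atomic_load_explicit(refs, memory_order_relaxed);

            do {
                    if (!val)
                            return false;

                    new = val + i;
                    if (new < val)
                            new = UINT_MAX;         /* saturate instead of wrapping */

                    /* on failure, 'val' is updated with the current count */
            } while (!atomic_compare_exchange_weak_explicit(refs, &val, new,
                            memory_order_relaxed, memory_order_relaxed));

            return true;
    }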
@@ -37,11 +37,29 @@
 #include <linux/refcount.h>
 #include <linux/bug.h>
 
+/**
+ * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
+ */
 bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 {
-        unsigned int old, new, val = atomic_read(&r->refs);
+        unsigned int new, val = atomic_read(&r->refs);
 
-        for (;;) {
+        do {
                 if (!val)
                         return false;
 
@@ -51,12 +69,8 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
                 new = val + i;
                 if (new < val)
                         new = UINT_MAX;
-                old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-                if (old == val)
-                        break;
 
-                val = old;
-        }
+        } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
 
         WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
@@ -64,24 +78,45 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_add_not_zero);
 
+/**
+ * refcount_add - add a value to a refcount
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ */
 void refcount_add(unsigned int i, refcount_t *r)
 {
         WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_add);
 
-/*
- * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+/**
+ * refcount_inc_not_zero - increment a refcount unless it is 0
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
  *
  * Provides no memory ordering, it is assumed the caller has guaranteed the
  * object memory to be stable (RCU, etc.). It does provide a control dependency
  * and thereby orders future stores. See the comment on top.
+ *
+ * Return: true if the increment was successful, false otherwise
  */
 bool refcount_inc_not_zero(refcount_t *r)
 {
-        unsigned int old, new, val = atomic_read(&r->refs);
+        unsigned int new, val = atomic_read(&r->refs);
 
-        for (;;) {
+        do {
                 new = val + 1;
 
                 if (!val)
@@ -90,12 +125,7 @@ bool refcount_inc_not_zero(refcount_t *r)
                 if (unlikely(!new))
                         return true;
 
-                old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-                if (old == val)
-                        break;
-
-                val = old;
-        }
+        } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
 
         WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
@@ -103,11 +133,17 @@ bool refcount_inc_not_zero(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
 
-/*
- * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+/**
+ * refcount_inc - increment a refcount
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
  *
  * Provides no memory ordering, it is assumed the caller already has a
- * reference on the object, will WARN when this is not so.
+ * reference on the object.
+ *
+ * Will WARN if the refcount is 0, as this represents a possible use-after-free
+ * condition.
  */
 void refcount_inc(refcount_t *r)
 {
@@ -115,11 +151,31 @@ void refcount_inc(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_inc);
 
+/**
+ * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * @i: amount to subtract from the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec_and_test(), but it will WARN, return false and
+ * ultimately leak on underflow and will fail to decrement when saturated
+ * at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_dec(), or one of its variants, should instead be used to
+ * decrement a reference count.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
-        unsigned int old, new, val = atomic_read(&r->refs);
+        unsigned int new, val = atomic_read(&r->refs);
 
-        for (;;) {
+        do {
                 if (unlikely(val == UINT_MAX))
                         return false;
 
@@ -129,24 +185,24 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
                         return false;
                 }
 
-                old = atomic_cmpxchg_release(&r->refs, val, new);
-                if (old == val)
-                        break;
-
-                val = old;
-        }
+        } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
 
         return !new;
 }
 EXPORT_SYMBOL_GPL(refcount_sub_and_test);
 
-/*
+/**
+ * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * @r: the refcount
+ *
  * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
  * decrement when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before, and provides a control dependency such that free() must come after.
  * See the comment on top.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
  */
 bool refcount_dec_and_test(refcount_t *r)
 {
@@ -154,21 +210,26 @@ bool refcount_dec_and_test(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_dec_and_test);
 
-/*
+/**
+ * refcount_dec - decrement a refcount
+ * @r: the refcount
+ *
  * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
  * when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before.
  */
-
 void refcount_dec(refcount_t *r)
 {
         WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_dec);
 
-/*
+/**
+ * refcount_dec_if_one - decrement a refcount if it is 1
+ * @r: the refcount
+ *
  * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
  * success thereof.
  *
@@ -178,24 +239,33 @@ EXPORT_SYMBOL_GPL(refcount_dec);
  * It can be used like a try-delete operator; this explicit case is provided
  * and not cmpxchg in generic, because that would allow implementing unsafe
  * operations.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
  */
 bool refcount_dec_if_one(refcount_t *r)
 {
-        return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+        int val = 1;
+
+        return atomic_try_cmpxchg_release(&r->refs, &val, 0);
 }
 EXPORT_SYMBOL_GPL(refcount_dec_if_one);
 
-/*
+/**
+ * refcount_dec_not_one - decrement a refcount if it is not 1
+ * @r: the refcount
+ *
  * No atomic_t counterpart, it decrements unless the value is 1, in which case
  * it will return false.
  *
  * Was often done like: atomic_add_unless(&var, -1, 1)
+ *
+ * Return: true if the decrement operation was successful, false otherwise
  */
 bool refcount_dec_not_one(refcount_t *r)
 {
-        unsigned int old, new, val = atomic_read(&r->refs);
+        unsigned int new, val = atomic_read(&r->refs);
 
-        for (;;) {
+        do {
                 if (unlikely(val == UINT_MAX))
                         return true;
 
@@ -208,24 +278,27 @@ bool refcount_dec_not_one(refcount_t *r)
                         return true;
                 }
 
-                old = atomic_cmpxchg_release(&r->refs, val, new);
-                if (old == val)
-                        break;
-
-                val = old;
-        }
+        } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
 
         return true;
 }
 EXPORT_SYMBOL_GPL(refcount_dec_not_one);
 
-/*
+/**
+ * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
+ *                               refcount to 0
+ * @r: the refcount
+ * @lock: the mutex to be locked
+ *
  * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
  * to decrement when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before, and provides a control dependency such that free() must come after.
  * See the comment on top.
+ *
+ * Return: true and hold mutex if able to decrement refcount to 0, false
+ *         otherwise
  */
 bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
 {
@@ -242,13 +315,21 @@ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
 }
 EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);
 
-/*
+/**
+ * refcount_dec_and_lock - return holding spinlock if able to decrement
+ *                         refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ *
  * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
  * decrement when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before, and provides a control dependency such that free() must come after.
  * See the comment on top.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ *         otherwise
  */
 bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 {
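For readers following the conversion, the contract the new do/while loops
rely on can be summarized in terms of the old primitive. The sketch below
is written in the style of a generic fallback and is illustrative only:
the real kernel definition is a macro in the atomic headers, comes in
_relaxed/_acquire/_release flavours, and architectures whose compare-and-
exchange instruction sets condition flags implement it more directly.

    /*
     * Sketch of the atomic_try_cmpxchg() contract: returns true if the
     * swap happened; on failure, hands the freshly observed value back
     * through @old so the caller can retry without another atomic_read().
     */
    static inline bool try_cmpxchg_sketch(atomic_t *v, int *old, int new)
    {
            int prev = atomic_cmpxchg(v, *old, new);    /* returns prior value */

            if (prev != *old) {
                    *old = prev;    /* failed: refresh the caller's expected value */
                    return false;
            }
            return true;            /* succeeded: *old was the current value */
    }

Because success/failure is a boolean rather than a value to re-compare,
the retry loops above can compile down to a compare-and-exchange whose
result flag feeds the loop branch directly, which is the "optimize
refcount code generation" item in the pull request summary.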