Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Continued user-access cleanups in the futex code.

   - percpu-rwsem rewrite that uses its own waitqueue and atomic_t
     instead of an embedded rwsem. This addresses a couple of
     weaknesses, but the primary motivation was complications on the
     -rt kernel.

   - Introduce raw lock nesting detection on lockdep
     (CONFIG_PROVE_RAW_LOCK_NESTING=y), document the raw_lock vs. normal
     lock differences. This too originates from -rt.

   - Reuse lockdep zapped chain_hlocks entries, to conserve RAM
     footprint on distro-ish kernels running into the "BUG:
     MAX_LOCKDEP_CHAIN_HLOCKS too low!" depletion of the lockdep
     chain-entries pool.

   - Misc cleanups, smaller fixes and enhancements - see the changelog
     for details"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (55 commits)
  fs/buffer: Make BH_Uptodate_Lock bit_spin_lock a regular spinlock_t
  thermal/x86_pkg_temp: Make pkg_temp_lock a raw_spinlock_t
  Documentation/locking/locktypes: Minor copy editor fixes
  Documentation/locking/locktypes: Further clarifications and wordsmithing
  m68knommu: Remove mm.h include from uaccess_no.h
  x86: get rid of user_atomic_cmpxchg_inatomic()
  generic arch_futex_atomic_op_inuser() doesn't need access_ok()
  x86: don't reload after cmpxchg in unsafe_atomic_op2() loop
  x86: convert arch_futex_atomic_op_inuser() to user_access_begin/user_access_end()
  objtool: whitelist __sanitizer_cov_trace_switch()
  [parisc, s390, sparc64] no need for access_ok() in futex handling
  sh: no need of access_ok() in arch_futex_atomic_op_inuser()
  futex: arch_futex_atomic_op_inuser() calling conventions change
  completion: Use lockdep_assert_RT_in_threaded_ctx() in complete_all()
  lockdep: Add posixtimer context tracing bits
  lockdep: Annotate irq_work
  lockdep: Add hrtimer context tracing bits
  lockdep: Introduce wait-type checks
  completion: Use simple wait queues
  sched/swait: Prepare usage in completions
  ...
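The wait-type checks called out above (CONFIG_PROVE_RAW_LOCK_NESTING=y) catch lock nesting that only breaks on PREEMPT_RT, where spinlock_t becomes a sleeping lock. Below is a rough sketch of the rule being enforced; the lock names and functions are invented for illustration and are not part of this series:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_raw_lock);	/* hypothetical example lock */
static DEFINE_SPINLOCK(example_lock);		/* hypothetical example lock */

static void invalid_nesting(void)
{
	raw_spin_lock(&example_raw_lock);
	/*
	 * Flagged by the new checks: on PREEMPT_RT a spinlock_t may sleep,
	 * so it must not be acquired inside a raw_spinlock_t section.
	 */
	spin_lock(&example_lock);
	spin_unlock(&example_lock);
	raw_spin_unlock(&example_raw_lock);
}

static void valid_nesting(void)
{
	spin_lock(&example_lock);
	/* Fine: a raw_spinlock_t may nest inside a spinlock_t section. */
	raw_spin_lock(&example_raw_lock);
	raw_spin_unlock(&example_raw_lock);
	spin_unlock(&example_lock);
}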
 kernel/futex.c | 107
@@ -135,8 +135,7 @@
  *
  * Where (A) orders the waiters increment and the futex value read through
  * atomic operations (see hb_waiters_inc) and where (B) orders the write
- * to futex and the waiters read -- this is done by the barriers for both
- * shared and private futexes in get_futex_key_refs().
+ * to futex and the waiters read (see hb_waiters_pending()).
  *
  * This yields the following case (where X:=waiters, Y:=futex):
  *
@@ -331,17 +330,6 @@ static void compat_exit_robust_list(struct task_struct *curr);
 static inline void compat_exit_robust_list(struct task_struct *curr) { }
 #endif
 
-static inline void futex_get_mm(union futex_key *key)
-{
-	mmgrab(key->private.mm);
-	/*
-	 * Ensure futex_get_mm() implies a full barrier such that
-	 * get_futex_key() implies a full barrier. This is relied upon
-	 * as smp_mb(); (B), see the ordering comment above.
-	 */
-	smp_mb__after_atomic();
-}
-
 /*
  * Reflects a new waiter being added to the waitqueue.
  */
@@ -370,6 +358,10 @@ static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
 static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
 {
 #ifdef CONFIG_SMP
+	/*
+	 * Full barrier (B), see the ordering comment above.
+	 */
+	smp_mb();
 	return atomic_read(&hb->waiters);
 #else
 	return 1;
@@ -407,69 +399,6 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
 		&& key1->both.offset == key2->both.offset);
 }
 
-/*
- * Take a reference to the resource addressed by a key.
- * Can be called while holding spinlocks.
- *
- */
-static void get_futex_key_refs(union futex_key *key)
-{
-	if (!key->both.ptr)
-		return;
-
-	/*
-	 * On MMU less systems futexes are always "private" as there is no per
-	 * process address space. We need the smp wmb nevertheless - yes,
-	 * arch/blackfin has MMU less SMP ...
-	 */
-	if (!IS_ENABLED(CONFIG_MMU)) {
-		smp_mb(); /* explicit smp_mb(); (B) */
-		return;
-	}
-
-	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-	case FUT_OFF_INODE:
-		smp_mb(); /* explicit smp_mb(); (B) */
-		break;
-	case FUT_OFF_MMSHARED:
-		futex_get_mm(key); /* implies smp_mb(); (B) */
-		break;
-	default:
-		/*
-		 * Private futexes do not hold reference on an inode or
-		 * mm, therefore the only purpose of calling get_futex_key_refs
-		 * is because we need the barrier for the lockless waiter check.
-		 */
-		smp_mb(); /* explicit smp_mb(); (B) */
-	}
-}
-
-/*
- * Drop a reference to the resource addressed by a key.
- * The hash bucket spinlock must not be held. This is
- * a no-op for private futexes, see comment in the get
- * counterpart.
- */
-static void drop_futex_key_refs(union futex_key *key)
-{
-	if (!key->both.ptr) {
-		/* If we're here then we tried to put a key we failed to get */
-		WARN_ON_ONCE(1);
-		return;
-	}
-
-	if (!IS_ENABLED(CONFIG_MMU))
-		return;
-
-	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-	case FUT_OFF_INODE:
-		break;
-	case FUT_OFF_MMSHARED:
-		mmdrop(key->private.mm);
-		break;
-	}
-}
-
 enum futex_access {
 	FUTEX_READ,
 	FUTEX_WRITE
@@ -601,7 +530,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_a
 	if (!fshared) {
 		key->private.mm = mm;
 		key->private.address = address;
-		get_futex_key_refs(key); /* implies smp_mb(); (B) */
 		return 0;
 	}
 
@@ -741,8 +669,6 @@ again:
 		rcu_read_unlock();
 	}
 
-	get_futex_key_refs(key); /* implies smp_mb(); (B) */
-
 out:
 	put_page(page);
 	return err;
@@ -750,7 +676,6 @@ out:
 
 static inline void put_futex_key(union futex_key *key)
 {
-	drop_futex_key_refs(key);
 }
 
 /**
@@ -1740,10 +1665,9 @@ static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
 		oparg = 1 << oparg;
 	}
 
-	if (!access_ok(uaddr, sizeof(u32)))
-		return -EFAULT;
-
+	pagefault_disable();
 	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
+	pagefault_enable();
 	if (ret)
 		return ret;
 
@@ -1885,7 +1809,6 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
 		plist_add(&q->list, &hb2->chain);
 		q->lock_ptr = &hb2->lock;
 	}
-	get_futex_key_refs(key2);
 	q->key = *key2;
 }
 
@@ -1907,7 +1830,6 @@ static inline
 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
 			   struct futex_hash_bucket *hb)
 {
-	get_futex_key_refs(key);
 	q->key = *key;
 
 	__unqueue_futex(q);
@@ -2018,7 +1940,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
 			 u32 *cmpval, int requeue_pi)
 {
 	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
-	int drop_count = 0, task_count = 0, ret;
+	int task_count = 0, ret;
 	struct futex_pi_state *pi_state = NULL;
 	struct futex_hash_bucket *hb1, *hb2;
 	struct futex_q *this, *next;
@@ -2139,7 +2061,6 @@ retry_private:
 		 */
 		if (ret > 0) {
 			WARN_ON(pi_state);
-			drop_count++;
 			task_count++;
 			/*
 			 * If we acquired the lock, then the user space value
@@ -2259,7 +2180,6 @@ retry_private:
 			 * doing so.
 			 */
 			requeue_pi_wake_futex(this, &key2, hb2);
-			drop_count++;
 			continue;
 		} else if (ret) {
 			/*
@@ -2280,7 +2200,6 @@ retry_private:
 			}
 		}
 		requeue_futex(this, hb1, hb2, &key2);
-		drop_count++;
 	}
 
 	/*
@@ -2295,15 +2214,6 @@ out_unlock:
 	wake_up_q(&wake_q);
 	hb_waiters_dec(hb2);
 
-	/*
-	 * drop_futex_key_refs() must be called outside the spinlocks. During
-	 * the requeue we moved futex_q's from the hash bucket at key1 to the
-	 * one at key2 and updated their key pointer. We no longer need to
-	 * hold the references to key1.
-	 */
-	while (--drop_count >= 0)
-		drop_futex_key_refs(&key1);
-
 out_put_keys:
 	put_futex_key(&key2);
 out_put_key1:
@@ -2433,7 +2343,6 @@ retry:
 		ret = 1;
 	}
 
-	drop_futex_key_refs(&q->key);
 	return ret;
 }
 
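A note on the ordering change above: the full barrier (B) that get_futex_key_refs() used to provide now sits in hb_waiters_pending(), pairing with the barrier (A) implied by hb_waiters_inc() on the waiter side. The sketch below is a userspace analogue of that store-buffering pattern using C11 atomics; it is illustrative only, and every name in it is invented rather than taken from the kernel:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int waiters;	/* stands in for hb->waiters (X) */
static atomic_int futex_word;	/* stands in for the futex value (Y) */

/* Waiter side: bump the waiter count, then re-check the futex word. */
static int waiter_should_sleep(int expected)
{
	atomic_fetch_add(&waiters, 1);			/* X++; seq_cst implies barrier (A) */
	if (atomic_load(&futex_word) != expected) {	/* read Y */
		atomic_fetch_sub(&waiters, 1);
		return 0;				/* value changed, do not sleep */
	}
	return 1;					/* would queue and block */
}

/* Waker side: update the futex word, then check for waiters. */
static int waker_sees_waiter(int newval)
{
	atomic_store(&futex_word, newval);		/* write Y */
	atomic_thread_fence(memory_order_seq_cst);	/* barrier (B), as in hb_waiters_pending() */
	return atomic_load(&waiters) > 0;		/* read X */
}

int main(void)
{
	/* Run the two paths back to back; the barriers matter when they race. */
	printf("waiter sleeps: %d\n", waiter_should_sleep(0));
	printf("waker sees waiter: %d\n", waker_sees_waiter(1));
	return 0;
}

With both sides fully ordered, at least one of them observes the other's store, so a concurrent waker cannot skip the wakeup while the waiter misses the value change.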