Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "Here are the locking changes in this cycle:

   - rwsem unification and simpler micro-optimizations to prepare for
     more intrusive (and more lucrative) scalability improvements in
     v5.3 (Waiman Long)

   - Lockdep irq state tracking flag usage cleanups (Frederic
     Weisbecker)

   - static key improvements (Jakub Kicinski, Peter Zijlstra)

   - misc updates, cleanups and smaller fixes"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (26 commits)
  locking/lockdep: Remove unnecessary unlikely()
  locking/static_key: Don't take sleeping locks in __static_key_slow_dec_deferred()
  locking/static_key: Factor out the fast path of static_key_slow_dec()
  locking/static_key: Add support for deferred static branches
  locking/lockdep: Test all incompatible scenarios at once in check_irq_usage()
  locking/lockdep: Avoid bogus Clang warning
  locking/lockdep: Generate LOCKF_ bit composites
  locking/lockdep: Use expanded masks on find_usage_*() functions
  locking/lockdep: Map remaining magic numbers to lock usage mask names
  locking/lockdep: Move valid_state() inside CONFIG_TRACE_IRQFLAGS && CONFIG_PROVE_LOCKING
  locking/rwsem: Prevent unneeded warning during locking selftest
  locking/rwsem: Optimize rwsem structure for uncontended lock acquisition
  locking/rwsem: Enable lock event counting
  locking/lock_events: Don't show pvqspinlock events on bare metal
  locking/lock_events: Make lock_events available for all archs & other locks
  locking/qspinlock_stat: Introduce generic lockevent_*() counting APIs
  locking/rwsem: Enhance DEBUG_RWSEMS_WARN_ON() macro
  locking/rwsem: Add debug check for __down_read*()
  locking/rwsem: Micro-optimize rwsem_try_read_lock_unqueued()
  locking/rwsem: Move rwsem internal function declarations to rwsem-xadd.h
  ...
Author: Linus Torvalds
Date:   2019-05-06 13:50:15 -07:00

 62 files changed, 989 insertions(+), 1931 deletions(-)

diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h

@@ -12,21 +12,79 @@ struct static_key_deferred {
 	struct delayed_work work;
 };
-extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
-extern void static_key_deferred_flush(struct static_key_deferred *key);
+
+struct static_key_true_deferred {
+	struct static_key_true key;
+	unsigned long timeout;
+	struct delayed_work work;
+};
+
+struct static_key_false_deferred {
+	struct static_key_false key;
+	unsigned long timeout;
+	struct delayed_work work;
+};
+
+#define static_key_slow_dec_deferred(x)					\
+	__static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
+#define static_branch_slow_dec_deferred(x)				\
+	__static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
+
+#define static_key_deferred_flush(x)					\
+	__static_key_deferred_flush((x), &(x)->work)
+
+extern void
+__static_key_slow_dec_deferred(struct static_key *key,
+			       struct delayed_work *work,
+			       unsigned long timeout);
+extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
 extern void
 jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+extern void jump_label_update_timeout(struct work_struct *work);
+
+#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl)			\
+	struct static_key_true_deferred name = {			\
+		.key =		{ STATIC_KEY_INIT_TRUE },		\
+		.timeout =	(rl),					\
+		.work =	__DELAYED_WORK_INITIALIZER((name).work,		\
+						   jump_label_update_timeout, \
+						   0),			\
+	}
+
+#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl)			\
+	struct static_key_false_deferred name = {			\
+		.key =		{ STATIC_KEY_INIT_FALSE },		\
+		.timeout =	(rl),					\
+		.work =	__DELAYED_WORK_INITIALIZER((name).work,		\
+						   jump_label_update_timeout, \
+						   0),			\
+	}
+
+#define static_branch_deferred_inc(x)	static_branch_inc(&(x)->key)
 
 #else	/* !CONFIG_JUMP_LABEL */
 struct static_key_deferred {
 	struct static_key key;
 };
+
+struct static_key_true_deferred {
+	struct static_key_true key;
+};
+
+struct static_key_false_deferred {
+	struct static_key_false key;
+};
+
+#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl)	\
+	struct static_key_true_deferred name = { STATIC_KEY_TRUE_INIT }
+#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl)	\
+	struct static_key_false_deferred name = { STATIC_KEY_FALSE_INIT }
+
+#define static_branch_slow_dec_deferred(x)	static_branch_dec(&(x)->key)
+
 static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
 	STATIC_KEY_CHECK_USE(key);
 	static_key_slow_dec(&key->key);
 }
-static inline void static_key_deferred_flush(struct static_key_deferred *key)
+
+static inline void static_key_deferred_flush(void *key)
 {
 	STATIC_KEY_CHECK_USE(key);
 }
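[Editor's note: the net effect of this hunk is that rate-limited (deferred) decrements, previously available only for plain static_keys, now also work with the static_branch true/false flavors. As a rough usage sketch — the key name, the HZ timeout, and the surrounding functions are hypothetical, not from this series:

	static DEFINE_STATIC_KEY_DEFERRED_FALSE(demo_key, HZ);

	static void demo_feature_on(void)
	{
		/* Increments take effect immediately. */
		static_branch_deferred_inc(&demo_key);
	}

	static void demo_feature_off(void)
	{
		/*
		 * Decrements are rate-limited: the actual code patching is
		 * deferred by at least the timeout given at definition time
		 * (HZ here), so rapid on/off flapping does not hammer the
		 * expensive text-patching path.
		 */
		static_branch_slow_dec_deferred(&demo_key);
	}

	static bool demo_fast_path(void)
	{
		return static_branch_unlikely(&demo_key.key);
	}
]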

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h

@@ -476,7 +476,7 @@ struct pin_cookie { };
 #define NIL_COOKIE (struct pin_cookie){ }
 
-#define lockdep_pin_lock(l)			({ struct pin_cookie cookie; cookie; })
+#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
 #define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
 #define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
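[Editor's note: this change only affects CONFIG_LOCKDEP=n builds: the stub used to return an uninitialized cookie, which recent Clang versions flag as a use of an uninitialized variable; zero-initializing it silences the bogus warning without changing behavior. For reference, the pin/unpin pattern looks roughly like the sketch below — demo_lock and demo() are made up for illustration; the scheduler's runqueue lock is the main in-tree user:

	static DEFINE_RAW_SPINLOCK(demo_lock);	/* hypothetical lock */

	static void demo(void)
	{
		struct pin_cookie cookie;

		raw_spin_lock(&demo_lock);
		cookie = lockdep_pin_lock(&demo_lock);
		/*
		 * Code in here may hand the lock around; the cookie lets
		 * lockdep verify nobody dropped it behind our back before
		 * we unpin and release it ourselves.
		 */
		lockdep_unpin_lock(&demo_lock, cookie);
		raw_spin_unlock(&demo_lock);
	}
]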

diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
deleted file mode 100644
--- a/include/linux/rwsem-spinlock.h
+++ /dev/null

@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* rwsem-spinlock.h: fallback C implementation
- *
- * Copyright (c) 2001   David Howells (dhowells@redhat.com).
- * - Derived partially from ideas by Andrea Arcangeli <andrea@suse.de>
- * - Derived also from comments by Linus
- */
-
-#ifndef _LINUX_RWSEM_SPINLOCK_H
-#define _LINUX_RWSEM_SPINLOCK_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-/*
- * the rw-semaphore definition
- * - if count is 0 then there are no active readers or writers
- * - if count is +ve then that is the number of active readers
- * - if count is -1 then there is one active writer
- * - if wait_list is not empty, then there are processes waiting for the semaphore
- */
-struct rw_semaphore {
-	__s32			count;
-	raw_spinlock_t		wait_lock;
-	struct list_head	wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
-};
-
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-
-extern void __down_read(struct rw_semaphore *sem);
-extern int __must_check __down_read_killable(struct rw_semaphore *sem);
-extern int __down_read_trylock(struct rw_semaphore *sem);
-extern void __down_write(struct rw_semaphore *sem);
-extern int __must_check __down_write_killable(struct rw_semaphore *sem);
-extern int __down_write_trylock(struct rw_semaphore *sem);
-extern void __up_read(struct rw_semaphore *sem);
-extern void __up_write(struct rw_semaphore *sem);
-extern void __downgrade_write(struct rw_semaphore *sem);
-extern int rwsem_is_locked(struct rw_semaphore *sem);
-
-#endif /* __KERNEL__ */
-#endif /* _LINUX_RWSEM_SPINLOCK_H */

diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h

@@ -20,25 +20,30 @@
 #include <linux/osq_lock.h>
 #endif
 
 struct rw_semaphore;
 
-#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-#include <linux/rwsem-spinlock.h> /* use a generic implementation */
-#define __RWSEM_INIT_COUNT(name)	.count = RWSEM_UNLOCKED_VALUE
-#else
-/* All arch specific implementations share the same struct */
+/*
+ * For an uncontended rwsem, count and owner are the only fields a task
+ * needs to touch when acquiring the rwsem. So they are put next to each
+ * other to increase the chance that they will share the same cacheline.
+ *
+ * In a contended rwsem, the owner is likely the most frequently accessed
+ * field in the structure as the optimistic waiter that holds the osq lock
+ * will spin on owner. For an embedded rwsem, other hot fields in the
+ * containing structure should be moved further away from the rwsem to
+ * reduce the chance that they will share the same cacheline causing
+ * cacheline bouncing problem.
+ */
 struct rw_semaphore {
 	atomic_long_t count;
-	struct list_head wait_list;
-	raw_spinlock_t wait_lock;
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
-	struct optimistic_spin_queue osq; /* spinner MCS lock */
 	/*
 	 * Write owner. Used as a speculative check to see
 	 * if the owner is running on the cpu.
 	 */
 	struct task_struct *owner;
+	struct optimistic_spin_queue osq; /* spinner MCS lock */
 #endif
+	raw_spinlock_t wait_lock;
+	struct list_head wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
 #endif
@@ -50,24 +55,14 @@ struct rw_semaphore {
  */
 #define RWSEM_OWNER_UNKNOWN	((struct task_struct *)-2L)
 
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-/* Include the arch specific part */
-#include <asm/rwsem.h>
-
 /* In all implementations count != 0 means locked */
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
 {
 	return atomic_long_read(&sem->count) != 0;
 }
 
+#define RWSEM_UNLOCKED_VALUE		0L
 #define __RWSEM_INIT_COUNT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
-#endif
 
 /* Common initializer macros and functions */
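
[Editor's note: to make the layout advice in the comment above concrete, a hypothetical structure embedding an rwsem might separate its own hot fields from the semaphore like this — the struct and field names are invented for illustration:

	#include <linux/cache.h>
	#include <linux/rwsem.h>

	struct demo_object {
		struct rw_semaphore sem;	/* count and owner share a cacheline */
		unsigned long flags;		/* rarely written: fine next to sem */
		/* frequently updated: keep off the cacheline sem->owner lives on */
		atomic_long_t hits ____cacheline_aligned;
	};

The point being that a spinning optimistic waiter hammers sem->owner's cacheline, so frequently written fields of the containing object should not share it.]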