Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and misc x86 updates from Ingo Molnar:
 "Lots of changes in this cycle - in part because locking/core attracted
  a number of related x86 low level work which was easier to handle in a
  single tree:

   - Linux Kernel Memory Consistency Model updates (Alan Stern, Paul E.
     McKenney, Andrea Parri)

   - lockdep scalability improvements and micro-optimizations (Waiman
     Long)

   - rwsem improvements (Waiman Long)

   - spinlock micro-optimization (Matthew Wilcox)

   - qspinlocks: Provide a liveness guarantee (more fairness) on x86.
     (Peter Zijlstra)

   - Add support for relative references in jump tables on arm64, x86
     and s390 to optimize jump labels (Ard Biesheuvel, Heiko Carstens)

   - Be a lot less permissive on weird (kernel address) uaccess faults
     on x86: BUG() when uaccess helpers fault on kernel addresses (Jann
     Horn)

   - macrofy x86 asm statements to un-confuse the GCC inliner. (Nadav
     Amit)

   - ... and a handful of other smaller changes as well"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (57 commits)
  locking/lockdep: Make global debug_locks* variables read-mostly
  locking/lockdep: Fix debug_locks off performance problem
  locking/pvqspinlock: Extend node size when pvqspinlock is configured
  locking/qspinlock_stat: Count instances of nested lock slowpaths
  locking/qspinlock, x86: Provide liveness guarantee
  x86/asm: 'Simplify' GEN_*_RMWcc() macros
  locking/qspinlock: Rework some comments
  locking/qspinlock: Re-order code
  locking/lockdep: Remove duplicated 'lock_class_ops' percpu array
  x86/defconfig: Enable CONFIG_USB_XHCI_HCD=y
  futex: Replace spin_is_locked() with lockdep
  locking/lockdep: Make class->ops a percpu counter and move it under CONFIG_DEBUG_LOCKDEP=y
  x86/jump-labels: Macrofy inline assembly code to work around GCC inlining bugs
  x86/cpufeature: Macrofy inline assembly code to work around GCC inlining bugs
  x86/extable: Macrofy inline assembly code to work around GCC inlining bugs
  x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops
  x86/bug: Macrofy the BUG table section handling, to work around GCC inlining bugs
  x86/alternatives: Macrofy lock prefixes to work around GCC inlining bugs
  x86/refcount: Work around GCC inlining bug
  x86/objtool: Use asm macros to work around GCC inlining bugs
  ...
@@ -99,22 +99,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
  * unique, to convince GCC not to merge duplicate inline asm statements.
  */
 #define annotate_reachable() ({					\
-	asm volatile("%c0:\n\t"						\
-		     ".pushsection .discard.reachable\n\t"		\
-		     ".long %c0b - .\n\t"				\
-		     ".popsection\n\t" : : "i" (__COUNTER__));		\
+	asm volatile("ANNOTATE_REACHABLE counter=%c0"			\
+		     : : "i" (__COUNTER__));				\
 })
 #define annotate_unreachable() ({					\
-	asm volatile("%c0:\n\t"						\
-		     ".pushsection .discard.unreachable\n\t"		\
-		     ".long %c0b - .\n\t"				\
-		     ".popsection\n\t" : : "i" (__COUNTER__));		\
+	asm volatile("ANNOTATE_UNREACHABLE counter=%c0"			\
+		     : : "i" (__COUNTER__));				\
 })
-#define ASM_UNREACHABLE							\
-	"999:\n\t"							\
-	".pushsection .discard.unreachable\n\t"				\
-	".long 999b - .\n\t"						\
-	".popsection\n\t"
 #else
 #define annotate_reachable()
 #define annotate_unreachable()
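
Note: the removed lines open-coded the whole section-pushing sequence in every expansion; the __COUNTER__ operand is what the "unique" comment above refers to. A minimal standalone sketch of that trick (user-space GCC on an ELF target assumed; the section name here is made up):

/* Each expansion passes a fresh __COUNTER__ value as an immediate
 * operand, so the asm strings differ and GCC cannot merge them. */
#define mark_here() \
	asm volatile("%c0:\n\t" \
		     ".pushsection .discard.example\n\t" \
		     ".long %c0b - .\n\t" \
		     ".popsection\n\t" : : "i" (__COUNTER__))

void f(void) { mark_here(); }	/* expands with "i" (0) -> label 0: */
void g(void) { mark_here(); }	/* expands with "i" (1) -> label 1: */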
@@ -299,6 +290,45 @@ static inline void *offset_to_ptr(const int *off)
 	return (void *)((unsigned long)off + *off);
 }
 
+#else /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+#ifndef LINKER_SCRIPT
+
+#ifdef CONFIG_STACK_VALIDATION
+.macro ANNOTATE_UNREACHABLE counter:req
+\counter:
+	.pushsection .discard.unreachable
+	.long \counter\()b -.
+	.popsection
+.endm
+
+.macro ANNOTATE_REACHABLE counter:req
+\counter:
+	.pushsection .discard.reachable
+	.long \counter\()b -.
+	.popsection
+.endm
+
+.macro ASM_UNREACHABLE
+999:
+	.pushsection .discard.unreachable
+	.long 999b - .
+	.popsection
+.endm
+#else /* CONFIG_STACK_VALIDATION */
+.macro ANNOTATE_UNREACHABLE counter:req
+.endm
+
+.macro ANNOTATE_REACHABLE counter:req
+.endm
+
+.macro ASM_UNREACHABLE
+.endm
+#endif /* CONFIG_STACK_VALIDATION */
+
+#endif /* LINKER_SCRIPT */
+#endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
 
 #ifndef __optimize
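
Note: the payoff of the .macro versions above is that GCC estimates the cost of an asm() statement from the number of lines in its string, so the old multi-line annotation sequences made small inline functions look expensive and distorted inlining decisions. With the assembler macros assembled into every translation unit, the string GCC sees is one line; a sketch of a call site (illustrative only, not taken from this diff):

static inline void reachable_hint(void)
{
	/* One line as far as GCC's cost estimate is concerned; the real
	 * expansion happens later, in the assembler. */
	asm volatile("ANNOTATE_REACHABLE counter=%c0" : : "i" (__COUNTER__));
}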
@@ -8,8 +8,8 @@
 
 struct task_struct;
 
-extern int debug_locks;
-extern int debug_locks_silent;
+extern int debug_locks __read_mostly;
+extern int debug_locks_silent __read_mostly;
 
 
 static inline int __debug_locks_off(void)
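
Note: __read_mostly groups these flags into the .data..read_mostly section, keeping variables that are read on nearly every locking operation but written essentially never off cache lines that see frequent writes. A rough sketch of what the annotation amounts to (modeled on the x86 <asm/cache.h> definition, from memory):

/* Roughly what __read_mostly expands to on x86: */
#define __read_mostly __attribute__((__section__(".data..read_mostly")))

int debug_locks __read_mostly = 1;	/* hot for readers, cold for writers */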
@@ -119,6 +119,68 @@ struct static_key {
 
 #ifdef HAVE_JUMP_LABEL
 #include <asm/jump_label.h>
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE
+
+struct jump_entry {
+	s32 code;
+	s32 target;
+	long key;	// key may be far away from the core kernel under KASLR
+};
+
+static inline unsigned long jump_entry_code(const struct jump_entry *entry)
+{
+	return (unsigned long)&entry->code + entry->code;
+}
+
+static inline unsigned long jump_entry_target(const struct jump_entry *entry)
+{
+	return (unsigned long)&entry->target + entry->target;
+}
+
+static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
+{
+	long offset = entry->key & ~3L;
+
+	return (struct static_key *)((unsigned long)&entry->key + offset);
+}
+
+#else
+
+static inline unsigned long jump_entry_code(const struct jump_entry *entry)
+{
+	return entry->code;
+}
+
+static inline unsigned long jump_entry_target(const struct jump_entry *entry)
+{
+	return entry->target;
+}
+
+static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
+{
+	return (struct static_key *)((unsigned long)entry->key & ~3UL);
+}
+
+#endif
+
+static inline bool jump_entry_is_branch(const struct jump_entry *entry)
+{
+	return (unsigned long)entry->key & 1UL;
+}
+
+static inline bool jump_entry_is_init(const struct jump_entry *entry)
+{
+	return (unsigned long)entry->key & 2UL;
+}
+
+static inline void jump_entry_set_init(struct jump_entry *entry)
+{
+	entry->key |= 2;
+}
+
+#endif
+#endif
+
 #ifndef __ASSEMBLY__
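
Note: the accessors above are the point of the relative format. Each s32 field stores the distance from its own address to the code or target it refers to, so entries are position-independent (no relocation pass under KASLR) and shrink from three native pointers to 4+4+8 bytes on 64-bit. A small worked sketch with a made-up offset:

/* Sketch: recover an absolute address from a relative jump_entry.
 * The offset value is invented for illustration. */
static unsigned long relative_example(struct jump_entry *entry)
{
	entry->code = -0x1000;	/* branch sits 4 KB below &entry->code */

	/* jump_entry_code() adds the delta back onto the field's own
	 * address; the low two bits of ->key stay free for the branch
	 * and init flags, hence the & ~3L in jump_entry_key(). */
	return (unsigned long)&entry->code + entry->code;
}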
@@ -151,7 +213,6 @@ extern struct jump_entry __start___jump_table[];
 extern struct jump_entry __stop___jump_table[];
 
 extern void jump_label_init(void);
-extern void jump_label_invalidate_initmem(void);
 extern void jump_label_lock(void);
 extern void jump_label_unlock(void);
 extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -199,8 +260,6 @@ static __always_inline void jump_label_init(void)
 	static_key_initialized = true;
 }
 
-static inline void jump_label_invalidate_initmem(void) {}
-
 static __always_inline bool static_key_false(struct static_key *key)
 {
 	if (unlikely(static_key_count(key) > 0))
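
Note: static_key_false() shown in context here is the plain fallback used when jump labels are unavailable; with HAVE_JUMP_LABEL the same test compiles down to a patchable NOP. A usage sketch with the modern wrappers (the key and helper names are hypothetical):

extern void my_slow_feature(void);			/* hypothetical helper */
static DEFINE_STATIC_KEY_FALSE(my_feature_key);		/* hypothetical key */

void hot_path(void)
{
	/* Costs a NOP until my_feature_key is enabled at runtime. */
	if (static_branch_unlikely(&my_feature_key))
		my_slow_feature();
}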
@@ -99,13 +99,8 @@ struct lock_class {
 	 */
 	unsigned int			version;
 
-	/*
-	 * Statistics counter:
-	 */
-	unsigned long			ops;
-
-	const char			*name;
 	int				name_version;
+	const char			*name;
 
 #ifdef CONFIG_LOCK_STAT
 	unsigned long			contention_point[LOCKSTAT_POINTS];
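
Note: the deleted ops field was a single counter bumped from every CPU on every lock operation, i.e. guaranteed cache-line ping-pong on lockdep's hottest path. The series replaces it with a per-cpu counter that only exists under CONFIG_DEBUG_LOCKDEP=y; a sketch of that pattern (names approximate the follow-up commit, quoted from memory):

#ifdef CONFIG_DEBUG_LOCKDEP
static DEFINE_PER_CPU(unsigned long [MAX_LOCKDEP_KEYS], lock_class_ops);

static inline void debug_class_ops_inc(struct lock_class *class)
{
	/* Per-cpu increment: no shared cache line, no atomics needed. */
	this_cpu_inc(lock_class_ops[class - lock_classes]);
}
#endif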
@@ -45,10 +45,10 @@ struct rw_semaphore {
 };
 
 /*
- * Setting bit 0 of the owner field with other non-zero bits will indicate
+ * Setting bit 1 of the owner field but not bit 0 will indicate
  * that the rwsem is writer-owned with an unknown owner.
  */
-#define RWSEM_OWNER_UNKNOWN	((struct task_struct *)-1L)
+#define RWSEM_OWNER_UNKNOWN	((struct task_struct *)-2L)
 
 extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
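
Note: the constant moves from -1L to -2L because the owner encoding treats the low bits independently: bit 0 marks reader ownership, bit 1 marks "owned, but by whom is unknown". -1L (all ones) sets both bits; -2L (binary ...11110) sets bit 1 without bit 0, which is exactly what an anonymously writer-owned rwsem needs. A sketch of the bit test (flag names per the rwsem code of this era; the helper itself is illustrative):

#define RWSEM_READER_OWNED	(1UL << 0)
#define RWSEM_ANONYMOUSLY_OWNED	(1UL << 1)

/* Illustrative helper: writer-owned with an unknown owner? */
static inline bool anon_writer_owned(unsigned long owner)
{
	return (owner & RWSEM_ANONYMOUSLY_OWNED) &&
	       !(owner & RWSEM_READER_OWNED);
}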
@@ -735,6 +735,12 @@ struct task_struct {
 	unsigned			use_memdelay:1;
 #endif
 
+	/*
+	 * May usercopy functions fault on kernel addresses?
+	 * This is not just a single bit because this can potentially nest.
+	 */
+	unsigned int			kernel_uaccess_faults_ok;
+
 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
 
 	struct restart_block		restart_block;
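
Note: the field is a counter rather than a single bit because regions that legitimately probe kernel addresses can nest. Such code brackets the access with an increment/decrement, and the x86 fault path only BUG()s on a kernel-address uaccess fault when the count is zero. A sketch modeled on how probe_kernel_read() uses it in this series (simplified, not a verbatim copy):

long probe_kernel_read_sketch(void *dst, const void *src, size_t size)
{
	long ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	pagefault_disable();
	current->kernel_uaccess_faults_ok++;	/* faults here are expected */
	ret = __copy_from_user_inatomic(dst,
			(__force const void __user *)src, size);
	current->kernel_uaccess_faults_ok--;
	pagefault_enable();
	set_fs(old_fs);

	return ret ? -EFAULT : 0;
}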