locking/mutex: Improve inlining
Instead of inlining __mutex_lock_common() 5 times, once for each
{state,ww} variant, reduce this to two: ww and !ww.

Then add __always_inline to mutex_optimistic_spin(), so that it will
get inlined at all 4 remaining call sites, for all {waiter,ww} variants.

   text    data     bss     dec     hex filename
   6301       0       0    6301    189d defconfig-build/kernel/locking/mutex.o
   4053       0       0    4053     fd5 defconfig-build/kernel/locking/mutex.o
   4257       0       0    4257    10a1 defconfig-build/kernel/locking/mutex.o

This reduces total text size and better separates the ww and !ww mutex
code generation.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
committed by Ingo Molnar

parent 659cf9f582
commit 427b18207a
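The pattern the commit relies on: one large worker function takes compile-time constant flags and is marked __always_inline, and a small set of out-of-line wrappers pass constants for those flags; the compiler constant-folds the flags in each wrapper and emits exactly one specialized copy of the worker per wrapper, with the dead branches removed. Below is a minimal standalone sketch of that pattern, not kernel code; all names (lock_common, lock_plain, lock_ww) are illustrative only.

/*
 * Minimal standalone sketch (not kernel code) of the specialization
 * pattern this commit uses; names are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

/* Big common worker: always inlined into its callers. */
static inline __attribute__((always_inline))
int lock_common(int state, bool use_ww)
{
	if (use_ww) {
		/* ww-specific bookkeeping would live here ... */
		return state + 1;
	}
	/* ... and is compiled out entirely in the !ww copy. */
	return state;
}

/* Two out-of-line instantiations instead of one per {state,ww} variant. */
static int lock_plain(int state)
{
	return lock_common(state, false);
}

static int lock_ww(int state)
{
	return lock_common(state, true);
}

int main(void)
{
	printf("%d %d\n", lock_plain(1), lock_ww(1));
	return 0;
}

Built with gcc -O2, lock_plain() and lock_ww() each contain only their own branch of lock_common(). The same mechanism yields the two __mutex_lock_common() instantiations (ww and !ww) and the four mutex_optimistic_spin() variants in the diff below.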
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -241,8 +241,8 @@ void __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
-static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
-						   struct ww_acquire_ctx *ww_ctx)
+static __always_inline void
+ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
 {
 #ifdef CONFIG_DEBUG_MUTEXES
 	/*
@@ -323,8 +323,7 @@ __ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
  * slowpath, set ctx and wake up any waiters so they can recheck.
  */
 static __always_inline void
-ww_mutex_set_context_fastpath(struct ww_mutex *lock,
-			      struct ww_acquire_ctx *ctx)
+ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
 	unsigned long flags;
 
@@ -365,8 +364,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
  * Callers must hold the mutex wait_lock.
  */
 static __always_inline void
-ww_mutex_set_context_slowpath(struct ww_mutex *lock,
-			      struct ww_acquire_ctx *ctx)
+ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
 	ww_mutex_lock_acquired(lock, ctx);
 	lock->ctx = ctx;
@@ -459,9 +457,9 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
  * with the spinner at the head of the OSQ, if present, until the owner is
  * changed to itself.
  */
-static bool mutex_optimistic_spin(struct mutex *lock,
-				  struct ww_acquire_ctx *ww_ctx,
-				  const bool use_ww_ctx, const bool waiter)
+static __always_inline bool
+mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+		      const bool use_ww_ctx, const bool waiter)
 {
 	if (!waiter) {
 		/*
@@ -551,9 +549,9 @@ fail:
 	return false;
 }
 #else
-static bool mutex_optimistic_spin(struct mutex *lock,
-				  struct ww_acquire_ctx *ww_ctx,
-				  const bool use_ww_ctx, const bool waiter)
+static __always_inline bool
+mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+		      const bool use_ww_ctx, const bool waiter)
 {
 	return false;
 }
@@ -712,8 +710,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct ww_mutex *ww;
 	int ret;
 
-	ww = container_of(lock, struct ww_mutex, base);
+	might_sleep();
 
+	ww = container_of(lock, struct ww_mutex, base);
 	if (use_ww_ctx && ww_ctx) {
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 			return -EALREADY;
@@ -849,13 +848,26 @@ err_early_backoff:
 	return ret;
 }
 
+static int __sched
+__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
+	     struct lockdep_map *nest_lock, unsigned long ip)
+{
+	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
+}
+
+static int __sched
+__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
+		struct lockdep_map *nest_lock, unsigned long ip,
+		struct ww_acquire_ctx *ww_ctx)
+{
+	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
+}
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
-	might_sleep();
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-			    subclass, NULL, _RET_IP_, NULL, 0);
+	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -863,27 +875,21 @@ EXPORT_SYMBOL_GPL(mutex_lock_nested);
 void __sched
 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
-	might_sleep();
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-			    0, nest, _RET_IP_, NULL, 0);
+	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
 
 int __sched
 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
-	might_sleep();
-	return __mutex_lock_common(lock, TASK_KILLABLE,
-				   subclass, NULL, _RET_IP_, NULL, 0);
+	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
 int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
-	might_sleep();
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-				   subclass, NULL, _RET_IP_, NULL, 0);
+	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 
@@ -919,9 +925,9 @@ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	int ret;
 
 	might_sleep();
-	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
-				  0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
-				  ctx, 1);
+	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
+			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
+			      ctx);
 	if (!ret && ctx && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
@@ -935,9 +941,9 @@ ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	int ret;
 
 	might_sleep();
-	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-				  0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
-				  ctx, 1);
+	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
+			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
+			      ctx);
 
 	if (!ret && ctx && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
@@ -1060,37 +1066,34 @@ EXPORT_SYMBOL(mutex_lock_killable);
 static noinline void __sched
 __mutex_lock_slowpath(struct mutex *lock)
 {
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
-			    NULL, _RET_IP_, NULL, 0);
+	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __mutex_lock_killable_slowpath(struct mutex *lock)
 {
-	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
-				   NULL, _RET_IP_, NULL, 0);
+	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(struct mutex *lock)
 {
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, NULL, 0);
+	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, ctx, 1);
+	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
+			       _RET_IP_, ctx);
 }
 
 static noinline int __sched
 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
 					struct ww_acquire_ctx *ctx)
 {
-	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, ctx, 1);
+	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
+			       _RET_IP_, ctx);
 }
 
 #endif