@@ -307,6 +307,32 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 }
 
 /*
+ * Deadlock detection is conditional:
+ *
+ * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
+ * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
+ *
+ * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
+ * conducted independent of the detect argument.
+ *
+ * If the waiter argument is NULL this indicates the deboost path and
+ * deadlock detection is disabled independent of the detect argument
+ * and the config settings.
+ */
+static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
+					  enum rtmutex_chainwalk chwalk)
+{
+	/*
+	 * This is just a wrapper function for the following call,
+	 * because debug_rt_mutex_detect_deadlock() smells like a magic
+	 * debug feature and I wanted to keep the cond function in the
+	 * main source file along with the comments instead of having
+	 * two of the same in the headers.
+	 */
+	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
+}
+
+/*
  * Max number of times we'll walk the boosting chain:
  */
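[ Note: the wrapper above forwards to debug_rt_mutex_detect_deadlock(), whose
  two header-side variants live in the companion patches of this series and
  are not part of this hunk. A minimal sketch of their assumed shapes:

	/* rtmutex_common.h: the two chain walk modes (companion patch). */
	enum rtmutex_chainwalk {
		RT_MUTEX_MIN_CHAINWALK,		/* Stop the walk as early as possible */
		RT_MUTEX_FULL_CHAINWALK,	/* Walk the full chain to detect deadlocks */
	};

	/* rtmutex.h (CONFIG_DEBUG_RT_MUTEXES=n): detect only on a full walk. */
	#define debug_rt_mutex_detect_deadlock(w, c)	((c) == RT_MUTEX_FULL_CHAINWALK)

	/*
	 * rtmutex-debug.h (CONFIG_DEBUG_RT_MUTEXES=y): always detect, except
	 * on the deboost path where the waiter argument is NULL.
	 */
	static inline bool
	debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
				       enum rtmutex_chainwalk walk)
	{
		return waiter != NULL;
	}
]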
@@ -381,7 +407,7 @@ static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
  *	  goto again;
  */
 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
-				      int deadlock_detect,
+				      enum rtmutex_chainwalk chwalk,
 				      struct rt_mutex *orig_lock,
 				      struct rt_mutex *next_lock,
 				      struct rt_mutex_waiter *orig_waiter,
@@ -389,12 +415,12 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 {
 	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
 	struct rt_mutex_waiter *prerequeue_top_waiter;
-	int detect_deadlock, ret = 0, depth = 0;
+	int ret = 0, depth = 0;
 	struct rt_mutex *lock;
+	bool detect_deadlock;
 	unsigned long flags;
 
-	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
-							 deadlock_detect);
+	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
 
 	/*
 	 * The (de)boosting is a step by step approach with a lot of
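[ With detect_deadlock now cached as a bool once, up front, the chain walk
  below gates its early exits on it directly instead of re-evaluating the
  config-dependent helper each iteration; condensed from the existing loop
  body (unchanged by this hunk, shape assumed):

	if (top_waiter) {
		if (!task_has_pi_waiters(task))
			goto out_unlock_pi;
		/* Detection off: stop once we are no longer the top waiter. */
		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
			goto out_unlock_pi;
	}
]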
@@ -520,7 +546,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * walk, we detected a deadlock.
 	 */
 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
-		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
+		debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
 		raw_spin_unlock(&lock->wait_lock);
 		ret = -EDEADLK;
 		goto out_unlock_pi;
@@ -784,7 +810,7 @@ takeit:
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 				   struct rt_mutex_waiter *waiter,
 				   struct task_struct *task,
-				   int detect_deadlock)
+				   enum rtmutex_chainwalk chwalk)
 {
 	struct task_struct *owner = rt_mutex_owner(lock);
 	struct rt_mutex_waiter *top_waiter = waiter;
@@ -830,7 +856,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 		__rt_mutex_adjust_prio(owner);
 		if (owner->pi_blocked_on)
 			chain_walk = 1;
-	} else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
+	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
 		chain_walk = 1;
 	}
 
@@ -855,7 +881,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
 	raw_spin_unlock(&lock->wait_lock);
 
-	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
+	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
 					 next_lock, waiter, task);
 
 	raw_spin_lock(&lock->wait_lock);
@@ -960,7 +986,8 @@ static void remove_waiter(struct rt_mutex *lock,
 
 	raw_spin_unlock(&lock->wait_lock);
 
-	rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
+	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
+				   next_lock, NULL, current);
 
 	raw_spin_lock(&lock->wait_lock);
 }
@@ -990,7 +1017,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
 	get_task_struct(task);
 
-	rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
+	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
+				   next_lock, NULL, task);
 }
 
 /**
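[ Both deboost call sites above, remove_waiter() and rt_mutex_adjust_pi(),
  pass a NULL orig_waiter alongside RT_MUTEX_MIN_CHAINWALK, so per the
  comment on rt_mutex_cond_detect_deadlock() detection stays off on this
  path under either config; illustrative:

	/*
	 * Deboost path: the NULL waiter disables detection even with
	 * CONFIG_DEBUG_RT_MUTEXES=y.
	 */
	rt_mutex_cond_detect_deadlock(NULL, RT_MUTEX_MIN_CHAINWALK); /* false */
]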
/**
|
|
|
|
@@ -1068,7 +1096,7 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
|
|
|
|
|
static int __sched
|
|
|
|
|
rt_mutex_slowlock(struct rt_mutex *lock, int state,
|
|
|
|
|
struct hrtimer_sleeper *timeout,
|
|
|
|
|
int detect_deadlock)
|
|
|
|
|
enum rtmutex_chainwalk chwalk)
|
|
|
|
|
{
|
|
|
|
|
struct rt_mutex_waiter waiter;
|
|
|
|
|
int ret = 0;
|
|
|
|
@@ -1094,7 +1122,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		timeout->task = NULL;
 	}
 
-	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
+	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
 
 	if (likely(!ret))
 		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
@@ -1103,7 +1131,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
 	if (unlikely(ret)) {
 		remove_waiter(lock, &waiter);
-		rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
+		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
 	}
 
 	/*
@@ -1230,27 +1258,29 @@ static inline int
 rt_mutex_fastlock(struct rt_mutex *lock, int state,
 		  int (*slowfn)(struct rt_mutex *lock, int state,
 				struct hrtimer_sleeper *timeout,
-				int detect_deadlock))
+				enum rtmutex_chainwalk chwalk))
 {
 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, NULL, 0);
+		return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
 }
 
 static inline int
 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
-			struct hrtimer_sleeper *timeout, int detect_deadlock,
+			struct hrtimer_sleeper *timeout,
+			enum rtmutex_chainwalk chwalk,
 			int (*slowfn)(struct rt_mutex *lock, int state,
 				      struct hrtimer_sleeper *timeout,
-				      int detect_deadlock))
+				      enum rtmutex_chainwalk chwalk))
 {
-	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
+	    likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, timeout, detect_deadlock);
+		return slowfn(lock, state, timeout, chwalk);
 }
 
 static inline int
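[ The untimed entry points need no change in this hunk, since
  rt_mutex_fastlock() now supplies RT_MUTEX_MIN_CHAINWALK to the slow path
  itself; an unchanged caller, shape assumed from the mainline code of this
  era:

	void __sched rt_mutex_lock(struct rt_mutex *lock)
	{
		might_sleep();

		rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
	}
]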
@@ -1312,7 +1342,8 @@ int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
 {
 	might_sleep();
 
-	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, 1,
+	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+				       RT_MUTEX_FULL_CHAINWALK,
 				       rt_mutex_slowlock);
 }
 
@@ -1334,7 +1365,8 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 {
 	might_sleep();
 
-	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, 0,
+	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+				       RT_MUTEX_MIN_CHAINWALK,
 				       rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
@@ -1463,7 +1495,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 	}
 
 	/* We enforce deadlock detection for futexes */
-	ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
+	ret = task_blocks_on_rt_mutex(lock, waiter, task,
+				      RT_MUTEX_FULL_CHAINWALK);
 
 	if (ret && !rt_mutex_owner(lock)) {
 		/*