sched: Get rid of lock_depth
Neil Brown pointed out that lock_depth somehow escaped the BKL
removal work.  Let's get rid of it now.

Note that the perf scripting utilities still have a bunch of code
for dealing with common_lock_depth in tracepoints; I have left that
in place in case anybody wants to use that code with older kernels.

Suggested-by: Neil Brown <neilb@suse.de>
Signed-off-by: Jonathan Corbet <corbet@lwn.net>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110422111910.456c0e84@bike.lwn.net
Signed-off-by: Ingo Molnar <mingo@elte.hu>
committed by Ingo Molnar

parent d3bf52e998
commit 625f2a378e
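For context: lock_depth was the per-task recursion counter for the Big
Kernel Lock, initialized to -1 and only ever raised by lock_kernel().
The sketch below is a simplified userspace model of those pre-2.6.39
semantics (the struct and function bodies are illustrative stand-ins,
not actual kernel code); it shows why, once the BKL itself was removed,
every "lock_depth >= 0" test in this patch became dead code.

#include <assert.h>

/* Simplified model of the former BKL bookkeeping (illustrative). */
struct task { int lock_depth; };          /* -1 means "BKL not held" */

static struct task current_task = { .lock_depth = -1 };

static void lock_kernel(void)
{
	if (current_task.lock_depth < 0) {
		/* outermost acquisition: take the actual lock here */
	}
	current_task.lock_depth++;        /* the BKL was recursive */
}

static void unlock_kernel(void)
{
	assert(current_task.lock_depth >= 0);
	current_task.lock_depth--;
	if (current_task.lock_depth < 0) {
		/* last unlock: release the actual lock here */
	}
}

int main(void)
{
	lock_kernel();
	lock_kernel();                    /* nesting was legal */
	unlock_kernel();
	unlock_kernel();
	assert(current_task.lock_depth == -1);
	return 0;
}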
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4121,12 +4121,6 @@ static inline void schedule_debug(struct task_struct *prev)
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 
 	schedstat_inc(this_rq(), sched_count);
-#ifdef CONFIG_SCHEDSTATS
-	if (unlikely(prev->lock_depth >= 0)) {
-		schedstat_inc(this_rq(), rq_sched_info.bkl_count);
-		schedstat_inc(prev, sched_info.bkl_count);
-	}
-#endif
 }
 
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
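A detail worth noting about this hunk: schedstat_inc() already compiles
to nothing when CONFIG_SCHEDSTATS is off, so the extra #ifdef existed
only to compile out the lock_depth test and its branch, not the
increments. A sketch of how the macro behaved in kernels of that era
(simplified for illustration; not taken verbatim from the tree):

#ifdef CONFIG_SCHEDSTATS
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
#else
# define schedstat_inc(rq, field)	do { } while (0)
#endif

The removed block was the last consumer of the bkl_count schedstat
fields, which is why it could go away wholesale.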
@@ -5852,11 +5846,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */
-#if defined(CONFIG_PREEMPT)
-	task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
-#else
 	task_thread_info(idle)->preempt_count = 0;
-#endif
+
 	/*
 	 * The idle tasks have their own, simple scheduling class:
 	 */
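Why the second hunk is behavior-preserving: with the BKL gone, no task
ever reaches lock_depth >= 0, so on CONFIG_PREEMPT kernels the old
expression (idle->lock_depth >= 0) always evaluated to 0, and both arms
of the #if collapsed to the same unconditional assignment. A trivial
standalone check of that claim (illustrative only):

#include <assert.h>

int main(void)
{
	int lock_depth = -1;	/* post-BKL: every task stays at -1 */
	/* old CONFIG_PREEMPT form vs. new unconditional form */
	assert((lock_depth >= 0) == 0);
	return 0;
}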