Add new 'cond_resched_bkl()' helper function
It acts exactly like a regular 'cond_resched()', but will not get
optimized away when CONFIG_PREEMPT is set.
Normal kernel code is already preemptible in the presence of
CONFIG_PREEMPT, so cond_resched() is optimized away (see commit
02b67cc3ba "sched: do not do cond_resched() when CONFIG_PREEMPT").
But when wanting to conditionally reschedule while holding a lock, you
need to use "cond_resched_lock(lock)", and the new function is the BKL
equivalent of that.
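
As an illustration, here is roughly what a call site looks like (a
hypothetical sketch, not code from this patch: walk_entries_under_bkl(),
more_entries() and process_entry() are made-up names):

#include <linux/sched.h>	/* cond_resched_bkl() */
#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */

extern int more_entries(void);		/* made-up predicate */
extern void process_entry(void);	/* made-up work item */

static void walk_entries_under_bkl(void)
{
	lock_kernel();
	while (more_entries()) {
		process_entry();
		/*
		 * A plain cond_resched() compiles to nothing with
		 * CONFIG_PREEMPT=y, so this loop would never give up
		 * the CPU while holding the BKL; cond_resched_bkl()
		 * always performs the resched check.
		 */
		cond_resched_bkl();
	}
	unlock_kernel();
}

Note that cond_resched_bkl() does not drop the BKL itself; the
hand-over happens inside schedule(), as sketched after the diff below.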
Also make fs/locks.c use it.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2037,13 +2037,13 @@ static inline int need_resched(void)
  * cond_resched_lock() will drop the spinlock before scheduling,
  * cond_resched_softirq() will enable bhs before scheduling.
  */
+extern int _cond_resched(void);
 #ifdef CONFIG_PREEMPT
 static inline int cond_resched(void)
 {
 	return 0;
 }
 #else
-extern int _cond_resched(void);
 static inline int cond_resched(void)
 {
 	return _cond_resched();
@@ -2051,6 +2051,10 @@ static inline int cond_resched(void)
 #endif
 extern int cond_resched_lock(spinlock_t * lock);
 extern int cond_resched_softirq(void);
+static inline int cond_resched_bkl(void)
+{
+	return _cond_resched();
+}
 
 /*
  * Does a critical section need to be broken due to another