Merge branch 'sched/rt' into sched/core, to pick up -rt changes

Pick up the first couple of patches working towards PREEMPT_RT.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Ingo Molnar
Date:   2019-09-16 14:04:28 +02:00
517 changed files with 3636 additions and 4684 deletions

include/linux/sched.h

@@ -1772,7 +1772,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
  * value indicates whether a reschedule was done in fact.
  * cond_resched_lock() will drop the spinlock before scheduling,
  */
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 extern int _cond_resched(void);
 #else
 static inline int _cond_resched(void) { return 0; }
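
For illustration only (not part of this commit): a minimal sketch of the
pattern this #ifdef serves. On CONFIG_PREEMPTION kernels _cond_resched()
is the no-op stub above, since involuntary preemption already bounds
latency; non-preemptible kernels instead rely on explicit cond_resched()
calls in long-running loops. The names process_many_items, process_one,
and struct item below are hypothetical.

  #include <linux/sched.h>

  /* Hypothetical long-running loop: cond_resched() offers a voluntary
   * reschedule point on kernels without involuntary preemption; with
   * CONFIG_PREEMPTION it compiles down to the no-op stub above. */
  static void process_many_items(struct item *items, int n)
  {
          int i;

          for (i = 0; i < n; i++) {
                  process_one(&items[i]);   /* hypothetical helper */
                  cond_resched();
          }
  }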
@@ -1801,12 +1801,12 @@ static inline void cond_resched_rcu(void)
 /*
  * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
  * but a general need for low latency)
  */
 static inline int spin_needbreak(spinlock_t *lock)
 {
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	return spin_is_contended(lock);
 #else
 	return 0;
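
As a usage note (hypothetical example, not from this diff):
spin_needbreak() is typically paired with cond_resched_lock(), which,
as the comment in the first hunk says, drops the spinlock before
scheduling and reacquires it afterwards. The names scan_table_locked,
touch_entry, and struct entry below are assumptions.

  #include <linux/sched.h>
  #include <linux/spinlock.h>

  /* Hypothetical lock-breaking scan: if another task is spinning on
   * 'lock' (only reported on CONFIG_PREEMPTION builds, per the #ifdef
   * above), drop the lock, reschedule, and reacquire it. */
  static void scan_table_locked(spinlock_t *lock, struct entry *tbl, int n)
  {
          int i;

          spin_lock(lock);
          for (i = 0; i < n; i++) {
                  touch_entry(&tbl[i]);     /* hypothetical helper */
                  if (spin_needbreak(lock))
                          cond_resched_lock(lock);
          }
          spin_unlock(lock);
  }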