Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:

 - massive CPU hotplug rework (Thomas Gleixner)

 - improve migration fairness (Peter Zijlstra)

 - CPU load calculation updates/cleanups (Yuyang Du)

 - cpufreq updates (Steve Muckle)

 - nohz optimizations (Frederic Weisbecker)

 - switch_mm() micro-optimization on x86 (Andy Lutomirski)

 - ... lots of other enhancements, fixes and cleanups.

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (66 commits)
  ARM: Hide finish_arch_post_lock_switch() from modules
  sched/core: Provide a tsk_nr_cpus_allowed() helper
  sched/core: Use tsk_cpus_allowed() instead of accessing ->cpus_allowed
  sched/loadavg: Fix loadavg artifacts on fully idle and on fully loaded systems
  sched/fair: Correct unit of load_above_capacity
  sched/fair: Clean up scale confusion
  sched/nohz: Fix affine unpinned timers mess
  sched/fair: Fix fairness issue on migration
  sched/core: Kill sched_class::task_waking to clean up the migration logic
  sched/fair: Prepare to fix fairness problems on migration
  sched/fair: Move record_wakee()
  sched/core: Fix comment typo in wake_q_add()
  sched/core: Remove unused variable
  sched: Make hrtick_notifier an explicit call
  sched/fair: Make ilb_notifier an explicit call
  sched/hotplug: Make activate() the last hotplug step
  sched/hotplug: Move migration CPU_DYING to sched_cpu_dying()
  sched/migration: Move CPU_ONLINE into scheduler state
  sched/migration: Move calc_load_migrate() into CPU_DYING
  sched/migration: Move prepare transition to SCHED_STARTING state
  ...
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -45,6 +45,7 @@
 #include <linux/bitops.h>
 #include <linux/gfp.h>
 #include <linux/kmemcheck.h>
+#include <linux/random.h>
 
 #include <asm/sections.h>
 
@@ -3585,7 +3586,35 @@ static int __lock_is_held(struct lockdep_map *lock)
 	return 0;
 }
 
-static void __lock_pin_lock(struct lockdep_map *lock)
+static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
+{
+	struct pin_cookie cookie = NIL_COOKIE;
+	struct task_struct *curr = current;
+	int i;
+
+	if (unlikely(!debug_locks))
+		return cookie;
+
+	for (i = 0; i < curr->lockdep_depth; i++) {
+		struct held_lock *hlock = curr->held_locks + i;
+
+		if (match_held_lock(hlock, lock)) {
+			/*
+			 * Grab 16bits of randomness; this is sufficient to not
+			 * be guessable and still allows some pin nesting in
+			 * our u32 pin_count.
+			 */
+			cookie.val = 1 + (prandom_u32() >> 16);
+			hlock->pin_count += cookie.val;
+			return cookie;
+		}
+	}
+
+	WARN(1, "pinning an unheld lock\n");
+	return cookie;
+}
+
+static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3597,7 +3626,7 @@ static void __lock_pin_lock(struct lockdep_map *lock)
 		struct held_lock *hlock = curr->held_locks + i;
 
 		if (match_held_lock(hlock, lock)) {
-			hlock->pin_count++;
+			hlock->pin_count += cookie.val;
 			return;
 		}
 	}
@@ -3605,7 +3634,7 @@ static void __lock_pin_lock(struct lockdep_map *lock)
 	WARN(1, "pinning an unheld lock\n");
 }
 
-static void __lock_unpin_lock(struct lockdep_map *lock)
+static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3620,7 +3649,11 @@ static void __lock_unpin_lock(struct lockdep_map *lock)
 			if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
 				return;
 
-			hlock->pin_count--;
+			hlock->pin_count -= cookie.val;
+
+			if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
+				hlock->pin_count = 0;
+
 			return;
 		}
 	}
@@ -3751,24 +3784,27 @@ int lock_is_held(struct lockdep_map *lock)
 }
 EXPORT_SYMBOL_GPL(lock_is_held);
 
-void lock_pin_lock(struct lockdep_map *lock)
+struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
 {
+	struct pin_cookie cookie = NIL_COOKIE;
 	unsigned long flags;
 
 	if (unlikely(current->lockdep_recursion))
-		return;
+		return cookie;
 
 	raw_local_irq_save(flags);
 	check_flags(flags);
 
 	current->lockdep_recursion = 1;
-	__lock_pin_lock(lock);
+	cookie = __lock_pin_lock(lock);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
+
+	return cookie;
 }
 EXPORT_SYMBOL_GPL(lock_pin_lock);
 
-void lock_unpin_lock(struct lockdep_map *lock)
+void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
 	unsigned long flags;
 
@@ -3779,7 +3815,24 @@ void lock_unpin_lock(struct lockdep_map *lock)
 	check_flags(flags);
 
 	current->lockdep_recursion = 1;
-	__lock_unpin_lock(lock);
+	__lock_repin_lock(lock, cookie);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
+EXPORT_SYMBOL_GPL(lock_repin_lock);
+
+void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	check_flags(flags);
+
+	current->lockdep_recursion = 1;
+	__lock_unpin_lock(lock, cookie);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
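
Taken together, the hunks above replace the old bare pin_count increment with a cookie scheme: pinning adds a random 16-bit value to pin_count and hands it back in a struct pin_cookie, and only an unpin (or repin) presenting that same cookie balances the count again. A minimal standalone sketch of the idea follows; it is a userspace illustration under assumed names (demo_lock, pin(), unpin()), not the kernel implementation, and rand() stands in for prandom_u32():

/*
 * Standalone illustration of the cookie-based pinning scheme above;
 * a userspace sketch, not kernel code. demo_lock, pin() and unpin()
 * are hypothetical names; rand() stands in for prandom_u32().
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct pin_cookie { unsigned int val; };

struct demo_lock {
	unsigned int pin_count;	/* sum of all outstanding cookies */
};

static struct pin_cookie pin(struct demo_lock *l)
{
	struct pin_cookie cookie;

	/*
	 * 16 bits of randomness: hard to guess, yet small enough that
	 * several nested pins still fit in the u32 pin_count.
	 */
	cookie.val = 1 + (rand() & 0xffff);
	l->pin_count += cookie.val;
	return cookie;
}

static void unpin(struct demo_lock *l, struct pin_cookie cookie)
{
	l->pin_count -= cookie.val;
	/*
	 * A mismatched or reused cookie leaves pin_count unbalanced or
	 * underflowed; the kernel version turns this into a WARN.
	 */
	if ((int)l->pin_count < 0) {
		fprintf(stderr, "pin count corrupted\n");
		l->pin_count = 0;
	}
}

int main(void)
{
	struct demo_lock l = { 0 };
	struct pin_cookie a, b;

	srand(time(NULL));

	a = pin(&l);	/* outer pin */
	b = pin(&l);	/* nested pin with an independent cookie */
	unpin(&l, b);
	unpin(&l, a);
	printf("pin_count after balanced unpin: %u\n", l.pin_count);	/* 0 */
	return 0;
}

Because every cookie lies in [1, 65536], an unpin presenting a stale or guessed cookie almost certainly leaves pin_count unbalanced and trips the warning, while a handful of nested pins still fits comfortably in the u32 counter.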