Merge commit 'v2.6.27-rc8' into core/rcu
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2738,14 +2738,15 @@ void cgroup_fork_callbacks(struct task_struct *child)
  */
 void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
 {
-        struct cgroup *oldcgrp, *newcgrp;
+        struct cgroup *oldcgrp, *newcgrp = NULL;
 
         if (need_mm_owner_callback) {
                 int i;
                 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                         struct cgroup_subsys *ss = subsys[i];
                         oldcgrp = task_cgroup(old, ss->subsys_id);
-                        newcgrp = task_cgroup(new, ss->subsys_id);
+                        if (new)
+                                newcgrp = task_cgroup(new, ss->subsys_id);
                         if (oldcgrp == newcgrp)
                                 continue;
                         if (ss->mm_owner_changed)
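After this hunk, cgroup_mm_owner_callbacks() can be invoked with new == NULL (see the kernel/exit.c change below), so newcgrp is pre-initialized and only looked up for a real new owner. Any mm_owner_changed implementation therefore has to tolerate a NULL new cgroup. A minimal sketch of such a hook, assuming the three-argument 2.6.27 form of the callback; the subsystem name and accounting step are placeholders:

    /* Hypothetical subsystem hook -- illustrative only. */
    static void example_mm_owner_changed(struct cgroup_subsys *ss,
                                         struct cgroup *old,
                                         struct cgroup *new)
    {
            /* new == NULL now means "the mm lost its owner", not a move. */
            if (!new)
                    return;
            /* ... transfer per-mm accounting from old to new here ... */
    }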
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -843,37 +843,25 @@ static void cpuset_change_cpumask(struct task_struct *tsk,
 /**
  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
+ * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
  *
  * Called with cgroup_mutex held
  *
  * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
  * calling callback functions for each.
  *
- * Return 0 if successful, -errno if not.
+ * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * if @heap != NULL.
  */
-static int update_tasks_cpumask(struct cpuset *cs)
+static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
 {
         struct cgroup_scanner scan;
-        struct ptr_heap heap;
-        int retval;
-
-        /*
-         * cgroup_scan_tasks() will initialize heap->gt for us.
-         * heap_init() is still needed here for we should not change
-         * cs->cpus_allowed when heap_init() fails.
-         */
-        retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
-        if (retval)
-                return retval;
 
         scan.cg = cs->css.cgroup;
         scan.test_task = cpuset_test_cpumask;
         scan.process_task = cpuset_change_cpumask;
-        scan.heap = &heap;
-        retval = cgroup_scan_tasks(&scan);
-
-        heap_free(&heap);
-        return retval;
+        scan.heap = heap;
+        cgroup_scan_tasks(&scan);
 }
 
 /**
@@ -883,6 +871,7 @@ static int update_tasks_cpumask(struct cpuset *cs)
  */
 static int update_cpumask(struct cpuset *cs, const char *buf)
 {
+        struct ptr_heap heap;
         struct cpuset trialcs;
         int retval;
         int is_load_balanced;
@@ -917,6 +906,10 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
         if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed))
                 return 0;
 
+        retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
+        if (retval)
+                return retval;
+
         is_load_balanced = is_sched_load_balance(&trialcs);
 
         mutex_lock(&callback_mutex);
@@ -927,9 +920,9 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
          * Scan tasks in the cpuset, and update the cpumasks of any
          * that need an update.
          */
-        retval = update_tasks_cpumask(cs);
-        if (retval < 0)
-                return retval;
+        update_tasks_cpumask(cs, &heap);
+
+        heap_free(&heap);
 
         if (is_load_balanced)
                 async_rebuild_sched_domains();
@@ -1965,7 +1958,7 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
                     nodes_empty(cp->mems_allowed))
                         remove_tasks_in_empty_cpuset(cp);
                 else {
-                        update_tasks_cpumask(cp);
+                        update_tasks_cpumask(cp, NULL);
                         update_tasks_nodemask(cp, &oldmems);
                 }
         }
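The cpuset hunks move heap allocation out of update_tasks_cpumask() and into the callers, so an allocation failure is reported before any cpuset state changes. A consolidated sketch of the two calling conventions after the change, condensed from the hunks above (illustrative, not the full functions):

    /* Caller that must not change state on allocation failure: */
    struct ptr_heap heap;
    int retval;

    retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
    if (retval)
            return retval;                  /* fail before touching cs->cpus_allowed */
    update_tasks_cpumask(cs, &heap);        /* cannot fail when a heap is passed */
    heap_free(&heap);

    /* Caller on a best-effort path: */
    update_tasks_cpumask(cp, NULL);         /* cgroup_scan_tasks() allocates itself */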
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -583,8 +583,6 @@ mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
          * If there are other users of the mm and the owner (us) is exiting
          * we need to find a new owner to take on the responsibility.
          */
-        if (!mm)
-                return 0;
         if (atomic_read(&mm->mm_users) <= 1)
                 return 0;
         if (mm->owner != p)
@@ -627,6 +625,16 @@ retry:
         } while_each_thread(g, c);
 
         read_unlock(&tasklist_lock);
+        /*
+         * We found no owner yet mm_users > 1: this implies that we are
+         * most likely racing with swapoff (try_to_unuse()) or /proc or
+         * ptrace or page migration (get_task_mm()). Mark owner as NULL,
+         * so that subsystems can understand the callback and take action.
+         */
+        down_write(&mm->mmap_sem);
+        cgroup_mm_owner_callbacks(mm->owner, NULL);
+        mm->owner = NULL;
+        up_write(&mm->mmap_sem);
         return;
 
 assign_new_owner:
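The exit.c change is the producer side of the cgroup.c hunk above: when the owner search fails while mm_users > 1, the mm is marked ownerless instead of keeping a stale pointer. A sketch of the race window it closes, assuming the usual get_task_mm()/mmput() reference pattern:

    /* Illustrative only: an mm reference that is not a thread user. */
    struct mm_struct *mm = get_task_mm(task);       /* bumps mm_users */
    if (mm) {
            /*
             * If the owning task exits right here, no thread is left to
             * inherit the mm, yet mm_users > 1 because of this reference.
             * After this patch mm->owner is NULL in that window, so any
             * user of mm->owner must check for NULL.
             */
            mmput(mm);
    }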
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -753,8 +753,14 @@ static struct page *kimage_alloc_page(struct kimage *image,
                 *old = addr | (*old & ~PAGE_MASK);
 
                 /* The old page I have found cannot be a
-                 * destination page, so return it.
+                 * destination page, so return it if it's
+                 * gfp_flags honor the ones passed in.
                  */
+                if (!(gfp_mask & __GFP_HIGHMEM) &&
+                    PageHighMem(old_page)) {
+                        kimage_free_pages(old_page);
+                        continue;
+                }
                 addr = old_addr;
                 page = old_page;
                 break;
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -488,7 +488,7 @@ static int write_mem_msg(int binary)
                 if (err)
                         return err;
                 if (CACHE_FLUSH_IS_SAFE)
-                        flush_icache_range(addr, addr + length + 1);
+                        flush_icache_range(addr, addr + length);
                 return 0;
         }
 
@@ -1462,7 +1462,7 @@ acquirelock:
          * Get the passive CPU lock which will hold all the non-primary
          * CPU in a spin state while the debugger is active
          */
-        if (!kgdb_single_step || !kgdb_contthread) {
+        if (!kgdb_single_step) {
                 for (i = 0; i < NR_CPUS; i++)
                         atomic_set(&passive_cpu_wait[i], 1);
         }
@@ -1475,7 +1475,7 @@ acquirelock:
 
 #ifdef CONFIG_SMP
         /* Signal the other CPUs to enter kgdb_wait() */
-        if ((!kgdb_single_step || !kgdb_contthread) && kgdb_do_roundup)
+        if ((!kgdb_single_step) && kgdb_do_roundup)
                 kgdb_roundup_cpus(flags);
 #endif
 
@@ -1494,7 +1494,7 @@ acquirelock:
         kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code);
         kgdb_deactivate_sw_breakpoints();
         kgdb_single_step = 0;
-        kgdb_contthread = NULL;
+        kgdb_contthread = current;
         exception_level = 0;
 
         /* Talk to debugger with gdbserial protocol */
@@ -1508,7 +1508,7 @@ acquirelock:
         kgdb_info[ks->cpu].task = NULL;
         atomic_set(&cpu_in_kgdb[ks->cpu], 0);
 
-        if (!kgdb_single_step || !kgdb_contthread) {
+        if (!kgdb_single_step) {
                 for (i = NR_CPUS-1; i >= 0; i--)
                         atomic_set(&passive_cpu_wait[i], 0);
                 /*
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1087,7 +1087,7 @@ hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
         return NOTIFY_DONE;
 }
 
-static void init_hrtick(void)
+static __init void init_hrtick(void)
 {
         hotcpu_notifier(hotplug_hrtick, 0);
 }
@@ -8909,6 +8909,9 @@ static int sched_rt_global_constraints(void)
         u64 rt_runtime, rt_period;
         int ret = 0;
 
+        if (sysctl_sched_rt_period <= 0)
+                return -EINVAL;
+
         rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
         rt_runtime = tg->rt_bandwidth.rt_runtime;
 
@@ -8925,6 +8928,9 @@ static int sched_rt_global_constraints(void)
         unsigned long flags;
         int i;
 
+        if (sysctl_sched_rt_period <= 0)
+                return -EINVAL;
+
         spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
         for_each_possible_cpu(i) {
                 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
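Both variants of sched_rt_global_constraints() now reject a non-positive sched_rt_period before using it. A hedged sketch of why the guard sits up front; the downstream ratio math is an assumption here, but the user-visible effect is that writing a non-positive value to the sysctl now fails:

    /*
     * Illustrative: the period is later fed into ratio arithmetic
     * (division by the period), so a zero or negative value from
     * /proc/sys/kernel/sched_rt_period_us must be refused early.
     */
    if (sysctl_sched_rt_period <= 0)
            return -EINVAL;     /* e.g. "echo 0 > sched_rt_period_us" now fails */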
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -350,6 +350,7 @@ static void __enable_runtime(struct rq *rq)
                 spin_lock(&rt_rq->rt_runtime_lock);
                 rt_rq->rt_runtime = rt_b->rt_runtime;
                 rt_rq->rt_time = 0;
+                rt_rq->rt_throttled = 0;
                 spin_unlock(&rt_rq->rt_runtime_lock);
                 spin_unlock(&rt_b->rt_runtime_lock);
         }
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -71,6 +71,16 @@ void clockevents_set_mode(struct clock_event_device *dev,
         }
 }
 
+/**
+ * clockevents_shutdown - shutdown the device and clear next_event
+ * @dev:        device to shutdown
+ */
+void clockevents_shutdown(struct clock_event_device *dev)
+{
+        clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+        dev->next_event.tv64 = KTIME_MAX;
+}
+
 /**
  * clockevents_program_event - Reprogram the clock event device.
  * @expires:        absolute expiry time (monotonic clock)
@@ -206,7 +216,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
 
         if (new) {
                 BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
-                clockevents_set_mode(new, CLOCK_EVT_MODE_SHUTDOWN);
+                clockevents_shutdown(new);
         }
         local_irq_restore(flags);
 }
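The new clockevents_shutdown() helper is the theme of most of the tick hunks that follow: every open-coded shutdown gains the next_event reset for free. The substitution pattern, condensed from the hunks (illustrative):

    /* before: mode switch only, dev->next_event left stale */
    clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);

    /* after: mode switch plus a defined "no event pending" state */
    clockevents_shutdown(dev);  /* also sets dev->next_event.tv64 = KTIME_MAX */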
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -235,9 +235,8 @@ static void tick_do_broadcast_on_off(void *why)
         case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
                 if (!cpu_isset(cpu, tick_broadcast_mask)) {
                         cpu_set(cpu, tick_broadcast_mask);
-                        if (td->mode == TICKDEV_MODE_PERIODIC)
-                                clockevents_set_mode(dev,
-                                                     CLOCK_EVT_MODE_SHUTDOWN);
+                        if (bc->mode == TICKDEV_MODE_PERIODIC)
+                                clockevents_shutdown(dev);
                 }
                 if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
                         tick_broadcast_force = 1;
@@ -246,7 +245,7 @@ static void tick_do_broadcast_on_off(void *why)
                 if (!tick_broadcast_force &&
                     cpu_isset(cpu, tick_broadcast_mask)) {
                         cpu_clear(cpu, tick_broadcast_mask);
-                        if (td->mode == TICKDEV_MODE_PERIODIC)
+                        if (bc->mode == TICKDEV_MODE_PERIODIC)
                                 tick_setup_periodic(dev, 0);
                 }
                 break;
@@ -254,7 +253,7 @@ static void tick_do_broadcast_on_off(void *why)
 
         if (cpus_empty(tick_broadcast_mask)) {
                 if (!bc_stopped)
-                        clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+                        clockevents_shutdown(bc);
         } else if (bc_stopped) {
                 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                         tick_broadcast_start_periodic(bc);
@@ -306,7 +305,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
 
         if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                 if (bc && cpus_empty(tick_broadcast_mask))
-                        clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+                        clockevents_shutdown(bc);
         }
 
         spin_unlock_irqrestore(&tick_broadcast_lock, flags);
@@ -321,7 +320,7 @@ void tick_suspend_broadcast(void)
 
         bc = tick_broadcast_device.evtdev;
         if (bc)
-                clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+                clockevents_shutdown(bc);
 
         spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
@@ -576,4 +575,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
         spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
+/*
+ * Check, whether the broadcast device is in one shot mode
+ */
+int tick_broadcast_oneshot_active(void)
+{
+        return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
+}
+
 #endif
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
  */
 ktime_t tick_next_period;
 ktime_t tick_period;
-int tick_do_timer_cpu __read_mostly = -1;
+int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
 DEFINE_SPINLOCK(tick_device_lock);
 
 /*
@@ -109,7 +109,8 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
         if (!tick_device_is_functional(dev))
                 return;
 
-        if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
+        if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
+            !tick_broadcast_oneshot_active()) {
                 clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
         } else {
                 unsigned long seq;
@@ -148,7 +149,7 @@ static void tick_setup_device(struct tick_device *td,
          * If no cpu took the do_timer update, assign it to
          * this cpu:
          */
-        if (tick_do_timer_cpu == -1) {
+        if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
                 tick_do_timer_cpu = cpu;
                 tick_next_period = ktime_get();
                 tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
@@ -249,7 +250,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
          * not give it back to the clockevents layer !
          */
         if (tick_is_broadcast_device(curdev)) {
-                clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN);
+                clockevents_shutdown(curdev);
                 curdev = NULL;
         }
         clockevents_exchange_device(curdev, newdev);
@@ -300,7 +301,8 @@ static void tick_shutdown(unsigned int *cpup)
         if (*cpup == tick_do_timer_cpu) {
                 int cpu = first_cpu(cpu_online_map);
 
-                tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
+                tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu :
+                        TICK_DO_TIMER_NONE;
         }
         spin_unlock_irqrestore(&tick_device_lock, flags);
 }
@@ -311,7 +313,7 @@ static void tick_suspend(void)
         unsigned long flags;
 
         spin_lock_irqsave(&tick_device_lock, flags);
-        clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+        clockevents_shutdown(td->evtdev);
         spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -1,6 +1,10 @@
 /*
  * tick internal variable and functions used by low/high res code
  */
+
+#define TICK_DO_TIMER_NONE        -1
+#define TICK_DO_TIMER_BOOT        -2
+
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 extern spinlock_t tick_device_lock;
 extern ktime_t tick_next_period;
@@ -10,6 +14,8 @@ extern int tick_do_timer_cpu __read_mostly;
 extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
 extern void tick_handle_periodic(struct clock_event_device *dev);
 
+extern void clockevents_shutdown(struct clock_event_device *dev);
+
 /*
  * NO_HZ / high resolution timer shared code
  */
@@ -29,6 +35,7 @@ extern void tick_broadcast_oneshot_control(unsigned long reason);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
+extern int tick_broadcast_oneshot_active(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -37,6 +44,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
+static inline int tick_broadcast_oneshot_active(void) { return 0; }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
@@ -66,6 +74,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
         return 0;
 }
+static inline int tick_broadcast_oneshot_active(void) { return 0; }
 #endif /* !TICK_ONESHOT */
 
 /*
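The two sentinels split the previously overloaded -1: TICK_DO_TIMER_BOOT means "never assigned since boot" and is claimed exactly once in tick_setup_device(), while TICK_DO_TIMER_NONE means "duty given up, the next tick owner takes over" and is used by the NOHZ paths. A condensed sketch of the usage, taken from the tick-common.c and tick-sched.c hunks:

    /* Boot: the first CPU to set up a tick device claims do_timer duty. */
    if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT)
            tick_do_timer_cpu = cpu;

    /* NOHZ: an idle or offlined CPU drops the duty ... */
    tick_do_timer_cpu = TICK_DO_TIMER_NONE;

    /* ... and the next CPU to run its tick handler picks it up. */
    if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
            tick_do_timer_cpu = cpu;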
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -75,6 +75,9 @@ static void tick_do_update_jiffies64(ktime_t now)
                                            incr * ticks);
                 }
                 do_timer(++ticks);
+
+                /* Keep the tick_next_period variable up to date */
+                tick_next_period = ktime_add(last_jiffies_update, tick_period);
         }
         write_sequnlock(&xtime_lock);
 }
@@ -221,7 +224,7 @@ void tick_nohz_stop_sched_tick(int inidle)
          */
         if (unlikely(!cpu_online(cpu))) {
                 if (cpu == tick_do_timer_cpu)
-                        tick_do_timer_cpu = -1;
+                        tick_do_timer_cpu = TICK_DO_TIMER_NONE;
         }
 
         if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
@@ -303,7 +306,7 @@ void tick_nohz_stop_sched_tick(int inidle)
                  * invoked.
                  */
                 if (cpu == tick_do_timer_cpu)
-                        tick_do_timer_cpu = -1;
+                        tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 
                 ts->idle_sleeps++;
 
@@ -468,7 +471,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
          * this duty, then the jiffies update is still serialized by
          * xtime_lock.
          */
-        if (unlikely(tick_do_timer_cpu == -1))
+        if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
                 tick_do_timer_cpu = cpu;
 
         /* Check, if the jiffies need an update */
@@ -570,7 +573,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
          * this duty, then the jiffies update is still serialized by
          * xtime_lock.
          */
-        if (unlikely(tick_do_timer_cpu == -1))
+        if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
                 tick_do_timer_cpu = cpu;
 #endif
 