Merge 3.12-rc6 into driver-core-next

We want these fixes here too.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman
2013-10-19 13:05:38 -07:00
562 changed files with 4622 additions and 3327 deletions

kernel/events/core.c

@@ -7237,15 +7237,15 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
 		perf_remove_from_context(event);
 		unaccount_event_cpu(event, src_cpu);
 		put_ctx(src_ctx);
-		list_add(&event->event_entry, &events);
+		list_add(&event->migrate_entry, &events);
 	}
 	mutex_unlock(&src_ctx->mutex);
 
 	synchronize_rcu();
 
 	mutex_lock(&dst_ctx->mutex);
-	list_for_each_entry_safe(event, tmp, &events, event_entry) {
-		list_del(&event->event_entry);
+	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
+		list_del(&event->migrate_entry);
 		if (event->state >= PERF_EVENT_STATE_OFF)
 			event->state = PERF_EVENT_STATE_INACTIVE;
 		account_event_cpu(event, dst_cpu);
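
Aside: the rename above matters because an object can sit on only one list per embedded list node; building the temporary migration list out of the same entry the source context still uses would corrupt that list. A minimal userspace sketch of the two-links-per-object pattern, using BSD sys/queue.h with invented names (struct event, ctx_link and migrate_link here are illustrative, not the perf code):

/* Each list manipulates only its own link, so membership on one list
 * never disturbs the other. */
#include <stdio.h>
#include <sys/queue.h>

struct event {
	int id;
	TAILQ_ENTRY(event) ctx_link;		/* link used by the context's event list */
	TAILQ_ENTRY(event) migrate_link;	/* separate link for the temporary list */
};

TAILQ_HEAD(event_list, event);

int main(void)
{
	struct event e = { .id = 42 };
	struct event_list ctx_list, migrate_list;
	struct event *pos;

	TAILQ_INIT(&ctx_list);
	TAILQ_INIT(&migrate_list);

	TAILQ_INSERT_TAIL(&ctx_list, &e, ctx_link);
	/* Put the same object on the migration list via its own link,
	 * leaving ctx_link (and ctx_list) untouched. */
	TAILQ_INSERT_TAIL(&migrate_list, &e, migrate_link);

	TAILQ_FOREACH(pos, &migrate_list, migrate_link)
		printf("migrating event %d\n", pos->id);
	return 0;
}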

kernel/kmod.c

@@ -571,6 +571,10 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 	DECLARE_COMPLETION_ONSTACK(done);
 	int retval = 0;
 
+	if (!sub_info->path) {
+		call_usermodehelper_freeinfo(sub_info);
+		return -EINVAL;
+	}
 	helper_lock();
 	if (!khelper_wq || usermodehelper_disabled) {
 		retval = -EBUSY;
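
For context, the new check rejects a NULL path but still frees the subprocess_info, since call_usermodehelper_exec() is expected to consume it on every path; returning early without freeing would leak it. A rough userspace sketch of that convention, with invented names (helper_info, helper_exec and helper_free are stand-ins, not the kernel API):

/* The setup struct handed to the exec function is consumed on every path,
 * so the early validation failure must free it rather than leak it. */
#include <errno.h>
#include <stdlib.h>

struct helper_info {
	char *path;	/* program to run; NULL here models a misbehaving caller */
	char **argv;
};

static void helper_free(struct helper_info *info)
{
	free(info);
}

/* Consumes 'info' whether it succeeds or fails. */
static int helper_exec(struct helper_info *info)
{
	if (!info->path) {
		helper_free(info);	/* reject the request, but do not leak it */
		return -EINVAL;
	}
	/* ... normally the work would be queued here ... */
	helper_free(info);		/* stand-in for the ordinary completion path */
	return 0;
}

int main(void)
{
	struct helper_info *info = calloc(1, sizeof(*info));

	if (!info)
		return 1;
	/* path was never set, so the call fails cleanly with -EINVAL */
	return helper_exec(info) == -EINVAL ? 0 : 1;
}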

kernel/pid.c

@@ -273,6 +273,11 @@ void free_pid(struct pid *pid)
 		 */
 		wake_up_process(ns->child_reaper);
 		break;
+	case PIDNS_HASH_ADDING:
+		/* Handle a fork failure of the first process */
+		WARN_ON(ns->child_reaper);
+		ns->nr_hashed = 0;
+		/* fall through */
 	case 0:
 		schedule_work(&ns->proc_work);
 		break;
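
The added case covers the first fork into a pid namespace failing: the counter is still at the PIDNS_HASH_ADDING sentinel, so it is reset and control deliberately falls through to the normal "namespace is now empty" teardown. A toy sketch of that sentinel-plus-fall-through shape, with invented names and values (ADDING_FIRST, drop_user and friends are illustrative only):

/* A counter that starts at a sentinel while the first user is being added;
 * if that first add fails, teardown normalizes the sentinel and falls
 * through to the usual "count reached zero" cleanup. */
#include <stdio.h>

#define ADDING_FIRST (1U << 31)	/* sentinel: first user not fully added yet */

static unsigned int users = ADDING_FIRST;

static void add_user(void)
{
	users++;
}

static void drop_user(void)
{
	switch (--users) {
	case ADDING_FIRST:
		/* the very first add failed before anyone was registered */
		users = 0;
		/* fall through */
	case 0:
		printf("no users left, tearing down\n");
		break;
	default:
		printf("still in use\n");
		break;
	}
}

int main(void)
{
	add_user();	/* first user starts being added... */
	drop_user();	/* ...but fails, so the sentinel path runs the teardown */
	return 0;
}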

kernel/power/snapshot.c

@@ -743,7 +743,10 @@ int create_basic_memory_bitmaps(void)
 	struct memory_bitmap *bm1, *bm2;
 	int error = 0;
 
-	BUG_ON(forbidden_pages_map || free_pages_map);
+	if (forbidden_pages_map && free_pages_map)
+		return 0;
+	else
+		BUG_ON(forbidden_pages_map || free_pages_map);
 
 	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
 	if (!bm1)
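
The replacement above turns a repeated, fully-initialized call into a harmless success instead of a BUG, while a half-initialized state still trips the check. A small userspace approximation of that guard, with invented names (create_maps, forbidden_map and free_map are stand-ins, not the kernel code):

/* An init routine that treats "both maps already exist" as success instead
 * of a fatal bug, while a half-initialized state still asserts. */
#include <assert.h>
#include <stdlib.h>

static void *forbidden_map, *free_map;

static int create_maps(void)
{
	if (forbidden_map && free_map)
		return 0;			/* already set up: nothing to do */
	assert(!forbidden_map && !free_map);	/* half-initialized would be a bug */
	forbidden_map = malloc(32);
	free_map = malloc(32);
	return (forbidden_map && free_map) ? 0 : -1;
}

int main(void)
{
	/* the second call is a harmless no-op instead of a crash */
	return create_maps() || create_maps();
}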

kernel/power/user.c

@@ -39,6 +39,7 @@ static struct snapshot_data {
 	char frozen;
 	char ready;
 	char platform_support;
+	bool free_bitmaps;
 } snapshot_state;
 
 atomic_t snapshot_device_available = ATOMIC_INIT(1);
@@ -82,6 +83,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 		data->swap = -1;
 		data->mode = O_WRONLY;
 		error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
+		if (!error) {
+			error = create_basic_memory_bitmaps();
+			data->free_bitmaps = !error;
+		}
 		if (error)
 			pm_notifier_call_chain(PM_POST_RESTORE);
 	}
@@ -111,6 +116,8 @@ static int snapshot_release(struct inode *inode, struct file *filp)
 		pm_restore_gfp_mask();
 		free_basic_memory_bitmaps();
 		thaw_processes();
+	} else if (data->free_bitmaps) {
+		free_basic_memory_bitmaps();
 	}
 	pm_notifier_call_chain(data->mode == O_RDONLY ?
 			PM_POST_HIBERNATION : PM_POST_RESTORE);
@@ -231,6 +238,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 			break;
 		pm_restore_gfp_mask();
 		free_basic_memory_bitmaps();
+		data->free_bitmaps = false;
 		thaw_processes();
 		data->frozen = 0;
 		break;
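
Together with the snapshot.c change, the new free_bitmaps flag records whether this particular open() allocated the bitmaps, so release() frees them only when this instance actually owns them. A rough userspace sketch of that ownership-flag pattern, with invented names (struct session, session_open and session_release are illustrative, not the kernel code):

/* open() records in its per-file data whether it allocated the resource,
 * and release() frees it only when this instance actually owns it. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct session {
	void *bitmaps;		/* stand-in for the memory bitmaps */
	bool free_bitmaps;	/* true only if *this* open allocated them */
};

static int session_open(struct session *s)
{
	s->bitmaps = malloc(64);
	s->free_bitmaps = (s->bitmaps != NULL);	/* remember ownership */
	return s->bitmaps ? 0 : -1;
}

static void session_release(struct session *s)
{
	if (s->free_bitmaps) {	/* free only what this open() allocated */
		free(s->bitmaps);
		s->bitmaps = NULL;
		s->free_bitmaps = false;
	}
}

int main(void)
{
	struct session s = { 0 };

	if (session_open(&s) == 0)
		printf("bitmaps allocated by open, freed by release\n");
	session_release(&s);
	return 0;
}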

kernel/softirq.c

@@ -328,10 +328,19 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-	if (!force_irqthreads)
-		__do_softirq();
-	else
+	if (!force_irqthreads) {
+		/*
+		 * We can safely execute softirq on the current stack if
+		 * it is the irq stack, because it should be near empty
+		 * at this stage. But we have no way to know if the arch
+		 * calls irq_exit() on the irq stack. So call softirq
+		 * in its own stack to prevent from any overrun on top
+		 * of a potentially deep task stack.
+		 */
+		do_softirq();
+	} else {
 		wakeup_softirqd();
+	}
 }
 
 static inline void tick_irq_exit(void)
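
The new comment is about running softirq work on a stack that is known to have headroom instead of on top of a possibly deep task stack. Purely to illustrate the "run this work on its own stack" idea, here is a userspace sketch using POSIX ucontext; this is not how the kernel switches stacks, and every name in it is invented:

/* Run a handler on its own dedicated stack, loosely analogous to letting
 * softirq processing use a stack with known headroom. */
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define OWN_STACK_SIZE (64 * 1024)

static ucontext_t main_ctx, work_ctx;

static void deep_work(void)
{
	char buf[16 * 1024];	/* would be risky on an already-deep stack */

	buf[0] = 0;
	printf("running on a private %d KiB stack\n", OWN_STACK_SIZE / 1024);
	/* returning from the entry function resumes uc_link (main_ctx) */
}

int main(void)
{
	void *stack = malloc(OWN_STACK_SIZE);

	if (!stack)
		return 1;
	getcontext(&work_ctx);
	work_ctx.uc_stack.ss_sp = stack;
	work_ctx.uc_stack.ss_size = OWN_STACK_SIZE;
	work_ctx.uc_link = &main_ctx;		/* where to go when deep_work() returns */
	makecontext(&work_ctx, deep_work, 0);

	swapcontext(&main_ctx, &work_ctx);	/* switch stacks, run deep_work, come back */
	printf("back on the original stack\n");
	free(stack);
	return 0;
}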