Merge branch 'acpi-pm' into pm-sleep
@@ -643,13 +643,13 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
 		if ((task_active_pid_ns(current) != &init_pid_ns))
 			return -EPERM;
 
-		if (!capable(CAP_AUDIT_CONTROL))
+		if (!netlink_capable(skb, CAP_AUDIT_CONTROL))
 			err = -EPERM;
 		break;
 	case AUDIT_USER:
 	case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
 	case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
-		if (!capable(CAP_AUDIT_WRITE))
+		if (!netlink_capable(skb, CAP_AUDIT_WRITE))
 			err = -EPERM;
 		break;
 	default: /* bad msg */
@@ -120,7 +120,7 @@ void context_tracking_user_enter(void)
  * instead of preempt_schedule() to exit user context if needed before
  * calling the scheduler.
  */
-asmlinkage void __sched notrace preempt_schedule_context(void)
+asmlinkage __visible void __sched notrace preempt_schedule_context(void)
 {
 	enum ctx_state prev_ctx;
 
@@ -234,6 +234,11 @@ again:
 			goto again;
 		}
 		timer->base = new_base;
+	} else {
+		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+			cpu = this_cpu;
+			goto again;
+		}
 	}
 	return new_base;
 }
@@ -569,6 +574,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 
 	cpu_base->expires_next.tv64 = expires_next.tv64;
 
+	/*
+	 * If a hang was detected in the last timer interrupt then we
+	 * leave the hang delay active in the hardware. We want the
+	 * system to make progress. That also prevents the following
+	 * scenario:
+	 * T1 expires 50ms from now
+	 * T2 expires 5s from now
+	 *
+	 * T1 is removed, so this code is called and would reprogram
+	 * the hardware to 5s from now. Any hrtimer_start after that
+	 * will not reprogram the hardware due to hang_detected being
+	 * set. So we'd effectivly block all timers until the T2 event
+	 * fires.
+	 */
+	if (cpu_base->hang_detected)
+		return;
+
 	if (cpu_base->expires_next.tv64 != KTIME_MAX)
 		tick_program_event(cpu_base->expires_next, 1);
 }
@@ -363,6 +363,13 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
 		if (from > irq)
 			return -EINVAL;
 		from = irq;
+	} else {
+		/*
+		 * For interrupts which are freely allocated the
+		 * architecture can force a lower bound to the @from
+		 * argument. x86 uses this to exclude the GSI space.
+		 */
+		from = arch_dynirq_lower_bound(from);
 	}
 
 	mutex_lock(&sparse_irq_lock);
@@ -4188,7 +4188,7 @@ void debug_show_held_locks(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
-asmlinkage void lockdep_sys_exit(void)
+asmlinkage __visible void lockdep_sys_exit(void)
 {
 	struct task_struct *curr = current;
 
@@ -815,9 +815,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		return -EFAULT;
 	name[MODULE_NAME_LEN-1] = '\0';
 
-	if (!(flags & O_NONBLOCK))
-		pr_warn("waiting module removal not supported: please upgrade\n");
-
 	if (mutex_lock_interruptible(&module_mutex) != 0)
 		return -EINTR;
 
@@ -3271,6 +3268,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
 
 	dynamic_debug_setup(info->debug, info->num_debug);
 
+	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
+	ftrace_module_init(mod);
+
 	/* Finally it's fully formed, ready to start executing. */
 	err = complete_formation(mod, info);
 	if (err)
@@ -1586,7 +1586,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
 	return -ENOMEM;
 }
 
-asmlinkage int swsusp_save(void)
+asmlinkage __visible int swsusp_save(void)
 {
 	unsigned int nr_pages, nr_highmem;
 
@@ -38,6 +38,7 @@ struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = {
 };
 
 static const struct platform_suspend_ops *suspend_ops;
+static const struct platform_freeze_ops *freeze_ops;
 
 static bool need_suspend_ops(suspend_state_t state)
 {
@@ -47,6 +48,13 @@ static bool need_suspend_ops(suspend_state_t state)
 static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
 static bool suspend_freeze_wake;
 
+void freeze_set_ops(const struct platform_freeze_ops *ops)
+{
+	lock_system_sleep();
+	freeze_ops = ops;
+	unlock_system_sleep();
+}
+
 static void freeze_begin(void)
 {
 	suspend_freeze_wake = false;
@@ -291,6 +299,10 @@ int suspend_devices_and_enter(suspend_state_t state)
 		error = suspend_ops->begin(state);
 		if (error)
 			goto Close;
+	} else if (state == PM_SUSPEND_FREEZE && freeze_ops->begin) {
+		error = freeze_ops->begin();
+		if (error)
+			goto Close;
 	}
 	suspend_console();
 	suspend_test_start();
@@ -316,6 +328,9 @@ int suspend_devices_and_enter(suspend_state_t state)
 Close:
 	if (need_suspend_ops(state) && suspend_ops->end)
 		suspend_ops->end();
+	else if (state == PM_SUSPEND_FREEZE && freeze_ops->end)
+		freeze_ops->end();
+
 	trace_machine_suspend(PWR_EVENT_EXIT);
 	return error;
 
@@ -1674,7 +1674,7 @@ EXPORT_SYMBOL(printk_emit);
  *
  * See the vsnprintf() documentation for format string extensions over C99.
  */
-asmlinkage int printk(const char *fmt, ...)
+asmlinkage __visible int printk(const char *fmt, ...)
 {
 	va_list args;
 	int r;
@@ -1737,7 +1737,7 @@ void early_vprintk(const char *fmt, va_list ap)
 	}
 }
 
-asmlinkage void early_printk(const char *fmt, ...)
+asmlinkage __visible void early_printk(const char *fmt, ...)
 {
 	va_list ap;
 
@@ -2192,7 +2192,7 @@ static inline void post_schedule(struct rq *rq)
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
  */
-asmlinkage void schedule_tail(struct task_struct *prev)
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct rq *rq = this_rq();
@@ -2741,7 +2741,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
 		blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void __sched schedule(void)
+asmlinkage __visible void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
 
@@ -2751,7 +2751,7 @@ asmlinkage void __sched schedule(void)
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_CONTEXT_TRACKING
-asmlinkage void __sched schedule_user(void)
+asmlinkage __visible void __sched schedule_user(void)
 {
 	/*
 	 * If we come here after a random call to set_need_resched(),
@@ -2783,7 +2783,7 @@ void __sched schedule_preempt_disabled(void)
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched notrace preempt_schedule(void)
+asmlinkage __visible void __sched notrace preempt_schedule(void)
 {
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
@@ -2813,7 +2813,7 @@ EXPORT_SYMBOL(preempt_schedule);
  * Note, that this is called and return with irqs disabled. This will
  * protect us against recursive calling from irq.
  */
-asmlinkage void __sched preempt_schedule_irq(void)
+asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
 	enum ctx_state prev_state;
 
@@ -223,7 +223,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
-asmlinkage void __do_softirq(void)
+asmlinkage __visible void __do_softirq(void)
 {
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
 	unsigned long old_flags = current->flags;
@@ -299,7 +299,7 @@ restart:
 	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
 
-asmlinkage void do_softirq(void)
+asmlinkage __visible void do_softirq(void)
 {
 	__u32 pending;
 	unsigned long flags;
@@ -779,3 +779,8 @@ int __init __weak arch_early_irq_init(void)
 {
 	return 0;
 }
+
+unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
+{
+	return from;
+}
@@ -838,7 +838,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
 
 	bit = find_last_bit(&mask, BITS_PER_LONG);
 
-	mask = (1 << bit) - 1;
+	mask = (1UL << bit) - 1;
 
 	expires_limit = expires_limit & ~(mask);
 
@@ -4330,16 +4330,11 @@ static void ftrace_init_module(struct module *mod,
 	ftrace_process_locs(mod, start, end);
 }
 
-static int ftrace_module_notify_enter(struct notifier_block *self,
-				      unsigned long val, void *data)
+void ftrace_module_init(struct module *mod)
 {
-	struct module *mod = data;
-
-	if (val == MODULE_STATE_COMING)
-		ftrace_init_module(mod, mod->ftrace_callsites,
-				   mod->ftrace_callsites +
-				   mod->num_ftrace_callsites);
-	return 0;
+	ftrace_init_module(mod, mod->ftrace_callsites,
+			   mod->ftrace_callsites +
+			   mod->num_ftrace_callsites);
 }
 
 static int ftrace_module_notify_exit(struct notifier_block *self,
@@ -4353,11 +4348,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
 	return 0;
 }
 #else
-static int ftrace_module_notify_enter(struct notifier_block *self,
-				      unsigned long val, void *data)
-{
-	return 0;
-}
 static int ftrace_module_notify_exit(struct notifier_block *self,
 				     unsigned long val, void *data)
 {
@@ -4365,11 +4355,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block ftrace_module_enter_nb = {
-	.notifier_call = ftrace_module_notify_enter,
-	.priority = INT_MAX,	/* Run before anything that can use kprobes */
-};
-
 struct notifier_block ftrace_module_exit_nb = {
 	.notifier_call = ftrace_module_notify_exit,
 	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
@@ -4403,10 +4388,6 @@ void __init ftrace_init(void)
 				  __start_mcount_loc,
 				  __stop_mcount_loc);
 
-	ret = register_module_notifier(&ftrace_module_enter_nb);
-	if (ret)
-		pr_warning("Failed to register trace ftrace module enter notifier\n");
-
 	ret = register_module_notifier(&ftrace_module_exit_nb);
 	if (ret)
 		pr_warning("Failed to register trace ftrace module exit notifier\n");
@@ -77,7 +77,7 @@ event_triggers_call(struct ftrace_event_file *file, void *rec)
 			data->ops->func(data);
 			continue;
 		}
-		filter = rcu_dereference(data->filter);
+		filter = rcu_dereference_sched(data->filter);
 		if (filter && !filter_match_preds(filter, rec))
 			continue;
 		if (data->cmd_ops->post_trigger) {
@@ -188,7 +188,6 @@ static int tracepoint_add_func(struct tracepoint *tp,
 		WARN_ON_ONCE(1);
 		return PTR_ERR(old);
 	}
-	release_probes(old);
 
 	/*
 	 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
@@ -200,6 +199,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
 	rcu_assign_pointer(tp->funcs, tp_funcs);
 	if (!static_key_enabled(&tp->key))
 		static_key_slow_inc(&tp->key);
+	release_probes(old);
 	return 0;
 }
 
@@ -221,7 +221,6 @@ static int tracepoint_remove_func(struct tracepoint *tp,
 		WARN_ON_ONCE(1);
 		return PTR_ERR(old);
 	}
-	release_probes(old);
 
 	if (!tp_funcs) {
 		/* Removed last function */
@@ -232,6 +231,7 @@ static int tracepoint_remove_func(struct tracepoint *tp,
 		static_key_slow_dec(&tp->key);
 	}
 	rcu_assign_pointer(tp->funcs, tp_funcs);
+	release_probes(old);
 	return 0;
 }