Merge branch 'x86/cpu' into x86/core
Conflicts:
	arch/x86/kernel/cpu/feature_names.c
	include/asm-x86/cpufeature.h
@@ -486,17 +486,22 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
 	return ret;
 }
 
-int __capable(struct task_struct *t, int cap)
+/**
+ * capable - Determine if the current task has a superior capability in effect
+ * @cap: The capability to be tested for
+ *
+ * Return true if the current task has the given superior capability currently
+ * available for use, false if not.
+ *
+ * This sets PF_SUPERPRIV on the task if the capability is available on the
+ * assumption that it's about to be used.
+ */
+int capable(int cap)
 {
-	if (security_capable(t, cap) == 0) {
-		t->flags |= PF_SUPERPRIV;
+	if (has_capability(current, cap)) {
+		current->flags |= PF_SUPERPRIV;
 		return 1;
 	}
 	return 0;
 }
-
-int capable(int cap)
-{
-	return __capable(current, cap);
-}
 EXPORT_SYMBOL(capable);

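The hunk above folds __capable() into capable(), so the check always applies to
current and PF_SUPERPRIV is only ever set on the calling task. A minimal caller
sketch, assuming a made-up example_do_privileged_op() and the illustrative
choice of CAP_SYS_ADMIN (neither is part of this diff):

    #include <linux/capability.h>
    #include <linux/errno.h>
    #include <linux/sched.h>

    /* Hypothetical caller: gate a privileged operation on the calling task.
     * PF_SUPERPRIV is set on current only when the check succeeds. */
    static int example_do_privileged_op(void)
    {
            if (!capable(CAP_SYS_ADMIN))
                    return -EPERM;
            /* ... privileged work ... */
            return 0;
    }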
@@ -12,7 +12,7 @@
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/kexec.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/highmem.h>
 #include <linux/syscalls.h>
@@ -77,7 +77,7 @@ int kexec_should_crash(struct task_struct *p)
  *
  * The code for the transition from the current kernel to the
  * the new kernel is placed in the control_code_buffer, whose size
- * is given by KEXEC_CONTROL_CODE_SIZE. In the best case only a single
+ * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
  * page of memory is necessary, but some architectures require more.
  * Because this memory must be identity mapped in the transition from
  * virtual to physical addresses it must live in the range
@@ -242,7 +242,7 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
 	 */
 	result = -ENOMEM;
 	image->control_code_page = kimage_alloc_control_pages(image,
-					   get_order(KEXEC_CONTROL_CODE_SIZE));
+					   get_order(KEXEC_CONTROL_PAGE_SIZE));
 	if (!image->control_code_page) {
 		printk(KERN_ERR "Could not allocate control_code_buffer\n");
 		goto out;
@@ -317,7 +317,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
 	 */
 	result = -ENOMEM;
 	image->control_code_page = kimage_alloc_control_pages(image,
-					   get_order(KEXEC_CONTROL_CODE_SIZE));
+					   get_order(KEXEC_CONTROL_PAGE_SIZE));
 	if (!image->control_code_page) {
 		printk(KERN_ERR "Could not allocate control_code_buffer\n");
 		goto out;
@@ -924,19 +924,14 @@ static int kimage_load_segment(struct kimage *image,
  */
 struct kimage *kexec_image;
 struct kimage *kexec_crash_image;
-/*
- * A home grown binary mutex.
- * Nothing can wait so this mutex is safe to use
- * in interrupt context :)
- */
-static int kexec_lock;
+
+static DEFINE_MUTEX(kexec_mutex);
 
 asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
 				struct kexec_segment __user *segments,
 				unsigned long flags)
 {
 	struct kimage **dest_image, *image;
-	int locked;
 	int result;
 
 	/* We only trust the superuser with rebooting the system. */
@@ -972,8 +967,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
 	 *
 	 * KISS: always take the mutex.
 	 */
-	locked = xchg(&kexec_lock, 1);
-	if (locked)
+	if (!mutex_trylock(&kexec_mutex))
 		return -EBUSY;
 
 	dest_image = &kexec_image;
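The kexec hunks above and below replace the open-coded xchg()-based binary lock
with a proper mutex; mutex_trylock() keeps the same "take it or bail out"
behaviour. A rough sketch of the pattern, with a made-up example_mutex and
example_op() that are not part of this commit:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_mutex);

    /* Hypothetical non-blocking critical section in the style used here. */
    static int example_op(void)
    {
            if (!mutex_trylock(&example_mutex))
                    return -EBUSY;  /* lock already held elsewhere */
            /* ... work that must not run concurrently ... */
            mutex_unlock(&example_mutex);
            return 0;
    }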
@@ -1015,8 +1009,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
 	image = xchg(dest_image, image);
 
 out:
-	locked = xchg(&kexec_lock, 0); /* Release the mutex */
-	BUG_ON(!locked);
+	mutex_unlock(&kexec_mutex);
 	kimage_free(image);
 
 	return result;
@@ -1063,10 +1056,7 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
 
 void crash_kexec(struct pt_regs *regs)
 {
-	int locked;
-
-
-	/* Take the kexec_lock here to prevent sys_kexec_load
+	/* Take the kexec_mutex here to prevent sys_kexec_load
 	 * running on one cpu from replacing the crash kernel
 	 * we are using after a panic on a different cpu.
 	 *
@@ -1074,8 +1064,7 @@ void crash_kexec(struct pt_regs *regs)
 	 * of memory the xchg(&kexec_crash_image) would be
 	 * sufficient. But since I reuse the memory...
 	 */
-	locked = xchg(&kexec_lock, 1);
-	if (!locked) {
+	if (mutex_trylock(&kexec_mutex)) {
 		if (kexec_crash_image) {
 			struct pt_regs fixed_regs;
 			crash_setup_regs(&fixed_regs, regs);
@@ -1083,8 +1072,7 @@ void crash_kexec(struct pt_regs *regs)
 			machine_crash_shutdown(&fixed_regs);
 			machine_kexec(kexec_crash_image);
 		}
-		locked = xchg(&kexec_lock, 0);
-		BUG_ON(!locked);
+		mutex_unlock(&kexec_mutex);
 	}
 }
 
@@ -1426,25 +1414,23 @@ static int __init crash_save_vmcoreinfo_init(void)
 
 module_init(crash_save_vmcoreinfo_init)
 
-/**
- * kernel_kexec - reboot the system
- *
- * Move into place and start executing a preloaded standalone
- * executable. If nothing was preloaded return an error.
+/*
+ * Move into place and start executing a preloaded standalone
+ * executable. If nothing was preloaded return an error.
  */
 int kernel_kexec(void)
 {
 	int error = 0;
 
-	if (xchg(&kexec_lock, 1))
+	if (!mutex_trylock(&kexec_mutex))
 		return -EBUSY;
 	if (!kexec_image) {
 		error = -EINVAL;
 		goto Unlock;
 	}
 
-	if (kexec_image->preserve_context) {
 #ifdef CONFIG_KEXEC_JUMP
+	if (kexec_image->preserve_context) {
 		mutex_lock(&pm_mutex);
 		pm_prepare_console();
 		error = freeze_processes();
@@ -1459,6 +1445,7 @@ int kernel_kexec(void)
 		error = disable_nonboot_cpus();
 		if (error)
 			goto Resume_devices;
+		device_pm_lock();
 		local_irq_disable();
 		/* At this point, device_suspend() has been called,
 		 * but *not* device_power_down(). We *must*
@@ -1470,26 +1457,22 @@ int kernel_kexec(void)
 		error = device_power_down(PMSG_FREEZE);
 		if (error)
 			goto Enable_irqs;
-		save_processor_state();
+	} else
 #endif
-	} else {
-		blocking_notifier_call_chain(&reboot_notifier_list,
-					SYS_RESTART, NULL);
-		system_state = SYSTEM_RESTART;
-		device_shutdown();
-		sysdev_shutdown();
+	{
+		kernel_restart_prepare(NULL);
 		printk(KERN_EMERG "Starting new kernel\n");
 		machine_shutdown();
 	}
 
 	machine_kexec(kexec_image);
 
-	if (kexec_image->preserve_context) {
 #ifdef CONFIG_KEXEC_JUMP
-		restore_processor_state();
+	if (kexec_image->preserve_context) {
 		device_power_up(PMSG_RESTORE);
 Enable_irqs:
 		local_irq_enable();
+		device_pm_unlock();
 		enable_nonboot_cpus();
 Resume_devices:
 		device_resume(PMSG_RESTORE);
@@ -1499,11 +1482,10 @@ int kernel_kexec(void)
 Restore_console:
 		pm_restore_console();
 		mutex_unlock(&pm_mutex);
-#endif
 	}
+#endif
 
 Unlock:
-	xchg(&kexec_lock, 0);
-
+	mutex_unlock(&kexec_mutex);
 	return error;
 }

@@ -1759,11 +1759,10 @@ static void check_chain_key(struct task_struct *curr)
 		hlock = curr->held_locks + i;
 		if (chain_key != hlock->prev_chain_key) {
 			debug_locks_off();
-			printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n",
+			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
 				curr->lockdep_depth, i,
 				(unsigned long long)chain_key,
 				(unsigned long long)hlock->prev_chain_key);
-			WARN_ON(1);
 			return;
 		}
 		id = hlock->class_idx - 1;
@@ -1778,11 +1777,10 @@ static void check_chain_key(struct task_struct *curr)
 	}
 	if (chain_key != curr->curr_chain_key) {
 		debug_locks_off();
-		printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n",
+		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
 			curr->lockdep_depth, i,
 			(unsigned long long)chain_key,
 			(unsigned long long)curr->curr_chain_key);
-		WARN_ON(1);
 	}
 #endif
 }
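Both check_chain_key() hunks collapse a printk() followed by WARN_ON(1) into a
single WARN() call, so the formatted message and the backtrace land in the log
together. A hedged illustration with made-up names, not taken from this commit:

    #include <linux/kernel.h>

    /* Hypothetical sanity check: one WARN() replaces the old
     * printk() + WARN_ON(1) pair. */
    static void example_check_depth(unsigned int depth, unsigned int max)
    {
            if (depth > max)
                    WARN(1, "depth %u exceeds maximum %u\n", depth, max);
    }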
@@ -2584,7 +2582,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	hlock->trylock = trylock;
 	hlock->read = read;
 	hlock->check = check;
-	hlock->hardirqs_off = hardirqs_off;
+	hlock->hardirqs_off = !!hardirqs_off;
 #ifdef CONFIG_LOCK_STAT
 	hlock->waittime_stamp = 0;
 	hlock->holdtime_stamp = sched_clock();

@@ -50,8 +50,21 @@ extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;
 
+#ifdef CONFIG_PROVE_LOCKING
 extern unsigned long lockdep_count_forward_deps(struct lock_class *);
 extern unsigned long lockdep_count_backward_deps(struct lock_class *);
+#else
+static inline unsigned long
+lockdep_count_forward_deps(struct lock_class *class)
+{
+	return 0;
+}
+static inline unsigned long
+lockdep_count_backward_deps(struct lock_class *class)
+{
+	return 0;
+}
+#endif
 
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*

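The header hunk above uses the usual kernel pattern for optional facilities:
real declarations when CONFIG_PROVE_LOCKING is enabled, zero-returning static
inline stubs otherwise, so callers (such as the seq_file code in the following
hunks) need no #ifdef of their own. A sketch of the same pattern for a
hypothetical CONFIG_EXAMPLE_STATS option and made-up example_count_events():

    struct example_dev;

    #ifdef CONFIG_EXAMPLE_STATS
    extern unsigned long example_count_events(struct example_dev *dev);
    #else
    static inline unsigned long example_count_events(struct example_dev *dev)
    {
            return 0;
    }
    #endif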
@@ -82,7 +82,6 @@ static void print_name(struct seq_file *m, struct lock_class *class)
 
 static int l_show(struct seq_file *m, void *v)
 {
-	unsigned long nr_forward_deps, nr_backward_deps;
 	struct lock_class *class = v;
 	struct lock_list *entry;
 	char c1, c2, c3, c4;
@@ -96,11 +95,10 @@ static int l_show(struct seq_file *m, void *v)
 #ifdef CONFIG_DEBUG_LOCKDEP
 	seq_printf(m, " OPS:%8ld", class->ops);
 #endif
-	nr_forward_deps = lockdep_count_forward_deps(class);
-	seq_printf(m, " FD:%5ld", nr_forward_deps);
-
-	nr_backward_deps = lockdep_count_backward_deps(class);
-	seq_printf(m, " BD:%5ld", nr_backward_deps);
+#ifdef CONFIG_PROVE_LOCKING
+	seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
+	seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
+#endif
 
 	get_usage_chars(class, &c1, &c2, &c3, &c4);
 	seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
@@ -325,7 +323,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
 			nr_hardirq_read_unsafe++;
 
+#ifdef CONFIG_PROVE_LOCKING
 		sum_forward_deps += lockdep_count_forward_deps(class);
+#endif
 	}
 #ifdef CONFIG_DEBUG_LOCKDEP
 	DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);

@@ -14,7 +14,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/nsproxy.h>
 #include <linux/init_task.h>
 #include <linux/mnt_namespace.h>

@@ -14,7 +14,6 @@
 #include <linux/module.h>
 #include <linux/file.h>
 #include <linux/utsname.h>
-#include <linux/version.h>
 #include <linux/delay.h>
 #include <linux/bitops.h>
 #include <linux/genhd.h>

@@ -140,7 +140,7 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 	if (!dumpable && !capable(CAP_SYS_PTRACE))
 		return -EPERM;
 
-	return security_ptrace(current, task, mode);
+	return security_ptrace_may_access(task, mode);
 }
 
 bool ptrace_may_access(struct task_struct *task, unsigned int mode)
@@ -499,8 +499,7 @@ repeat:
 		goto repeat;
 	}
 
-	ret = security_ptrace(current->parent, current,
-			      PTRACE_MODE_ATTACH);
+	ret = security_ptrace_traceme(current->parent);
 
 	/*
 	 * Set the ptrace bit in the process ptrace flags.

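These two ptrace hunks switch from the single security_ptrace() hook to the
split security_ptrace_may_access()/security_ptrace_traceme() hooks. Outside the
ptrace code itself, callers normally go through ptrace_may_access(), whose
prototype appears above; a hedged sketch with a made-up example_show_task():

    #include <linux/errno.h>
    #include <linux/ptrace.h>
    #include <linux/sched.h>

    /* Hypothetical reader of another task's state, gated the same way
     * procfs-style interfaces gate themselves. */
    static int example_show_task(struct task_struct *task)
    {
            if (!ptrace_may_access(task, PTRACE_MODE_READ))
                    return -EACCES;
            /* ... safe to report task state here ... */
            return 0;
    }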
@@ -77,6 +77,7 @@ void wakeme_after_rcu(struct rcu_head *head)
  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
  * and may be nested.
  */
+void synchronize_rcu(void);	/* Makes kernel-doc tools happy */
 synchronize_rcu_xxx(synchronize_rcu, call_rcu)
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 

@@ -808,9 +808,9 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
 
 /*
  * ratelimit for updating the group shares.
- * default: 0.5ms
+ * default: 0.25ms
  */
-const_debug unsigned int sysctl_sched_shares_ratelimit = 500000;
+unsigned int sysctl_sched_shares_ratelimit = 250000;
 
 /*
  * period over which we measure -rt task cpu usage in us.
@@ -4669,6 +4669,52 @@ int __sched wait_for_completion_killable(struct completion *x)
 }
 EXPORT_SYMBOL(wait_for_completion_killable);
 
+/**
+ * try_wait_for_completion - try to decrement a completion without blocking
+ * @x: completion structure
+ *
+ * Returns: 0 if a decrement cannot be done without blocking
+ *          1 if a decrement succeeded.
+ *
+ * If a completion is being used as a counting completion,
+ * attempt to decrement the counter without blocking. This
+ * enables us to avoid waiting if the resource the completion
+ * is protecting is not available.
+ */
+bool try_wait_for_completion(struct completion *x)
+{
+	int ret = 1;
+
+	spin_lock_irq(&x->wait.lock);
+	if (!x->done)
+		ret = 0;
+	else
+		x->done--;
+	spin_unlock_irq(&x->wait.lock);
+	return ret;
+}
+EXPORT_SYMBOL(try_wait_for_completion);
+
+/**
+ * completion_done - Test to see if a completion has any waiters
+ * @x: completion structure
+ *
+ * Returns: 0 if there are waiters (wait_for_completion() in progress)
+ *          1 if there are no waiters.
+ *
+ */
+bool completion_done(struct completion *x)
+{
+	int ret = 1;
+
+	spin_lock_irq(&x->wait.lock);
+	if (!x->done)
+		ret = 0;
+	spin_unlock_irq(&x->wait.lock);
+	return ret;
+}
+EXPORT_SYMBOL(completion_done);
+
 static long __sched
 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
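try_wait_for_completion() and completion_done(), added above, give non-blocking
ways to consume or inspect a completion. A usage sketch under the assumption of
a made-up example_slot_ready completion and example_try_take_slot() helper:

    #include <linux/completion.h>
    #include <linux/errno.h>

    static DECLARE_COMPLETION(example_slot_ready);

    /* Hypothetical fast path: take a slot if one has already been completed,
     * otherwise report that the caller would have had to block. */
    static int example_try_take_slot(void)
    {
            if (!try_wait_for_completion(&example_slot_ready))
                    return -EAGAIN;
            /* ... slot is ours; the completion count was decremented ... */
            return 0;
    }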
@@ -5740,6 +5786,8 @@ static inline void sched_init_granularity(void)
 		sysctl_sched_latency = limit;
 
 	sysctl_sched_wakeup_granularity *= factor;
+
+	sysctl_sched_shares_ratelimit *= factor;
 }
 
 #ifdef CONFIG_SMP
@@ -8462,8 +8510,8 @@ struct task_group *sched_create_group(struct task_group *parent)
 	WARN_ON(!parent); /* root should already exist */
 
 	tg->parent = parent;
-	list_add_rcu(&tg->siblings, &parent->children);
 	INIT_LIST_HEAD(&tg->children);
+	list_add_rcu(&tg->siblings, &parent->children);
 	spin_unlock_irqrestore(&task_group_lock, flags);
 
 	return tg;

@@ -8,6 +8,6 @@ SCHED_FEAT(SYNC_WAKEUPS, 1)
 SCHED_FEAT(HRTICK, 1)
 SCHED_FEAT(DOUBLE_TICK, 0)
 SCHED_FEAT(ASYM_GRAN, 1)
-SCHED_FEAT(LB_BIAS, 0)
+SCHED_FEAT(LB_BIAS, 1)
 SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
 SCHED_FEAT(ASYM_EFF_LOAD, 1)

@@ -298,7 +298,7 @@ static void __disable_runtime(struct rq *rq)
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
-		if (iter == rt_rq)
+		if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 			continue;
 
 		spin_lock(&iter->rt_runtime_lock);

@@ -1338,6 +1338,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)
 	struct siginfo info;
 	unsigned long flags;
 	struct sighand_struct *psig;
+	int ret = sig;
 
 	BUG_ON(sig == -1);
 
@@ -1402,7 +1403,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)
 		 * is implementation-defined: we do (if you don't want
 		 * it, just use SIG_IGN instead).
 		 */
-		tsk->exit_signal = -1;
+		ret = tsk->exit_signal = -1;
 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
 			sig = -1;
 	}
@@ -1411,7 +1412,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)
 		__wake_up_parent(tsk, tsk->parent);
 	spin_unlock_irqrestore(&psig->siglock, flags);
 
-	return sig;
+	return ret;
 }
 
 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)

kernel/smp.c
@@ -210,8 +210,10 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 {
 	struct call_single_data d;
 	unsigned long flags;
-	/* prevent preemption and reschedule on another processor */
+	/* prevent preemption and reschedule on another processor,
+	   as well as CPU removal */
 	int me = get_cpu();
+	int err = 0;
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -220,7 +222,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 		local_irq_save(flags);
 		func(info);
 		local_irq_restore(flags);
-	} else {
+	} else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) {
 		struct call_single_data *data = NULL;
 
 		if (!wait) {
@@ -236,10 +238,12 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 			data->func = func;
 			data->info = info;
 			generic_exec_single(cpu, data);
+		} else {
+			err = -ENXIO;	/* CPU not online */
 		}
 
 	put_cpu();
-	return 0;
+	return err;
 }
 EXPORT_SYMBOL(smp_call_function_single);
 

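With the change above, smp_call_function_single() validates the target CPU and
returns -ENXIO when it is not online instead of silently appearing to succeed.
A hedged caller sketch; example_on_cpu() and example_kick_cpu() are made up:

    #include <linux/kernel.h>
    #include <linux/smp.h>

    static void example_on_cpu(void *info)
    {
            /* runs on the target CPU */
    }

    /* Hypothetical caller that now checks the return value. */
    static int example_kick_cpu(int cpu)
    {
            int err = smp_call_function_single(cpu, example_on_cpu, NULL, 1);

            if (err)
                    printk(KERN_WARNING "example: cpu %d unreachable (%d)\n",
                           cpu, err);
            return err;
    }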
@@ -290,7 +290,6 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
 	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
 	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
-
 EXPORT_SYMBOL(_spin_lock_nested);
 
 unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
@@ -312,7 +311,6 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
 #endif
 	return flags;
 }
-
 EXPORT_SYMBOL(_spin_lock_irqsave_nested);
 
 void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
@@ -322,7 +320,6 @@ void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
 	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
 	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
-
 EXPORT_SYMBOL(_spin_lock_nest_lock);
 
 #endif

kernel/sys.c
@@ -169,9 +169,9 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
 			pgrp = find_vpid(who);
 		else
 			pgrp = task_pgrp(current);
-		do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
+		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 			error = set_one_prio(p, niceval, error);
-		} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
+		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 		break;
 	case PRIO_USER:
 		user = current->user;
@@ -229,11 +229,11 @@ asmlinkage long sys_getpriority(int which, int who)
 			pgrp = find_vpid(who);
 		else
 			pgrp = task_pgrp(current);
-		do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
+		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 			niceval = 20 - task_nice(p);
 			if (niceval > retval)
 				retval = niceval;
-		} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
+		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 		break;
 	case PRIO_USER:
 		user = current->user;
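Both sys_setpriority() and sys_getpriority() now iterate with
do_each_pid_thread()/while_each_pid_thread(), which walks every thread of every
process in the group, where do_each_pid_task() only visits the tasks directly
attached to the pid. A rough sketch, assuming a made-up
example_count_pgrp_threads() and that the caller already holds tasklist_lock or
rcu_read_lock as these call sites do:

    #include <linux/pid.h>
    #include <linux/sched.h>

    /* Hypothetical walk over every thread attached to a process group. */
    static int example_count_pgrp_threads(struct pid *pgrp)
    {
            struct task_struct *p;
            int count = 0;

            do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
                    count++;
            } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);

            return count;
    }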
@@ -274,7 +274,7 @@ void emergency_restart(void)
 }
 EXPORT_SYMBOL_GPL(emergency_restart);
 
-static void kernel_restart_prepare(char *cmd)
+void kernel_restart_prepare(char *cmd)
 {
 	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
 	system_state = SYSTEM_RESTART;

@@ -643,17 +643,21 @@ void tick_setup_sched_timer(void)
 	ts->nohz_mode = NOHZ_MODE_HIGHRES;
 #endif
 }
+#endif /* HIGH_RES_TIMERS */
 
+#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
 void tick_cancel_sched_timer(int cpu)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
+# ifdef CONFIG_HIGH_RES_TIMERS
 	if (ts->sched_timer.base)
 		hrtimer_cancel(&ts->sched_timer);
+# endif
 
 	ts->nohz_mode = NOHZ_MODE_INACTIVE;
 }
-#endif /* HIGH_RES_TIMERS */
+#endif
 
 /**
  * Async notification about clocksource changes

@@ -6,7 +6,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/nsproxy.h>
 #include <linux/slab.h>
 #include <linux/user_namespace.h>

@@ -12,7 +12,6 @@
 #include <linux/module.h>
 #include <linux/uts.h>
 #include <linux/utsname.h>
-#include <linux/version.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 

@@ -12,7 +12,6 @@
 #include <linux/module.h>
 #include <linux/uts.h>
 #include <linux/utsname.h>
-#include <linux/version.h>
 #include <linux/sysctl.h>
 
 static void *get_uts(ctl_table *table, int write)