Merge ../powerpc-merge
@@ -841,7 +841,7 @@ static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
for (aux = context->aux; aux; aux = aux->next) {

ab = audit_log_start(context, GFP_KERNEL, aux->type);
ab = audit_log_start(context, gfp_mask, aux->type);
if (!ab)
continue; /* audit_panic has been called */

@@ -878,14 +878,14 @@ static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
}

if (context->pwd && context->pwdmnt) {
ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD);
ab = audit_log_start(context, gfp_mask, AUDIT_CWD);
if (ab) {
audit_log_d_path(ab, "cwd=", context->pwd, context->pwdmnt);
audit_log_end(ab);
}
}
for (i = 0; i < context->name_count; i++) {
ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
ab = audit_log_start(context, gfp_mask, AUDIT_PATH);
if (!ab)
continue; /* audit_panic has been called */

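Context for the two hunks above: audit_log_exit() now threads its gfp_mask argument into audit_log_start() instead of hard-coding GFP_KERNEL, so records emitted on the exit path allocate under the caller's constraints. A minimal user-space sketch of that pattern follows; gfp_t, the flag values and log_start()/log_exit() are illustrative stand-ins, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for gfp_t and the audit buffer; not the kernel API. */
typedef unsigned int gfp_t;
#define GFP_KERNEL  0x01u   /* may sleep */
#define GFP_ATOMIC  0x02u   /* must not sleep */

struct audit_buffer { gfp_t gfp; };

/* The helper takes the caller's allocation mask instead of assuming GFP_KERNEL. */
static struct audit_buffer *log_start(gfp_t gfp_mask, int type)
{
	struct audit_buffer *ab = malloc(sizeof(*ab));
	if (!ab)
		return NULL;            /* caller skips the record, as the diff does */
	ab->gfp = gfp_mask;
	printf("record type %d allocated with mask %#x\n", type, ab->gfp);
	return ab;
}

/* The exit path simply forwards whatever mask it was given. */
static void log_exit(gfp_t gfp_mask)
{
	struct audit_buffer *ab = log_start(gfp_mask, 1300);
	free(ab);
}

int main(void)
{
	log_exit(GFP_KERNEL);   /* normal context */
	log_exit(GFP_ATOMIC);   /* atomic context: same code path, stricter mask */
	return 0;
}
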
@@ -1977,6 +1977,39 @@ void cpuset_fork(struct task_struct *child)
* We don't need to task_lock() this reference to tsk->cpuset,
* because tsk is already marked PF_EXITING, so attach_task() won't
* mess with it, or task is a failed fork, never visible to attach_task.
*
* Hack:
*
* Set the exiting tasks cpuset to the root cpuset (top_cpuset).
*
* Don't leave a task unable to allocate memory, as that is an
* accident waiting to happen should someone add a callout in
* do_exit() after the cpuset_exit() call that might allocate.
* If a task tries to allocate memory with an invalid cpuset,
* it will oops in cpuset_update_task_memory_state().
*
* We call cpuset_exit() while the task is still competent to
* handle notify_on_release(), then leave the task attached to
* the root cpuset (top_cpuset) for the remainder of its exit.
*
* To do this properly, we would increment the reference count on
* top_cpuset, and near the very end of the kernel/exit.c do_exit()
* code we would add a second cpuset function call, to drop that
* reference. This would just create an unnecessary hot spot on
* the top_cpuset reference count, to no avail.
*
* Normally, holding a reference to a cpuset without bumping its
* count is unsafe. The cpuset could go away, or someone could
* attach us to a different cpuset, decrementing the count on
* the first cpuset that we never incremented. But in this case,
* top_cpuset isn't going away, and either task has PF_EXITING set,
* which wards off any attach_task() attempts, or task is a failed
* fork, never visible to attach_task.
*
* Another way to do this would be to set the cpuset pointer
* to NULL here, and check in cpuset_update_task_memory_state()
* for a NULL pointer. This hack avoids that NULL check, for no
* cost (other than this way too long comment ;).
**/

void cpuset_exit(struct task_struct *tsk)

@@ -1984,7 +2017,7 @@ void cpuset_exit(struct task_struct *tsk)
struct cpuset *cs;

cs = tsk->cpuset;
tsk->cpuset = NULL;
tsk->cpuset = &top_cpuset; /* Hack - see comment above */

if (notify_on_release(cs)) {
char *pathbuf = NULL;

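The long comment above argues for pointing an exiting task at the permanent top_cpuset rather than at NULL, so allocation paths never need a NULL check. A hedged user-space sketch of the same idea (the names mirror the kernel ones, but this is not kernel code):

#include <stdio.h>

/* Simplified stand-ins: the real cpuset has a reference count and callbacks. */
struct cpuset { const char *name; };

static struct cpuset top_cpuset = { "top_cpuset" };   /* never freed */

struct task { struct cpuset *cpuset; };

/* Allocation path: no NULL check needed, because exit points at top_cpuset. */
static void update_task_memory_state(struct task *tsk)
{
	printf("allocating under cpuset %s\n", tsk->cpuset->name);
}

static void cpuset_exit(struct task *tsk)
{
	struct cpuset *cs = tsk->cpuset;
	/* Instead of tsk->cpuset = NULL, keep a valid (immortal) cpuset. */
	tsk->cpuset = &top_cpuset;
	printf("released %s, task now on %s\n", cs->name, tsk->cpuset->name);
}

int main(void)
{
	struct cpuset mine = { "my_cpuset" };
	struct task t = { &mine };
	cpuset_exit(&t);
	update_task_memory_state(&t);   /* safe even after exit has started */
	return 0;
}
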
@@ -360,6 +360,9 @@ void daemonize(const char *name, ...)
fs = init_task.fs;
current->fs = fs;
atomic_inc(&fs->count);
exit_namespace(current);
current->namespace = init_task.namespace;
get_namespace(current->namespace);
exit_files(current);
current->files = init_task.files;
atomic_inc(&current->files->count);

@@ -1123,8 +1123,8 @@ static task_t *copy_process(unsigned long clone_flags,
p->real_parent = current;
p->parent = p->real_parent;

spin_lock(&current->sighand->siglock);
if (clone_flags & CLONE_THREAD) {
spin_lock(&current->sighand->siglock);
/*
* Important: if an exit-all has been started then
* do not create this new thread - the whole thread

@@ -1162,8 +1162,6 @@ static task_t *copy_process(unsigned long clone_flags,
*/
p->it_prof_expires = jiffies_to_cputime(1);
}

spin_unlock(&current->sighand->siglock);
}

/*

@@ -1175,8 +1173,6 @@ static task_t *copy_process(unsigned long clone_flags,
if (unlikely(p->ptrace & PT_PTRACED))
__ptrace_link(p, current->parent);

attach_pid(p, PIDTYPE_PID, p->pid);
attach_pid(p, PIDTYPE_TGID, p->tgid);
if (thread_group_leader(p)) {
p->signal->tty = current->signal->tty;
p->signal->pgrp = process_group(current);

@@ -1186,9 +1182,12 @@ static task_t *copy_process(unsigned long clone_flags,
if (p->pid)
__get_cpu_var(process_counts)++;
}
attach_pid(p, PIDTYPE_TGID, p->tgid);
attach_pid(p, PIDTYPE_PID, p->pid);

nr_threads++;
total_forks++;
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
proc_fork_connector(p);
return p;

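The copy_process() hunks above widen the hold of current->sighand->siglock: it is now taken before the CLONE_THREAD exit-all check and released only after the attach_pid() calls, so the check and the pid-list insertion form one critical section. A hedged pthread sketch of checking and linking under a single lock (group_exiting, link_new_thread() and the rest are invented for illustration):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static int group_exiting;     /* set when an exit-all is in progress */
static int nr_threads;

/* One critical section covers both the check and the insertion, so a
 * concurrent exit-all cannot slip in between them. */
static int link_new_thread(void)
{
	int ret = 0;

	pthread_mutex_lock(&siglock);
	if (group_exiting)
		ret = -1;             /* abort the fork, as copy_process() does */
	else
		nr_threads++;         /* stands in for the attach_pid() calls */
	pthread_mutex_unlock(&siglock);
	return ret;
}

int main(void)
{
	printf("link: %d, nr_threads=%d\n", link_new_thread(), nr_threads);
	return 0;
}
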
@@ -418,8 +418,19 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
/* Switch the timer base, if necessary: */
new_base = switch_hrtimer_base(timer, base);

if (mode == HRTIMER_REL)
if (mode == HRTIMER_REL) {
tim = ktime_add(tim, new_base->get_time());
/*
* CONFIG_TIME_LOW_RES is a temporary way for architectures
* to signal that they simply return xtime in
* do_gettimeoffset(). In this case we want to round up by
* resolution when starting a relative timer, to avoid short
* timeouts. This will go away with the GTOD framework.
*/
#ifdef CONFIG_TIME_LOW_RES
tim = ktime_add(tim, base->resolution);
#endif
}
timer->expires = tim;

enqueue_hrtimer(timer, new_base);

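The hrtimer hunk converts a relative expiry to absolute time and, under CONFIG_TIME_LOW_RES, rounds it up by one clock resolution so a short relative timeout cannot fire early. A self-contained sketch of that rounding in plain nanosecond arithmetic (TIME_LOW_RES, absolute_expiry() and the constants are illustrative, not the ktime API):

#include <stdint.h>
#include <stdio.h>

#define TIME_LOW_RES 1   /* pretend the clock only advances once per tick */

/* Compute an absolute expiry from "now" and a relative timeout in ns. */
static uint64_t absolute_expiry(uint64_t now_ns, uint64_t rel_ns,
                                uint64_t resolution_ns)
{
	uint64_t expires = now_ns + rel_ns;
#if TIME_LOW_RES
	/* Round up by one resolution so the timer cannot fire short. */
	expires += resolution_ns;
#endif
	return expires;
}

int main(void)
{
	/* A 1 ms timeout on a clock with 10 ms resolution. */
	uint64_t e = absolute_expiry(0, 1000000, 10000000);
	printf("expires at %llu ns\n", (unsigned long long)e);
	return 0;
}
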
@@ -130,6 +130,7 @@ NORET_TYPE void panic(const char * fmt, ...)
#endif
local_irq_enable();
for (i = 0;;) {
touch_softlockup_watchdog();
i += panic_blink(i);
mdelay(1);
i++;

@@ -91,10 +91,8 @@ static int save_highmem_zone(struct zone *zone)
* corrected eventually when the cases giving rise to this
* are better understood.
*/
if (PageReserved(page)) {
printk("highmem reserved page?!\n");
if (PageReserved(page))
continue;
}
BUG_ON(PageNosave(page));
if (PageNosaveFree(page))
continue;

@@ -153,13 +153,11 @@ static int swsusp_swap_check(void) /* This is called before saving image */
{
int i;

if (!swsusp_resume_device)
return -ENODEV;
spin_lock(&swap_lock);
for (i = 0; i < MAX_SWAPFILES; i++) {
if (!(swap_info[i].flags & SWP_WRITEOK))
continue;
if (is_resume_device(swap_info + i)) {
if (!swsusp_resume_device || is_resume_device(swap_info + i)) {
spin_unlock(&swap_lock);
root_swap = i;
return 0;

@@ -72,8 +72,8 @@ void ptrace_untrace(task_t *child)
*/
void __ptrace_unlink(task_t *child)
{
if (!child->ptrace)
BUG();
BUG_ON(!child->ptrace);

child->ptrace = 0;
if (!list_empty(&child->ptrace_list)) {
list_del_init(&child->ptrace_list);

@@ -184,22 +184,27 @@ bad:
return retval;
}

int ptrace_detach(struct task_struct *child, unsigned int data)
void __ptrace_detach(struct task_struct *child, unsigned int data)
{
if (!valid_signal(data))
return -EIO;

/* Architecture-specific hardware disable .. */
ptrace_disable(child);

/* .. re-parent .. */
child->exit_code = data;

write_lock_irq(&tasklist_lock);
/* .. re-parent .. */
__ptrace_unlink(child);
/* .. and wake it up. */
if (child->exit_state != EXIT_ZOMBIE)
wake_up_process(child);
}

int ptrace_detach(struct task_struct *child, unsigned int data)
{
if (!valid_signal(data))
return -EIO;

/* Architecture-specific hardware disable .. */
ptrace_disable(child);

write_lock_irq(&tasklist_lock);
if (child->ptrace)
__ptrace_detach(child, data);
write_unlock_irq(&tasklist_lock);

return 0;

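The ptrace hunk splits the lock-free body out into __ptrace_detach(), so callers that already hold tasklist_lock can reuse it, while ptrace_detach() stays the locking entry point. The same wrapper pattern in a hedged pthread sketch (illustrative names only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int traced = 1;

/* Caller must hold list_lock; does the actual unlinking work. */
static void __detach_locked(void)
{
	traced = 0;
	printf("detached\n");
}

/* Public entry point: validates, takes the lock, then calls the __ helper. */
static int detach(void)
{
	pthread_mutex_lock(&list_lock);
	if (traced)
		__detach_locked();
	pthread_mutex_unlock(&list_lock);
	return 0;
}

int main(void)
{
	return detach();
}
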
@@ -242,8 +247,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
if (write) {
copy_to_user_page(vma, page, addr,
maddr + offset, buf, bytes);
if (!PageCompound(page))
set_page_dirty_lock(page);
set_page_dirty_lock(page);
} else {
copy_from_user_page(vma, page, addr,
buf, maddr + offset, bytes);

kernel/sched.c

@@ -215,7 +215,6 @@ struct runqueue {
*/
unsigned long nr_running;
#ifdef CONFIG_SMP
unsigned long prio_bias;
unsigned long cpu_load[3];
#endif
unsigned long long nr_switches;

@@ -669,68 +668,13 @@ static int effective_prio(task_t *p)
return prio;
}

#ifdef CONFIG_SMP
static inline void inc_prio_bias(runqueue_t *rq, int prio)
{
rq->prio_bias += MAX_PRIO - prio;
}

static inline void dec_prio_bias(runqueue_t *rq, int prio)
{
rq->prio_bias -= MAX_PRIO - prio;
}

static inline void inc_nr_running(task_t *p, runqueue_t *rq)
{
rq->nr_running++;
if (rt_task(p)) {
if (p != rq->migration_thread)
/*
* The migration thread does the actual balancing. Do
* not bias by its priority as the ultra high priority
* will skew balancing adversely.
*/
inc_prio_bias(rq, p->prio);
} else
inc_prio_bias(rq, p->static_prio);
}

static inline void dec_nr_running(task_t *p, runqueue_t *rq)
{
rq->nr_running--;
if (rt_task(p)) {
if (p != rq->migration_thread)
dec_prio_bias(rq, p->prio);
} else
dec_prio_bias(rq, p->static_prio);
}
#else
static inline void inc_prio_bias(runqueue_t *rq, int prio)
{
}

static inline void dec_prio_bias(runqueue_t *rq, int prio)
{
}

static inline void inc_nr_running(task_t *p, runqueue_t *rq)
{
rq->nr_running++;
}

static inline void dec_nr_running(task_t *p, runqueue_t *rq)
{
rq->nr_running--;
}
#endif

/*
* __activate_task - move a task to the runqueue.
*/
static inline void __activate_task(task_t *p, runqueue_t *rq)
{
enqueue_task(p, rq->active);
inc_nr_running(p, rq);
rq->nr_running++;
}

/*

@@ -739,7 +683,7 @@ static inline void __activate_task(task_t *p, runqueue_t *rq)
static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
{
enqueue_task_head(p, rq->active);
inc_nr_running(p, rq);
rq->nr_running++;
}

static int recalc_task_prio(task_t *p, unsigned long long now)

@@ -863,7 +807,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
*/
static void deactivate_task(struct task_struct *p, runqueue_t *rq)
{
dec_nr_running(p, rq);
rq->nr_running--;
dequeue_task(p, p->array);
p->array = NULL;
}

@@ -1007,61 +951,27 @@ void kick_process(task_t *p)
* We want to under-estimate the load of migration sources, to
* balance conservatively.
*/
static unsigned long __source_load(int cpu, int type, enum idle_type idle)
{
runqueue_t *rq = cpu_rq(cpu);
unsigned long running = rq->nr_running;
unsigned long source_load, cpu_load = rq->cpu_load[type-1],
load_now = running * SCHED_LOAD_SCALE;

if (type == 0)
source_load = load_now;
else
source_load = min(cpu_load, load_now);

if (running > 1 || (idle == NOT_IDLE && running))
/*
* If we are busy rebalancing the load is biased by
* priority to create 'nice' support across cpus. When
* idle rebalancing we should only bias the source_load if
* there is more than one task running on that queue to
* prevent idle rebalance from trying to pull tasks from a
* queue with only one running task.
*/
source_load = source_load * rq->prio_bias / running;

return source_load;
}

static inline unsigned long source_load(int cpu, int type)
{
return __source_load(cpu, type, NOT_IDLE);
runqueue_t *rq = cpu_rq(cpu);
unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
if (type == 0)
return load_now;

return min(rq->cpu_load[type-1], load_now);
}

/*
* Return a high guess at the load of a migration-target cpu
*/
static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
{
runqueue_t *rq = cpu_rq(cpu);
unsigned long running = rq->nr_running;
unsigned long target_load, cpu_load = rq->cpu_load[type-1],
load_now = running * SCHED_LOAD_SCALE;

if (type == 0)
target_load = load_now;
else
target_load = max(cpu_load, load_now);

if (running > 1 || (idle == NOT_IDLE && running))
target_load = target_load * rq->prio_bias / running;

return target_load;
}

static inline unsigned long target_load(int cpu, int type)
{
return __target_load(cpu, type, NOT_IDLE);
runqueue_t *rq = cpu_rq(cpu);
unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
if (type == 0)
return load_now;

return max(rq->cpu_load[type-1], load_now);
}

/*

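With the prio_bias scaling removed, source_load() and target_load() reduce to a low and a high estimate: min() of the decayed cpu_load history and the instantaneous load for a migration source, max() of the two for a target, which keeps balancing conservative. A small sketch of those two estimates (ignoring the type == 0 fast path; the helpers below are illustrative):

#include <stdio.h>

#define SCHED_LOAD_SCALE 128UL

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

/* Low estimate for a CPU we might pull from: don't overstate its load. */
static unsigned long source_load(unsigned long nr_running, unsigned long cpu_load_hist)
{
	unsigned long load_now = nr_running * SCHED_LOAD_SCALE;
	return min_ul(cpu_load_hist, load_now);
}

/* High estimate for a CPU we might push to: don't understate its load. */
static unsigned long target_load(unsigned long nr_running, unsigned long cpu_load_hist)
{
	unsigned long load_now = nr_running * SCHED_LOAD_SCALE;
	return max_ul(cpu_load_hist, load_now);
}

int main(void)
{
	printf("source=%lu target=%lu\n",
	       source_load(2, 384), target_load(2, 384));
	return 0;
}
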
@@ -1294,9 +1204,6 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
}
}

if (p->last_waker_cpu != this_cpu)
goto out_set_cpu;

if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
goto out_set_cpu;

@@ -1367,8 +1274,6 @@ out_set_cpu:
cpu = task_cpu(p);
}

p->last_waker_cpu = this_cpu;

out_activate:
#endif /* CONFIG_SMP */
if (old_state == TASK_UNINTERRUPTIBLE) {

@@ -1450,12 +1355,9 @@ void fastcall sched_fork(task_t *p, int clone_flags)
#ifdef CONFIG_SCHEDSTATS
memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP)
p->last_waker_cpu = cpu;
#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
p->oncpu = 0;
#endif
#endif
#ifdef CONFIG_PREEMPT
/* Want to start with kernel preemption disabled. */
task_thread_info(p)->preempt_count = 1;

@@ -1530,7 +1432,7 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
list_add_tail(&p->run_list, &current->run_list);
p->array = current->array;
p->array->nr_active++;
inc_nr_running(p, rq);
rq->nr_running++;
}
set_need_resched();
} else

@@ -1875,9 +1777,9 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
{
dequeue_task(p, src_array);
dec_nr_running(p, src_rq);
src_rq->nr_running--;
set_task_cpu(p, this_cpu);
inc_nr_running(p, this_rq);
this_rq->nr_running++;
enqueue_task(p, this_array);
p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+ this_rq->timestamp_last_tick;

@@ -2056,9 +1958,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
/* Bias balancing toward cpus of our domain */
if (local_group)
load = __target_load(i, load_idx, idle);
load = target_load(i, load_idx);
else
load = __source_load(i, load_idx, idle);
load = source_load(i, load_idx);

avg_load += load;
}

@@ -2171,7 +2073,7 @@ static runqueue_t *find_busiest_queue(struct sched_group *group,
int i;

for_each_cpu_mask(i, group->cpumask) {
load = __source_load(i, 0, idle);
load = source_load(i, 0);

if (load > max_load) {
max_load = load;

@@ -3571,10 +3473,8 @@ void set_user_nice(task_t *p, long nice)
goto out_unlock;
}
array = p->array;
if (array) {
if (array)
dequeue_task(p, array);
dec_prio_bias(rq, p->static_prio);
}

old_prio = p->prio;
new_prio = NICE_TO_PRIO(nice);

@@ -3584,7 +3484,6 @@ void set_user_nice(task_t *p, long nice)
if (array) {
enqueue_task(p, array);
inc_prio_bias(rq, p->static_prio);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:

@@ -5159,7 +5058,18 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
#define MAX_DOMAIN_DISTANCE 32

static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] =
{ [ 0 ... MAX_DOMAIN_DISTANCE-1 ] = -1LL };
{ [ 0 ... MAX_DOMAIN_DISTANCE-1 ] =
/*
* Architectures may override the migration cost and thus avoid
* boot-time calibration. Unit is nanoseconds. Mostly useful for
* virtualized hardware:
*/
#ifdef CONFIG_DEFAULT_MIGRATION_COST
CONFIG_DEFAULT_MIGRATION_COST
#else
-1LL
#endif
};

/*
* Allow override of migration cost - in units of microseconds.

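The migration_cost hunk lets an architecture pin the migration cost at build time through CONFIG_DEFAULT_MIGRATION_COST instead of calibrating at boot; every slot of the array gets that constant, or -1 meaning "calibrate", via a GCC range designated initializer. A compact sketch of the same construct (DEFAULT_MIGRATION_COST and its value are made up for illustration):

#include <stdio.h>

#define MAX_DOMAIN_DISTANCE 32

/* Pretend the architecture pinned the cost to 500000 ns at build time. */
#define DEFAULT_MIGRATION_COST 500000LL

/* GCC range initializer: every entry gets the default; the -1 branch
 * (dead here because the macro is defined) would mean "measure at boot". */
static long long migration_cost[MAX_DOMAIN_DISTANCE] =
	{ [0 ... MAX_DOMAIN_DISTANCE - 1] =
#ifdef DEFAULT_MIGRATION_COST
		DEFAULT_MIGRATION_COST
#else
		-1LL
#endif
	};

int main(void)
{
	printf("distance 3 cost: %lld ns\n", migration_cost[3]);
	return 0;
}
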
@@ -104,6 +104,8 @@ cond_syscall(sys_setreuid16);
cond_syscall(sys_setuid16);
cond_syscall(sys_vm86old);
cond_syscall(sys_vm86);
cond_syscall(compat_sys_ipc);
cond_syscall(compat_sys_sysctl);

/* arch-specific weak syscall entries */
cond_syscall(sys_pciconfig_read);

@@ -44,14 +44,12 @@
#include <linux/limits.h>
#include <linux/dcache.h>
#include <linux/syscalls.h>
#include <linux/nfs_fs.h>
#include <linux/acpi.h>

#include <asm/uaccess.h>
#include <asm/processor.h>

#ifdef CONFIG_ROOT_NFS
#include <linux/nfs_fs.h>
#endif

#if defined(CONFIG_SYSCTL)

/* External variables not in a header file. */

@@ -126,8 +124,6 @@ extern int sysctl_hz_timer;
extern int acct_parm[];
#endif

int randomize_va_space = 1;

static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t,
ctl_table *, void **);
static int proc_doutsstring(ctl_table *table, int write, struct file *filp,

@@ -640,6 +636,7 @@ static ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
#if defined(CONFIG_MMU)
{
.ctl_name = KERN_RANDOMIZE,
.procname = "randomize_va_space",

@@ -648,6 +645,7 @@ static ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
#endif
#if defined(CONFIG_S390) && defined(CONFIG_SMP)
{
.ctl_name = KERN_SPIN_RETRY,

@@ -657,6 +655,16 @@ static ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
#endif
#ifdef CONFIG_ACPI_SLEEP
{
.ctl_name = KERN_ACPI_VIDEO_FLAGS,
.procname = "acpi_video_flags",
.data = &acpi_video_flags,
.maxlen = sizeof (unsigned long),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
#endif
{ .ctl_name = 0 }
};

@@ -717,12 +717,16 @@ static void second_overflow(void)
#endif
}

/* in the NTP reference this is called "hardclock()" */
static void update_wall_time_one_tick(void)
/*
* Returns how many microseconds we need to add to xtime this tick
* in doing an adjustment requested with adjtime.
*/
static long adjtime_adjustment(void)
{
long time_adjust_step, delta_nsec;
long time_adjust_step;

if ((time_adjust_step = time_adjust) != 0 ) {
time_adjust_step = time_adjust;
if (time_adjust_step) {
/*
* We are doing an adjtime thing. Prepare time_adjust_step to
* be within bounds. Note that a positive time_adjust means we

@@ -733,10 +737,19 @@ static void update_wall_time_one_tick(void)
*/
time_adjust_step = min(time_adjust_step, (long)tickadj);
time_adjust_step = max(time_adjust_step, (long)-tickadj);
}
return time_adjust_step;
}

/* in the NTP reference this is called "hardclock()" */
static void update_wall_time_one_tick(void)
{
long time_adjust_step, delta_nsec;

time_adjust_step = adjtime_adjustment();
if (time_adjust_step)
/* Reduce by this step the amount of time left */
time_adjust -= time_adjust_step;
}
delta_nsec = tick_nsec + time_adjust_step * 1000;
/*
* Advance the phase, once it gets to one microsecond, then

@@ -758,6 +771,22 @@ static void update_wall_time_one_tick(void)
}
}

/*
* Return how long ticks are at the moment, that is, how much time
* update_wall_time_one_tick will add to xtime next time we call it
* (assuming no calls to do_adjtimex in the meantime).
* The return value is in fixed-point nanoseconds with SHIFT_SCALE-10
* bits to the right of the binary point.
* This function has no side-effects.
*/
u64 current_tick_length(void)
{
long delta_nsec;

delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
return ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj;
}

/*
* Using a loop looks inefficient, but "ticks" is
* usually just one (we shouldn't be losing ticks,

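The new current_tick_length() in the last hunk reports how much time the next update_wall_time_one_tick() will add to xtime: the base tick length plus any pending adjtime() slew, in fixed point with SHIFT_SCALE-10 fractional bits. A rough sketch of that composition, omitting the time_adj frequency-correction term and using illustrative constants:

#include <stdint.h>
#include <stdio.h>

#define SHIFT_SCALE 22            /* as in the 2.6 NTP code */
#define TICK_NSEC   1000000L      /* 1 ms tick, illustrative */

static long tickadj = 500;        /* max slew per tick, usecs (illustrative) */
static long time_adjust = 3000;   /* outstanding adjtime() request, usecs */

/* Microseconds of adjtime slew to apply this tick, clamped to +/- tickadj. */
static long adjtime_adjustment(void)
{
	long step = time_adjust;
	if (step > tickadj)
		step = tickadj;
	else if (step < -tickadj)
		step = -tickadj;
	return step;
}

/* Length of the next tick in fixed point (SHIFT_SCALE-10 fractional bits). */
static uint64_t current_tick_length(void)
{
	long delta_nsec = TICK_NSEC + adjtime_adjustment() * 1000;
	return (uint64_t)delta_nsec << (SHIFT_SCALE - 10);
}

int main(void)
{
	printf("next tick: %llu (fixed point)\n",
	       (unsigned long long)current_tick_length());
	return 0;
}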