Merge ../linux-2.6

--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -108,8 +108,10 @@ void free_task(struct task_struct *tsk)
 }
 EXPORT_SYMBOL(free_task);
 
-void __put_task_struct(struct task_struct *tsk)
+void __put_task_struct_cb(struct rcu_head *rhp)
 {
+        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
+
         WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
         WARN_ON(atomic_read(&tsk->usage));
         WARN_ON(tsk == current);
@@ -1059,6 +1061,12 @@ static task_t *copy_process(unsigned long clone_flags,
         */
        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
 
+       /*
+        * sigaltstack should be cleared when sharing the same VM
+        */
+       if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
+               p->sas_ss_sp = p->sas_ss_size = 0;
+
        /*
         * Syscall tracing should be turned off in the child regardless
         * of CLONE_PTRACE.
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -505,6 +505,41 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
        return rem;
 }
 
+#ifdef CONFIG_NO_IDLE_HZ
+/**
+ * hrtimer_get_next_event - get the time until next expiry event
+ *
+ * Returns the delta to the next expiry event or KTIME_MAX if no timer
+ * is pending.
+ */
+ktime_t hrtimer_get_next_event(void)
+{
+       struct hrtimer_base *base = __get_cpu_var(hrtimer_bases);
+       ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
+       unsigned long flags;
+       int i;
+
+       for (i = 0; i < MAX_HRTIMER_BASES; i++, base++) {
+               struct hrtimer *timer;
+
+               spin_lock_irqsave(&base->lock, flags);
+               if (!base->first) {
+                       spin_unlock_irqrestore(&base->lock, flags);
+                       continue;
+               }
+               timer = rb_entry(base->first, struct hrtimer, node);
+               delta.tv64 = timer->expires.tv64;
+               spin_unlock_irqrestore(&base->lock, flags);
+               delta = ktime_sub(delta, base->get_time());
+               if (delta.tv64 < mindelta.tv64)
+                       mindelta.tv64 = delta.tv64;
+       }
+       if (mindelta.tv64 < 0)
+               mindelta.tv64 = 0;
+       return mindelta;
+}
+#endif
+
 /**
  * hrtimer_init - initialize a timer to the given clock
  *
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -67,7 +67,43 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
 
 /* Fake initialization required by compiler */
 static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
-static int maxbatch = 10000;
+static int blimit = 10;
+static int qhimark = 10000;
+static int qlowmark = 100;
+#ifdef CONFIG_SMP
+static int rsinterval = 1000;
+#endif
+
+static atomic_t rcu_barrier_cpu_count;
+static struct semaphore rcu_barrier_sema;
+static struct completion rcu_barrier_completion;
+
+#ifdef CONFIG_SMP
+static void force_quiescent_state(struct rcu_data *rdp,
+                       struct rcu_ctrlblk *rcp)
+{
+       int cpu;
+       cpumask_t cpumask;
+       set_need_resched();
+       if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
+               rdp->last_rs_qlen = rdp->qlen;
+               /*
+                * Don't send IPI to itself. With irqs disabled,
+                * rdp->cpu is the current cpu.
+                */
+               cpumask = rcp->cpumask;
+               cpu_clear(rdp->cpu, cpumask);
+               for_each_cpu_mask(cpu, cpumask)
+                       smp_send_reschedule(cpu);
+       }
+}
+#else
+static inline void force_quiescent_state(struct rcu_data *rdp,
+                       struct rcu_ctrlblk *rcp)
+{
+       set_need_resched();
+}
+#endif
 
 /**
  * call_rcu - Queue an RCU callback for invocation after a grace period.
@@ -92,17 +128,13 @@ void fastcall call_rcu(struct rcu_head *head,
        rdp = &__get_cpu_var(rcu_data);
        *rdp->nxttail = head;
        rdp->nxttail = &head->next;
-
-       if (unlikely(++rdp->count > 10000))
-               set_need_resched();
-
+       if (unlikely(++rdp->qlen > qhimark)) {
+               rdp->blimit = INT_MAX;
+               force_quiescent_state(rdp, &rcu_ctrlblk);
+       }
        local_irq_restore(flags);
 }
 
-static atomic_t rcu_barrier_cpu_count;
-static struct semaphore rcu_barrier_sema;
-static struct completion rcu_barrier_completion;
-
 /**
  * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
  * @head: structure to be used for queueing the RCU updates.
@@ -131,12 +163,12 @@ void fastcall call_rcu_bh(struct rcu_head *head,
        rdp = &__get_cpu_var(rcu_bh_data);
        *rdp->nxttail = head;
        rdp->nxttail = &head->next;
-       rdp->count++;
-/*
- *  Should we directly call rcu_do_batch() here ?
- *  if (unlikely(rdp->count > 10000))
- *      rcu_do_batch(rdp);
- */
+
+       if (unlikely(++rdp->qlen > qhimark)) {
+               rdp->blimit = INT_MAX;
+               force_quiescent_state(rdp, &rcu_bh_ctrlblk);
+       }
+
        local_irq_restore(flags);
 }
 
@@ -199,10 +231,12 @@ static void rcu_do_batch(struct rcu_data *rdp)
                next = rdp->donelist = list->next;
                list->func(list);
                list = next;
-               rdp->count--;
-               if (++count >= maxbatch)
+               rdp->qlen--;
+               if (++count >= rdp->blimit)
                        break;
        }
+       if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
+               rdp->blimit = blimit;
        if (!rdp->donelist)
                rdp->donetail = &rdp->donelist;
        else
@@ -473,6 +507,7 @@ static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
        rdp->quiescbatch = rcp->completed;
        rdp->qs_pending = 0;
        rdp->cpu = cpu;
+       rdp->blimit = blimit;
 }
 
 static void __devinit rcu_online_cpu(int cpu)
@@ -567,7 +602,12 @@ void synchronize_kernel(void)
        synchronize_rcu();
 }
 
-module_param(maxbatch, int, 0);
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
+#ifdef CONFIG_SMP
+module_param(rsinterval, int, 0);
+#endif
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 EXPORT_SYMBOL(call_rcu);  /* WARNING: GPL-only in April 2006. */
 EXPORT_SYMBOL(call_rcu_bh);  /* WARNING: GPL-only in April 2006. */
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -178,13 +178,6 @@ static unsigned int task_timeslice(task_t *p)
 #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \
                                < (long long) (sd)->cache_hot_time)
 
-void __put_task_struct_cb(struct rcu_head *rhp)
-{
-       __put_task_struct(container_of(rhp, struct task_struct, rcu));
-}
-
-EXPORT_SYMBOL_GPL(__put_task_struct_cb);
-
 /*
  * These are the runqueue data structures:
  */
@@ -4028,6 +4021,8 @@ static inline void __cond_resched(void)
         */
        if (unlikely(preempt_count()))
                return;
+       if (unlikely(system_state != SYSTEM_RUNNING))
+               return;
        do {
                add_preempt_count(PREEMPT_ACTIVE);
                schedule();
@@ -4333,6 +4328,7 @@ void __devinit init_idle(task_t *idle, int cpu)
        runqueue_t *rq = cpu_rq(cpu);
        unsigned long flags;
 
+       idle->timestamp = sched_clock();
        idle->sleep_avg = 0;
        idle->array = NULL;
        idle->prio = MAX_PRIO;
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -50,6 +50,9 @@
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 
+extern int proc_nr_files(ctl_table *table, int write, struct file *filp,
+                        void __user *buffer, size_t *lenp, loff_t *ppos);
+
 #if defined(CONFIG_SYSCTL)
 
 /* External variables not in a header file. */
@@ -124,6 +127,10 @@ extern int sysctl_hz_timer;
 extern int acct_parm[];
 #endif
 
+#ifdef CONFIG_IA64
+extern int no_unaligned_warning;
+#endif
+
 static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t,
                        ctl_table *, void **);
 static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
@@ -663,6 +670,16 @@ static ctl_table kern_table[] = {
                .data = &acpi_video_flags,
                .maxlen = sizeof (unsigned long),
                .mode = 0644,
                .proc_handler = &proc_doulongvec_minmax,
        },
 #endif
+#ifdef CONFIG_IA64
+       {
+               .ctl_name = KERN_IA64_UNALIGNED,
+               .procname = "ignore-unaligned-usertrap",
+               .data = &no_unaligned_warning,
+               .maxlen = sizeof (int),
+               .mode = 0644,
+               .proc_handler = &proc_dointvec,
+       },
+#endif
@@ -929,7 +946,7 @@ static ctl_table fs_table[] = {
                .data = &files_stat,
                .maxlen = 3*sizeof(int),
                .mode = 0444,
-               .proc_handler = &proc_dointvec,
+               .proc_handler = &proc_nr_files,
        },
        {
                .ctl_name = FS_MAXFILE,
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -489,9 +489,21 @@ unsigned long next_timer_interrupt(void)
        struct list_head *list;
        struct timer_list *nte;
        unsigned long expires;
+       unsigned long hr_expires = MAX_JIFFY_OFFSET;
+       ktime_t hr_delta;
        tvec_t *varray[4];
        int i, j;
 
+       hr_delta = hrtimer_get_next_event();
+       if (hr_delta.tv64 != KTIME_MAX) {
+               struct timespec tsdelta;
+               tsdelta = ktime_to_timespec(hr_delta);
+               hr_expires = timespec_to_jiffies(&tsdelta);
+               if (hr_expires < 3)
+                       return hr_expires + jiffies;
+       }
+       hr_expires += jiffies;
+
        base = &__get_cpu_var(tvec_bases);
        spin_lock(&base->t_base.lock);
        expires = base->timer_jiffies + (LONG_MAX >> 1);
@@ -542,6 +554,10 @@ found:
                }
        }
        spin_unlock(&base->t_base.lock);
+
+       if (time_before(hr_expires, expires))
+               return hr_expires;
+
        return expires;
 }
 #endif
@@ -925,6 +941,8 @@ static inline void update_times(void)
 void do_timer(struct pt_regs *regs)
 {
        jiffies_64++;
+       /* prevent loading jiffies before storing new jiffies_64 value. */
+       barrier();
        update_times();
        softlockup_tick(regs);
 }
@@ -1351,10 +1369,10 @@ static inline u64 time_interpolator_get_cycles(unsigned int src)
                return x();
 
        case TIME_SOURCE_MMIO64 :
-               return readq((void __iomem *) time_interpolator->addr);
+               return readq_relaxed((void __iomem *)time_interpolator->addr);
 
        case TIME_SOURCE_MMIO32 :
-               return readl((void __iomem *) time_interpolator->addr);
+               return readl_relaxed((void __iomem *)time_interpolator->addr);
 
        default: return get_cycles();
        }