Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/team/team.c
	drivers/net/usb/qmi_wwan.c
	net/batman-adv/bat_iv_ogm.c
	net/ipv4/fib_frontend.c
	net/ipv4/route.c
	net/l2tp/l2tp_netlink.c

The team, fib_frontend, route, and l2tp_netlink conflicts were simply
overlapping changes.

qmi_wwan and bat_iv_ogm were of the "use HEAD" variety.

With help from Antonio Quartulli.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller, 2012-09-28 14:40:49 -04:00
346 changed files with 2451 additions and 1633 deletions

kernel/pid_namespace.c

@@ -234,15 +234,19 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
 	 */
 	tmp.data = &current->nsproxy->pid_ns->last_pid;
-	return proc_dointvec(&tmp, write, buffer, lenp, ppos);
+	return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
 }
 
+extern int pid_max;
+static int zero = 0;
 static struct ctl_table pid_ns_ctl_table[] = {
 	{
 		.procname = "ns_last_pid",
 		.maxlen = sizeof(int),
 		.mode = 0666, /* permissions are checked in the handler */
 		.proc_handler = pid_ns_ctl_handler,
+		.extra1 = &zero,
+		.extra2 = &pid_max,
 	},
 	{ }
 };
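
For reference, proc_dointvec_minmax() rejects writes that fall outside the window given by .extra1 and .extra2, so with the entry above a write to /proc/sys/kernel/ns_last_pid has to lie between 0 and pid_max. A minimal sketch of the same pattern for an unrelated, purely hypothetical sysctl (all names invented for illustration, not part of this patch):

static int example_val;
static int example_min = 0;	/* lower bound, analogous to zero above */
static int example_max = 100;	/* upper bound, analogous to pid_max above */

static struct ctl_table example_table[] = {
	{
		.procname = "example_bounded",
		.data = &example_val,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &example_min,
		.extra2 = &example_max,
	},
	{ }
};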

kernel/sched/core.c

@@ -6014,11 +6014,6 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
  * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
  * allows us to avoid some pointer chasing select_idle_sibling().
  *
- * Iterate domains and sched_groups downward, assigning CPUs to be
- * select_idle_sibling() hw buddy.  Cross-wiring hw makes bouncing
- * due to random perturbation self canceling, ie sw buddies pull
- * their counterpart to their CPU's hw counterpart.
- *
  * Also keep a unique ID per domain (we use the first cpu number in
  * the cpumask of the domain), this allows us to quickly tell if
  * two cpus are in the same cache domain, see cpus_share_cache().
@@ -6032,40 +6027,8 @@ static void update_top_cache_domain(int cpu)
 	int id = cpu;
 
 	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
-	if (sd) {
-		struct sched_domain *tmp = sd;
-		struct sched_group *sg, *prev;
-		bool right;
-
-		/*
-		 * Traverse to first CPU in group, and count hops
-		 * to cpu from there, switching direction on each
-		 * hop, never ever pointing the last CPU rightward.
-		 */
-		do {
-			id = cpumask_first(sched_domain_span(tmp));
-			prev = sg = tmp->groups;
-			right = 1;
-
-			while (cpumask_first(sched_group_cpus(sg)) != id)
-				sg = sg->next;
-
-			while (!cpumask_test_cpu(cpu, sched_group_cpus(sg))) {
-				prev = sg;
-				sg = sg->next;
-				right = !right;
-			}
-
-			/* A CPU went down, never point back to domain start. */
-			if (right && cpumask_first(sched_group_cpus(sg->next)) == id)
-				right = false;
-
-			sg = right ? sg->next : prev;
-			tmp->idle_buddy = cpumask_first(sched_group_cpus(sg));
-		} while ((tmp = tmp->child));
-	}
+	if (sd)
+		id = cpumask_first(sched_domain_span(sd));
 
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 	per_cpu(sd_llc_id, cpu) = id;
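
The comment kept above refers to cpus_share_cache(); with sd_llc_id now simply the first CPU in the span of the highest SD_SHARE_PKG_RESOURCES domain, that check stays a plain per-CPU id comparison. A sketch of the consumer, roughly as it looks in kernel/sched/core.c of this era (shown only for context, not part of the patch):

bool cpus_share_cache(int this_cpu, int that_cpu)
{
	/* Two CPUs share a last-level cache iff their LLC domain ids match. */
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}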

kernel/sched/fair.c

@@ -2637,6 +2637,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	struct sched_domain *sd;
+	struct sched_group *sg;
+	int i;
 
 	/*
 	 * If the task is going to be woken-up on this cpu and if it is
@@ -2653,17 +2655,29 @@ static int select_idle_sibling(struct task_struct *p, int target)
 		return prev_cpu;
 
 	/*
-	 * Otherwise, check assigned siblings to find an elegible idle cpu.
+	 * Otherwise, iterate the domains and find an elegible idle cpu.
 	 */
 	sd = rcu_dereference(per_cpu(sd_llc, target));
 	for_each_lower_domain(sd) {
-		if (!cpumask_test_cpu(sd->idle_buddy, tsk_cpus_allowed(p)))
-			continue;
-		if (idle_cpu(sd->idle_buddy))
-			return sd->idle_buddy;
-	}
+		sg = sd->groups;
+		do {
+			if (!cpumask_intersects(sched_group_cpus(sg),
+						tsk_cpus_allowed(p)))
+				goto next;
+
+			for_each_cpu(i, sched_group_cpus(sg)) {
+				if (!idle_cpu(i))
+					goto next;
+			}
+
+			target = cpumask_first_and(sched_group_cpus(sg),
+					tsk_cpus_allowed(p));
+			goto done;
+next:
+			sg = sg->next;
+		} while (sg != sd->groups);
+	}
+done:
 	return target;
 }
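
The restored scan walks from the LLC domain downward via for_each_lower_domain(); in this kernel that iterator is roughly a child-pointer walk (reproduced here only as a reading aid, not part of the patch):

/* Reading aid: follow ->child from the starting domain until NULL. */
#define for_each_lower_domain(sd) for (; sd; sd = sd->child)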

kernel/time/timekeeping.c

@@ -303,10 +303,11 @@ void getnstimeofday(struct timespec *ts)
 		seq = read_seqbegin(&tk->lock);
 
 		ts->tv_sec = tk->xtime_sec;
-		ts->tv_nsec = timekeeping_get_ns(tk);
+		nsecs = timekeeping_get_ns(tk);
 
 	} while (read_seqretry(&tk->lock, seq));
 
+	ts->tv_nsec = 0;
 	timespec_add_ns(ts, nsecs);
 }
 EXPORT_SYMBOL(getnstimeofday);
@@ -345,6 +346,7 @@ void ktime_get_ts(struct timespec *ts)
 {
 	struct timekeeper *tk = &timekeeper;
 	struct timespec tomono;
+	s64 nsec;
 	unsigned int seq;
 
 	WARN_ON(timekeeping_suspended);
@@ -352,13 +354,14 @@ void ktime_get_ts(struct timespec *ts)
 	do {
 		seq = read_seqbegin(&tk->lock);
 		ts->tv_sec = tk->xtime_sec;
-		ts->tv_nsec = timekeeping_get_ns(tk);
+		nsec = timekeeping_get_ns(tk);
 		tomono = tk->wall_to_monotonic;
 
 	} while (read_seqretry(&tk->lock, seq));
 
-	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
-				ts->tv_nsec + tomono.tv_nsec);
+	ts->tv_sec += tomono.tv_sec;
+	ts->tv_nsec = 0;
+	timespec_add_ns(ts, nsec + tomono.tv_nsec);
 }
 EXPORT_SYMBOL_GPL(ktime_get_ts);
@@ -1244,6 +1247,7 @@ void get_monotonic_boottime(struct timespec *ts)
 {
 	struct timekeeper *tk = &timekeeper;
 	struct timespec tomono, sleep;
+	s64 nsec;
 	unsigned int seq;
 
 	WARN_ON(timekeeping_suspended);
@@ -1251,14 +1255,15 @@ void get_monotonic_boottime(struct timespec *ts)
 	do {
 		seq = read_seqbegin(&tk->lock);
 		ts->tv_sec = tk->xtime_sec;
-		ts->tv_nsec = timekeeping_get_ns(tk);
+		nsec = timekeeping_get_ns(tk);
 		tomono = tk->wall_to_monotonic;
 		sleep = tk->total_sleep_time;
 
 	} while (read_seqretry(&tk->lock, seq));
 
-	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
-			ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec);
+	ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
+	ts->tv_nsec = 0;
+	timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
 }
 EXPORT_SYMBOL_GPL(get_monotonic_boottime);
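
All three readers now follow the same pattern: collect the sub-second part in a 64-bit nanosecond value and let timespec_add_ns() carry it into tv_sec, rather than storing an intermediate result that may exceed one second directly in tv_nsec. A small illustration of that carry, with invented values (not part of the patch):

static void example_carry(void)
{
	struct timespec ts = { .tv_sec = 100, .tv_nsec = 0 };
	s64 nsec = 1500000000LL;	/* 1.5 s expressed in nanoseconds */

	/* timespec_add_ns() folds the overflow into tv_sec, so ts ends up
	 * as { .tv_sec = 101, .tv_nsec = 500000000 }. */
	timespec_add_ns(&ts, nsec);
}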

kernel/workqueue.c

@@ -1349,8 +1349,16 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 	struct worker *worker = container_of(work, struct worker, rebind_work);
 	struct global_cwq *gcwq = worker->pool->gcwq;
 
-	if (worker_maybe_bind_and_lock(worker))
-		worker_clr_flags(worker, WORKER_REBIND);
+	worker_maybe_bind_and_lock(worker);
+
+	/*
+	 * %WORKER_REBIND must be cleared even if the above binding failed;
+	 * otherwise, we may confuse the next CPU_UP cycle or oops / get
+	 * stuck by calling idle_worker_rebind() prematurely.  If CPU went
+	 * down again inbetween, %WORKER_UNBOUND would be set, so clearing
+	 * %WORKER_REBIND is always safe.
+	 */
+	worker_clr_flags(worker, WORKER_REBIND);
 
 	spin_unlock_irq(&gcwq->lock);
 }
@@ -3568,18 +3576,17 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
 #ifdef CONFIG_SMP
 
 struct work_for_cpu {
-	struct completion completion;
+	struct work_struct work;
 	long (*fn)(void *);
 	void *arg;
 	long ret;
 };
 
-static int do_work_for_cpu(void *_wfc)
+static void work_for_cpu_fn(struct work_struct *work)
 {
-	struct work_for_cpu *wfc = _wfc;
+	struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
+
 	wfc->ret = wfc->fn(wfc->arg);
-	complete(&wfc->completion);
-	return 0;
 }
 
 /**
@@ -3594,19 +3601,11 @@ static int do_work_for_cpu(void *_wfc)
  */
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
-	struct task_struct *sub_thread;
-	struct work_for_cpu wfc = {
-		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
-		.fn = fn,
-		.arg = arg,
-	};
+	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
 
-	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
-	if (IS_ERR(sub_thread))
-		return PTR_ERR(sub_thread);
-	kthread_bind(sub_thread, cpu);
-	wake_up_process(sub_thread);
-	wait_for_completion(&wfc.completion);
+	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
+	schedule_work_on(cpu, &wfc.work);
+	flush_work(&wfc.work);
 	return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
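
work_on_cpu() keeps its synchronous contract: it runs fn(arg) on the requested CPU and returns fn's long result, now by flushing a work item on that CPU's workqueue instead of creating, binding and waking a throwaway kthread. A hypothetical caller, shown only to illustrate the interface (not from this patch):

/* Hypothetical example: run a probe on a specific CPU and collect its result. */
static long probe_fn(void *arg)
{
	unsigned int *value = arg;

	/* Executes on the CPU passed to work_on_cpu() below. */
	return (long)*value;
}

static long probe_on_cpu(unsigned int cpu, unsigned int *value)
{
	return work_on_cpu(cpu, probe_fn, value);
}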