Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/usb/qmi_wwan.c

Overlapping additions of new device IDs to qmi_wwan.c

Signed-off-by: David S. Miller <davem@davemloft.net>
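
For context on the conflict note above: qmi_wwan matches supported modems through a usb_device_id table, and support for a new device is normally a one-line append to that table, so two branches that each add IDs at the same spot collide textually even though both additions are wanted and the resolution simply keeps both new lines. A minimal sketch of the resolved shape, with made-up vendor/product/interface numbers rather than the actual conflicting entries:

/*
 * Illustrative sketch only: QMI_FIXED_INTF() is the helper qmi_wwan.c
 * defines earlier in the file to build a USB_DEVICE_INTERFACE_NUMBER()
 * match plus the driver_info pointer.
 */
static const struct usb_device_id products[] = {
	/* ... existing entries ... */
	{QMI_FIXED_INTF(0x1234, 0x0001, 4)},	/* appended by one branch (made-up ID) */
	{QMI_FIXED_INTF(0x1234, 0x0002, 8)},	/* appended by the other branch (made-up ID) */
	{ }					/* END */
};
MODULE_DEVICE_TABLE(usb, products);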
This commit is contained in:
David S. Miller
2015-08-21 11:44:04 -07:00
110 changed files with 844 additions and 789 deletions


@@ -1223,7 +1223,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 	spin_unlock_irq(&callback_lock);
 
 	/* use trialcs->mems_allowed as a temp variable */
-	update_nodemasks_hier(cs, &cs->mems_allowed);
+	update_nodemasks_hier(cs, &trialcs->mems_allowed);
 done:
 	return retval;
 }


@@ -1868,8 +1868,6 @@ event_sched_in(struct perf_event *event,
 
 	perf_pmu_disable(event->pmu);
 
-	event->tstamp_running += tstamp - event->tstamp_stopped;
-
 	perf_set_shadow_time(event, ctx, tstamp);
 
 	perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@ event_sched_in(struct perf_event *event,
 		goto out;
 	}
 
+	event->tstamp_running += tstamp - event->tstamp_stopped;
+
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
 	if (!ctx->nr_active++)
@@ -4011,28 +4011,21 @@ static void perf_event_for_each(struct perf_event *event,
 		perf_event_for_each_child(sibling, func);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-	struct perf_event_context *ctx = event->ctx;
-	int ret = 0, active;
+struct period_event {
+	struct perf_event *event;
 	u64 value;
+};
 
-	if (!is_sampling_event(event))
-		return -EINVAL;
+static int __perf_event_period(void *info)
+{
+	struct period_event *pe = info;
+	struct perf_event *event = pe->event;
+	struct perf_event_context *ctx = event->ctx;
+	u64 value = pe->value;
+	bool active;
 
-	if (copy_from_user(&value, arg, sizeof(value)))
-		return -EFAULT;
-
-	if (!value)
-		return -EINVAL;
-
-	raw_spin_lock_irq(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	if (event->attr.freq) {
-		if (value > sysctl_perf_event_sample_rate) {
-			ret = -EINVAL;
-			goto unlock;
-		}
-
 		event->attr.sample_freq = value;
 	} else {
 		event->attr.sample_period = value;
@@ -4051,11 +4044,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->pmu->start(event, PERF_EF_RELOAD);
 		perf_pmu_enable(ctx->pmu);
 	}
+	raw_spin_unlock(&ctx->lock);
 
-unlock:
+	return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+	struct period_event pe = { .event = event, };
+	struct perf_event_context *ctx = event->ctx;
+	struct task_struct *task;
+	u64 value;
+
+	if (!is_sampling_event(event))
+		return -EINVAL;
+
+	if (copy_from_user(&value, arg, sizeof(value)))
+		return -EFAULT;
+
+	if (!value)
+		return -EINVAL;
+
+	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+		return -EINVAL;
+
+	task = ctx->task;
+	pe.value = value;
+
+	if (!task) {
+		cpu_function_call(event->cpu, __perf_event_period, &pe);
+		return 0;
+	}
+
+retry:
+	if (!task_function_call(task, __perf_event_period, &pe))
+		return 0;
+
+	raw_spin_lock_irq(&ctx->lock);
+	if (ctx->is_active) {
+		raw_spin_unlock_irq(&ctx->lock);
+		task = ctx->task;
+		goto retry;
+	}
+
+	__perf_event_period(&pe);
 	raw_spin_unlock_irq(&ctx->lock);
 
-	return ret;
+	return 0;
 }
 
 static const struct file_operations perf_fops;
@@ -4793,12 +4828,20 @@ static const struct file_operations perf_fops = {
  * to user-space before waking everybody up.
  */
 
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+	/* only the parent has fasync state */
+	if (event->parent)
+		event = event->parent;
+	return &event->fasync;
+}
+
 void perf_event_wakeup(struct perf_event *event)
 {
 	ring_buffer_wakeup(event);
 
 	if (event->pending_kill) {
-		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+		kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
 		event->pending_kill = 0;
 	}
 }
@@ -6177,7 +6220,7 @@ static int __perf_event_overflow(struct perf_event *event,
 	else
 		perf_event_output(event, data, regs);
 
-	if (event->fasync && event->pending_kill) {
+	if (*perf_event_fasync(event) && event->pending_kill) {
 		event->pending_wakeup = 1;
 		irq_work_queue(&event->pending);
 	}


@@ -559,11 +559,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
 		rb->aux_priv = NULL;
 	}
 
-	for (pg = 0; pg < rb->aux_nr_pages; pg++)
-		rb_free_aux_page(rb, pg);
+	if (rb->aux_nr_pages) {
+		for (pg = 0; pg < rb->aux_nr_pages; pg++)
+			rb_free_aux_page(rb, pg);
 
-	kfree(rb->aux_pages);
-	rb->aux_nr_pages = 0;
+		kfree(rb->aux_pages);
+		rb->aux_nr_pages = 0;
+	}
 }
 
 void rb_free_aux(struct ring_buffer *rb)


@@ -4,6 +4,7 @@
 
 #include <linux/hash.h>
 #include <linux/bootmem.h>
+#include <linux/debug_locks.h>
 
 /*
  * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
@@ -286,15 +287,23 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
 	struct __qspinlock *l = (void *)lock;
 	struct pv_node *node;
+	u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
 
 	/*
 	 * We must not unlock if SLOW, because in that case we must first
 	 * unhash. Otherwise it would be possible to have multiple @lock
 	 * entries, which would be BAD.
 	 */
-	if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
+	if (likely(lockval == _Q_LOCKED_VAL))
 		return;
 
+	if (unlikely(lockval != _Q_SLOW_VAL)) {
+		if (debug_locks_silent)
+			return;
+		WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+		return;
+	}
+
 	/*
 	 * Since the above failed to release, this must be the SLOW path.
 	 * Therefore start by looking up the blocked node and unhashing it.