Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

The conflicts were two cases of overlapping changes in
batman-adv and the qed driver.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller
2017-06-15 11:31:37 -04:00
451 changed files with 3855 additions and 2255 deletions

kernel/cpu.c

@@ -1658,13 +1658,13 @@ static ssize_t write_cpuhp_target(struct device *dev,
 	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
 	mutex_unlock(&cpuhp_state_mutex);
 	if (ret)
-		return ret;
+		goto out;
 
 	if (st->state < target)
 		ret = do_cpu_up(dev->id, target);
 	else
 		ret = do_cpu_down(dev->id, target);
-
+out:
 	unlock_device_hotplug();
 	return ret ? ret : count;
 }

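The change above replaces an early return with a goto so that unlock_device_hotplug() also runs on the error path. A minimal user-space sketch of the same single-exit pattern, assuming nothing beyond POSIX threads (the lock, function, and callback names are illustrative, not taken from the kernel):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

/* Single exit point: every path, including the error path, drops the lock. */
static int write_target(int valid, int target, int (*apply)(int))
{
	int ret;

	pthread_mutex_lock(&hotplug_lock);

	ret = valid ? 0 : -EINVAL;
	if (ret)
		goto out;	/* an early "return ret;" here would leak the lock */

	ret = apply(target);
out:
	pthread_mutex_unlock(&hotplug_lock);
	return ret;
}
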
kernel/events/core.c

@@ -7329,6 +7329,21 @@ int perf_event_account_interrupt(struct perf_event *event)
 	return __perf_event_account_interrupt(event, 1);
 }
 
+static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
+{
+	/*
+	 * Due to interrupt latency (AKA "skid"), we may enter the
+	 * kernel before taking an overflow, even if the PMU is only
+	 * counting user events.
+	 * To avoid leaking information to userspace, we must always
+	 * reject kernel samples when exclude_kernel is set.
+	 */
+	if (event->attr.exclude_kernel && !user_mode(regs))
+		return false;
+
+	return true;
+}
+
 /*
  * Generic event overflow handling, sampling.
  */
@@ -7349,6 +7364,12 @@ static int __perf_event_overflow(struct perf_event *event,
 
 	ret = __perf_event_account_interrupt(event, throttle);
 
+	/*
+	 * For security, drop the skid kernel samples if necessary.
+	 */
+	if (!sample_is_allowed(event, regs))
+		return ret;
+
 	/*
 	 * XXX event_limit might not quite work as expected on inherited
 	 * events

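sample_is_allowed() above throws away any sample whose interrupted registers are in kernel mode when the event was opened with exclude_kernel set, so interrupt skid can no longer leak kernel addresses into a user-only counter. A hedged sketch of opening such a counter from user space; the hardware event, sample period, and helper name are arbitrary choices for illustration:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Open a sampling cycles counter that must never report kernel state. */
static int open_user_only_cycles(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;		/* arbitrary */
	attr.sample_type = PERF_SAMPLE_IP;
	attr.exclude_kernel = 1;		/* the ":u" in perf tooling */
	attr.exclude_hv = 1;

	/* pid = 0 (this task), cpu = -1 (any CPU), no group leader, no flags */
	return (int)syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

Samples delivered on this fd should then only ever carry user-space instruction pointers, even when the overflow interrupt itself lands in the kernel.
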
kernel/power/process.c

@@ -132,7 +132,7 @@ int freeze_processes(void)
 	if (!pm_freezing)
 		atomic_inc(&system_freezing_cnt);
 
-	pm_wakeup_clear(true);
+	pm_wakeup_clear();
 	pr_info("Freezing user space processes ... ");
 	pm_freezing = true;
 	error = try_to_freeze_tasks(true);

kernel/power/suspend.c

@@ -72,8 +72,6 @@ static void freeze_begin(void)
 
 static void freeze_enter(void)
 {
-	trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);
-
 	spin_lock_irq(&suspend_freeze_lock);
 	if (pm_wakeup_pending())
 		goto out;
@@ -100,27 +98,6 @@ static void freeze_enter(void)
 out:
 	suspend_freeze_state = FREEZE_STATE_NONE;
 	spin_unlock_irq(&suspend_freeze_lock);
-
-	trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
 }
 
-static void s2idle_loop(void)
-{
-	do {
-		freeze_enter();
-
-		if (freeze_ops && freeze_ops->wake)
-			freeze_ops->wake();
-
-		dpm_resume_noirq(PMSG_RESUME);
-		if (freeze_ops && freeze_ops->sync)
-			freeze_ops->sync();
-
-		if (pm_wakeup_pending())
-			break;
-
-		pm_wakeup_clear(false);
-	} while (!dpm_suspend_noirq(PMSG_SUSPEND));
-}
-
 void freeze_wake(void)
@@ -394,8 +371,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 	 * all the devices are suspended.
 	 */
 	if (state == PM_SUSPEND_FREEZE) {
-		s2idle_loop();
-		goto Platform_early_resume;
+		trace_suspend_resume(TPS("machine_suspend"), state, true);
+		freeze_enter();
+		trace_suspend_resume(TPS("machine_suspend"), state, false);
+		goto Platform_wake;
 	}
 
 	error = disable_nonboot_cpus();

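Both hunks above come from the revert that drops the s2idle_loop()/pm_wakeup_clear(true) rework and goes back to calling freeze_enter() directly from suspend_enter(). The user-visible entry point is the same either way; a minimal sketch of requesting suspend-to-idle through the standard sysfs interface (error handling kept to a bare minimum):

#include <fcntl.h>
#include <unistd.h>

/* Ask the kernel for suspend-to-idle ("freeze" in /sys/power/state). */
int main(void)
{
	int fd = open("/sys/power/state", O_WRONLY);
	int ok;

	if (fd < 0)
		return 1;
	ok = write(fd, "freeze", 6) == 6;
	close(fd);
	return ok ? 0 : 1;
}
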
kernel/printk/printk.c

@@ -269,7 +269,6 @@ static struct console *exclusive_console;
 #define MAX_CMDLINECONSOLES 8
 
 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
-static int console_cmdline_cnt;
 
 static int preferred_console = -1;
 int console_set_on_cmdline;
@@ -1906,25 +1905,12 @@ static int __add_preferred_console(char *name, int idx, char *options,
 	 *	See if this tty is not yet registered, and
 	 *	if we have a slot free.
 	 */
-	for (i = 0, c = console_cmdline; i < console_cmdline_cnt; i++, c++) {
+	for (i = 0, c = console_cmdline;
+	     i < MAX_CMDLINECONSOLES && c->name[0];
+	     i++, c++) {
 		if (strcmp(c->name, name) == 0 && c->index == idx) {
-			if (brl_options)
-				return 0;
-
-			/*
-			 * Maintain an invariant that will help to find if
-			 * the matching console is preferred, see
-			 * register_console():
-			 *
-			 * The last non-braille console is always
-			 * the preferred one.
-			 */
-			if (i != console_cmdline_cnt - 1)
-				swap(console_cmdline[i],
-				     console_cmdline[console_cmdline_cnt - 1]);
-
-			preferred_console = console_cmdline_cnt - 1;
-
+			if (!brl_options)
+				preferred_console = i;
 			return 0;
 		}
 	}
@@ -1937,7 +1923,6 @@ static int __add_preferred_console(char *name, int idx, char *options,
 	braille_set_options(c, brl_options);
 
 	c->index = idx;
-	console_cmdline_cnt++;
 
 	return 0;
 }
@@ -2477,23 +2462,12 @@ void register_console(struct console *newcon)
 	}
 
 	/*
-	 * See if this console matches one we selected on the command line.
-	 *
-	 * There may be several entries in the console_cmdline array matching
-	 * with the same console, one with newcon->match(), another by
-	 * name/index:
-	 *
-	 *	pl011,mmio,0x87e024000000,115200 -- added from SPCR
-	 *	ttyAMA0 -- added from command line
-	 *
-	 * Traverse the console_cmdline array in reverse order to be
-	 * sure that if this console is preferred then it will be the first
-	 * matching entry. We use the invariant that is maintained in
-	 * __add_preferred_console().
+	 *	See if this console matches one we selected on
+	 *	the command line.
 	 */
-	for (i = console_cmdline_cnt - 1; i >= 0; i--) {
-		c = console_cmdline + i;
-
+	for (i = 0, c = console_cmdline;
+	     i < MAX_CMDLINECONSOLES && c->name[0];
+	     i++, c++) {
 		if (!newcon->match ||
 		    newcon->match(newcon, c->name, c->index, c->options) != 0) {
 			/* default matching */

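The printk revert above restores the original selection rule: console_cmdline[] is walked in the order the console= entries were added, and the last non-braille entry wins as preferred_console. A small stand-alone sketch of that rule; add_console() is a made-up stand-in for __add_preferred_console(), trimmed of options and braille handling:

#include <stdio.h>
#include <string.h>

#define MAX_CMDLINECONSOLES 8

struct cmdline_console {
	char name[16];
	int index;
};

static struct cmdline_console consoles[MAX_CMDLINECONSOLES];
static int preferred_console = -1;

/* Hypothetical helper mirroring the restored loop: last non-braille entry wins. */
static int add_console(const char *name, int index, int braille)
{
	int i;

	for (i = 0; i < MAX_CMDLINECONSOLES && consoles[i].name[0]; i++) {
		if (strcmp(consoles[i].name, name) == 0 && consoles[i].index == index) {
			if (!braille)
				preferred_console = i;
			return 0;
		}
	}
	if (i == MAX_CMDLINECONSOLES)
		return -1;

	strcpy(consoles[i].name, name);
	consoles[i].index = index;
	if (!braille)
		preferred_console = i;
	return 0;
}

int main(void)
{
	/* console=ttyS0 console=tty0: the later entry, tty0, becomes preferred. */
	add_console("ttyS0", 0, 0);
	add_console("tty0", 0, 0);
	printf("preferred_console = %d\n", preferred_console);	/* prints 1 */
	return 0;
}
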
kernel/rcu/srcu.c

@@ -263,7 +263,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
 /*
  * Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct. Must be called from process context.
+ * srcu_struct.
  * Returns an index that must be passed to the matching srcu_read_unlock().
  */
 int __srcu_read_lock(struct srcu_struct *sp)
@@ -271,7 +271,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
 	int idx;
 
 	idx = READ_ONCE(sp->completed) & 0x1;
-	__this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
+	this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
 	smp_mb(); /* B */ /* Avoid leaking the critical section. */
 	return idx;
 }
@@ -281,7 +281,6 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
  * Removes the count for the old reader from the appropriate per-CPU
  * element of the srcu_struct. Note that this may well be a different
  * CPU than that which was incremented by the corresponding srcu_read_lock().
- * Must be called from process context.
  */
 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {

kernel/rcu/srcutiny.c

@@ -97,8 +97,9 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
 /*
  * Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct. Must be called from process context.
- * Returns an index that must be passed to the matching srcu_read_unlock().
+ * srcu_struct. Can be invoked from irq/bh handlers, but the matching
+ * __srcu_read_unlock() must be in the same handler instance. Returns an
+ * index that must be passed to the matching srcu_read_unlock().
  */
 int __srcu_read_lock(struct srcu_struct *sp)
 {
@@ -112,7 +113,7 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
 
 /*
  * Removes the count for the old reader from the appropriate element of
- * the srcu_struct. Must be called from process context.
+ * the srcu_struct.
 */
 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {

kernel/rcu/srcutree.c

@@ -357,7 +357,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
 /*
  * Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct. Must be called from process context.
+ * srcu_struct.
  * Returns an index that must be passed to the matching srcu_read_unlock().
  */
 int __srcu_read_lock(struct srcu_struct *sp)
@@ -365,7 +365,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
 	int idx;
 
 	idx = READ_ONCE(sp->srcu_idx) & 0x1;
-	__this_cpu_inc(sp->sda->srcu_lock_count[idx]);
+	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
 	smp_mb(); /* B */ /* Avoid leaking the critical section. */
 	return idx;
 }
@@ -375,7 +375,6 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
  * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct. Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
- * Must be called from process context.
 */
 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {

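All three SRCU flavours get the same relaxation above: the read-side counters move from __this_cpu_inc() to this_cpu_inc() and the "must be called from process context" wording is dropped, so srcu_read_lock()/srcu_read_unlock() may also be used from irq and bh handlers as long as both calls stay in the same handler instance. A hedged, kernel-module-style sketch of the public API; my_srcu, my_data, my_read() and my_update() are illustrative names, not existing kernel symbols:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

struct my_data {
	int value;
};

DEFINE_STATIC_SRCU(my_srcu);
static struct my_data __rcu *my_ptr;

/* Reader: fine in process context, and now also in an irq/bh handler. */
static int my_read(void)
{
	struct my_data *p;
	int idx, val = -1;

	idx = srcu_read_lock(&my_srcu);
	p = srcu_dereference(my_ptr, &my_srcu);
	if (p)
		val = p->value;
	srcu_read_unlock(&my_srcu, idx);	/* same handler instance as the lock */
	return val;
}

/* Updater: still process context, because synchronize_srcu() can block. */
static void my_update(int value)
{
	struct my_data *newp = kzalloc(sizeof(*newp), GFP_KERNEL);
	struct my_data *oldp;

	if (!newp)
		return;
	newp->value = value;

	oldp = rcu_dereference_protected(my_ptr, 1);	/* updater-side access */
	rcu_assign_pointer(my_ptr, newp);
	synchronize_srcu(&my_srcu);	/* wait for readers still using oldp */
	kfree(oldp);
}
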
kernel/sched/cpufreq_schedutil.c

@@ -101,9 +101,6 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 	if (sg_policy->next_freq == next_freq)
 		return;
 
-	if (sg_policy->next_freq > next_freq)
-		next_freq = (sg_policy->next_freq + next_freq) >> 1;
-
 	sg_policy->next_freq = next_freq;
 	sg_policy->last_freq_update_time = time;
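
The two dropped lines were the "reduce frequencies slower" heuristic: when the governor asked for a lower frequency, only half of the step down was applied per update. A stand-alone restatement of the removed behaviour, for comparison (old_next_freq() is not a kernel function):

/* What the removed lines did on a decrease: only go half-way down. */
static unsigned int old_next_freq(unsigned int cur, unsigned int req)
{
	if (cur > req)
		req = (cur + req) >> 1;
	return req;
}

/* old_next_freq(2000000, 1000000) == 1500000 kHz; after this change the
 * governor requests 1000000 directly. */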