Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
All three conflicts were cases of simple overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -242,18 +242,6 @@ unlock:
 	return ret;
 }
 
-static void event_function_local(struct perf_event *event, event_f func, void *data)
-{
-	struct event_function_struct efs = {
-		.event = event,
-		.func = func,
-		.data = data,
-	};
-
-	int ret = event_function(&efs);
-	WARN_ON_ONCE(ret);
-}
-
 static void event_function_call(struct perf_event *event, event_f func, void *data)
 {
 	struct perf_event_context *ctx = event->ctx;
@@ -303,6 +291,54 @@ again:
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
+/*
+ * Similar to event_function_call() + event_function(), but hard assumes IRQs
+ * are already disabled and we're on the right CPU.
+ */
+static void event_function_local(struct perf_event *event, event_f func, void *data)
+{
+	struct perf_event_context *ctx = event->ctx;
+	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+	struct task_struct *task = READ_ONCE(ctx->task);
+	struct perf_event_context *task_ctx = NULL;
+
+	WARN_ON_ONCE(!irqs_disabled());
+
+	if (task) {
+		if (task == TASK_TOMBSTONE)
+			return;
+
+		task_ctx = ctx;
+	}
+
+	perf_ctx_lock(cpuctx, task_ctx);
+
+	task = ctx->task;
+	if (task == TASK_TOMBSTONE)
+		goto unlock;
+
+	if (task) {
+		/*
+		 * We must be either inactive or active and the right task,
+		 * otherwise we're screwed, since we cannot IPI to somewhere
+		 * else.
+		 */
+		if (ctx->is_active) {
+			if (WARN_ON_ONCE(task != current))
+				goto unlock;
+
+			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
+				goto unlock;
+		}
+	} else {
+		WARN_ON_ONCE(&cpuctx->ctx != ctx);
+	}
+
+	func(event, cpuctx, ctx, data);
+unlock:
+	perf_ctx_unlock(cpuctx, task_ctx);
+}
+
 #define PERF_FLAG_ALL	(PERF_FLAG_FD_NO_GROUP |\
 			 PERF_FLAG_FD_OUTPUT |\
 			 PERF_FLAG_PID_CGROUP |\
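The rewritten event_function_local() guards its preconditions with WARN_ON_ONCE() rather than failing hard. For readers unfamiliar with the once-only idiom, here is a userspace approximation using GNU C statement expressions; the kernel's real macro (include/asm-generic/bug.h) differs in detail:

#include <stdio.h>

/* Warn at most once per call site; later violations stay quiet.
 * Each macro expansion gets its own static flag. */
#define WARN_ON_ONCE(cond) ({					\
	static int __warned;					\
	int __c = !!(cond);					\
	if (__c && !__warned) {					\
		__warned = 1;					\
		fprintf(stderr, "WARN: %s:%d: %s\n",		\
			__FILE__, __LINE__, #cond);		\
	}							\
	__c;							\
})

int main(void)
{
	for (int i = 0; i < 3; i++) {
		if (WARN_ON_ONCE(i > 0))	/* warns only once, at i == 1 */
			continue;		/* still usable as a test */
	}
	return 0;
}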
@@ -3513,9 +3549,10 @@ static int perf_event_read(struct perf_event *event, bool group)
 			.group = group,
 			.ret = 0,
 		};
-		smp_call_function_single(event->oncpu,
-					 __perf_event_read, &data, 1);
-		ret = data.ret;
+		ret = smp_call_function_single(event->oncpu, __perf_event_read, &data, 1);
+		/* The event must have been read from an online CPU: */
+		WARN_ON_ONCE(ret);
+		ret = ret ? : data.ret;
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
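Two things change above: the return value of smp_call_function_single() is no longer ignored, and the final result uses the GNU "elvis" shorthand `a ?: b`, which yields `a` if it is non-zero and `b` otherwise, evaluating `a` once. A minimal standalone sketch (remote_call() is a made-up stand-in, not a kernel API):

#include <stdio.h>

static int remote_call(int *payload_ret)
{
	*payload_ret = -5;	/* stands in for data.ret */
	return 0;		/* 0 means the cross-call itself worked */
}

int main(void)
{
	int data_ret;
	int ret = remote_call(&data_ret);

	/* Transport errors win; otherwise fall back to the payload result. */
	ret = ret ?: data_ret;
	printf("ret = %d\n", ret);	/* prints -5 */
	return 0;
}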
@@ -6129,7 +6166,7 @@ static int __perf_pmu_output_stop(void *info)
 {
 	struct perf_event *event = info;
 	struct pmu *pmu = event->pmu;
-	struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 	struct remote_output ro = {
 		.rb = event->rb,
 	};
@@ -6583,15 +6620,6 @@ got_name:
 	kfree(buf);
 }
 
-/*
- * Whether this @filter depends on a dynamic object which is not loaded
- * yet or its load addresses are not known.
- */
-static bool perf_addr_filter_needs_mmap(struct perf_addr_filter *filter)
-{
-	return filter->filter && filter->inode;
-}
-
 /*
  * Check whether inode and address range match filter criteria.
  */
@@ -6653,6 +6681,13 @@ static void perf_addr_filters_adjust(struct vm_area_struct *vma)
 	struct perf_event_context *ctx;
 	int ctxn;
 
+	/*
+	 * Data tracing isn't supported yet and as such there is no need
+	 * to keep track of anything that isn't related to executable code:
+	 */
+	if (!(vma->vm_flags & VM_EXEC))
+		return;
+
 	rcu_read_lock();
 	for_each_task_context_nr(ctxn) {
 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
@@ -7805,7 +7840,11 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
 	list_for_each_entry(filter, &ifh->list, entry) {
 		event->addr_filters_offs[count] = 0;
 
-		if (perf_addr_filter_needs_mmap(filter))
+		/*
+		 * Adjust base offset if the filter is associated to a binary
+		 * that needs to be mapped:
+		 */
+		if (filter->inode)
 			event->addr_filters_offs[count] =
 				perf_addr_filter_apply(filter, mm);
 
@@ -7936,8 +7975,10 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
 			goto fail;
 		}
 
-		if (token == IF_SRC_FILE) {
-			filename = match_strdup(&args[2]);
+		if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
+			int fpos = filter->range ? 2 : 1;
+
+			filename = match_strdup(&args[fpos]);
 			if (!filename) {
 				ret = -ENOMEM;
 				goto fail;
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -172,8 +172,10 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	err = -EAGAIN;
 	ptep = page_check_address(page, mm, addr, &ptl, 0);
-	if (!ptep)
+	if (!ptep) {
+		mem_cgroup_cancel_charge(kpage, memcg, false);
 		goto unlock;
+	}
 
 	get_page(kpage);
 	page_add_new_anon_rmap(kpage, vma, addr, false);
@@ -200,7 +202,6 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 
 	err = 0;
 unlock:
-	mem_cgroup_cancel_charge(kpage, memcg, false);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	unlock_page(page);
 	return err;
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -39,6 +39,7 @@ struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
 		return NULL;
 	}
 
+	get_online_cpus();
 	if (max_vecs >= num_online_cpus()) {
 		cpumask_copy(affinity_mask, cpu_online_mask);
 		*nr_vecs = num_online_cpus();
@@ -56,6 +57,7 @@ struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
 		}
 		*nr_vecs = vecs;
 	}
+	put_online_cpus();
 
 	return affinity_mask;
 }
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -820,6 +820,17 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
 	desc->name = name;
 
 	if (handle != handle_bad_irq && is_chained) {
+		/*
+		 * We're about to start this interrupt immediately,
+		 * hence the need to set the trigger configuration.
+		 * But the .set_type callback may have overridden the
+		 * flow handler, ignoring that we're dealing with a
+		 * chained interrupt. Reset it immediately because we
+		 * do know better.
+		 */
+		__irq_set_trigger(desc, irqd_get_trigger_type(&desc->irq_data));
+		desc->handle_irq = handle;
+
 		irq_settings_set_noprobe(desc);
 		irq_settings_set_norequest(desc);
 		irq_settings_set_nothread(desc);
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1681,8 +1681,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	action->dev_id = dev_id;
 
 	retval = irq_chip_pm_get(&desc->irq_data);
-	if (retval < 0)
+	if (retval < 0) {
+		kfree(action);
 		return retval;
+	}
 
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, action);
@@ -1985,8 +1987,10 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
 	action->percpu_dev_id = dev_id;
 
 	retval = irq_chip_pm_get(&desc->irq_data);
-	if (retval < 0)
+	if (retval < 0) {
+		kfree(action);
 		return retval;
+	}
 
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, action);
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -835,9 +835,9 @@ static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
  */
 static bool rtree_next_node(struct memory_bitmap *bm)
 {
-	bm->cur.node = list_entry(bm->cur.node->list.next,
-				  struct rtree_node, list);
-	if (&bm->cur.node->list != &bm->cur.zone->leaves) {
+	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
+		bm->cur.node = list_entry(bm->cur.node->list.next,
+					  struct rtree_node, list);
 		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
 		bm->cur.node_bit = 0;
 		touch_softlockup_watchdog();
@@ -845,9 +845,9 @@ static bool rtree_next_node(struct memory_bitmap *bm)
 	}
 
 	/* No more nodes, goto next zone */
-	bm->cur.zone = list_entry(bm->cur.zone->list.next,
+	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
+		bm->cur.zone = list_entry(bm->cur.zone->list.next,
 				  struct mem_zone_bm_rtree, list);
-	if (&bm->cur.zone->list != &bm->zones) {
 		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
 					  struct rtree_node, list);
 		bm->cur.node_pfn = 0;
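Both rtree_next_node() hunks switch from advance-then-test to testing list_is_last() before advancing, so the cursor never materializes a struct around the list head sentinel. A self-contained sketch of the helper's semantics (simplified list type, not the kernel's <linux/list.h>):

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

/* Same test the kernel helper performs. */
static int list_is_last(const struct list_head *list,
			const struct list_head *head)
{
	return list->next == head;
}

int main(void)
{
	struct list_head head, a, b;

	/* Circular list: head <-> a <-> b <-> head */
	head.next = &a; a.prev = &head;
	a.next = &b;    b.prev = &a;
	b.next = &head; head.prev = &b;

	printf("a is last: %d\n", list_is_last(&a, &head));	/* 0 */
	printf("b is last: %d\n", list_is_last(&b, &head));	/* 1 */
	return 0;
}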
--- a/kernel/printk/braille.c
+++ b/kernel/printk/braille.c
@@ -9,10 +9,10 @@
 
 char *_braille_console_setup(char **str, char **brl_options)
 {
-	if (!memcmp(*str, "brl,", 4)) {
+	if (!strncmp(*str, "brl,", 4)) {
 		*brl_options = "";
 		*str += 4;
-	} else if (!memcmp(str, "brl=", 4)) {
+	} else if (!strncmp(*str, "brl=", 4)) {
 		*brl_options = *str + 4;
 		*str = strchr(*brl_options, ',');
 		if (!*str)
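This hunk fixes two distinct problems: the second comparison previously read the bytes of the pointer variable itself (`str` instead of `*str`), and memcmp() always compares the full four bytes, so it could read past the end of an option string shorter than the prefix, whereas strncmp() stops at the terminating NUL. A minimal userspace illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *s = "brl";	/* 3 chars + NUL: shorter than "brl," */

	/* strncmp() stops at s's NUL, so this never reads past it: */
	if (strncmp(s, "brl,", 4) != 0)
		printf("no \"brl,\" prefix\n");

	/* memcmp(s, "brl,", 4) would compare 4 bytes unconditionally;
	 * with a buffer shorter than 4 bytes that is an overread. */
	return 0;
}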
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -263,6 +263,11 @@ void account_idle_time(cputime_t cputime)
 		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
 }
 
+/*
+ * When a guest is interrupted for a longer amount of time, missed clock
+ * ticks are not redelivered later. Due to that, this function may on
+ * occasion account more time than the calling functions think elapsed.
+ */
 static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
 {
 #ifdef CONFIG_PARAVIRT
@@ -371,7 +376,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 	 * idle, or potentially user or system time. Due to rounding,
 	 * other time can exceed ticks occasionally.
 	 */
-	other = account_other_time(cputime);
+	other = account_other_time(ULONG_MAX);
 	if (other >= cputime)
 		return;
 	cputime -= other;
@@ -486,7 +491,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 	}
 
 	cputime = cputime_one_jiffy;
-	steal = steal_account_process_time(cputime);
+	steal = steal_account_process_time(ULONG_MAX);
 
 	if (steal >= cputime)
 		return;
@@ -516,7 +521,7 @@ void account_idle_ticks(unsigned long ticks)
 	}
 
 	cputime = jiffies_to_cputime(ticks);
-	steal = steal_account_process_time(cputime);
+	steal = steal_account_process_time(ULONG_MAX);
 
 	if (steal >= cputime)
 		return;
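All three call sites now pass ULONG_MAX, i.e. "no cap", and compare the returned steal time against the tick afterwards. A userspace sketch of that maxtime contract (names and the pending value are invented for illustration):

#include <stdio.h>
#include <limits.h>

static unsigned long pending_steal = 150;	/* pretend 150 units stolen */

static unsigned long steal_account(unsigned long maxtime)
{
	unsigned long steal = pending_steal;

	if (steal > maxtime)
		steal = maxtime;
	pending_steal -= steal;		/* consume only what we report */
	return steal;
}

int main(void)
{
	unsigned long cputime = 100;
	/* New convention: take everything, then let the caller compare. */
	unsigned long steal = steal_account(ULONG_MAX);

	if (steal >= cputime)
		printf("tick fully consumed by steal time\n");
	return 0;
}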
@@ -614,19 +619,25 @@ static void cputime_adjust(struct task_cputime *curr,
 	stime = curr->stime;
 	utime = curr->utime;
 
-	if (utime == 0) {
-		stime = rtime;
+	/*
+	 * If either stime or both stime and utime are 0, assume all runtime is
+	 * userspace. Once a task gets some ticks, the monotonicy code at
+	 * 'update' will ensure things converge to the observed ratio.
+	 */
+	if (stime == 0) {
+		utime = rtime;
 		goto update;
 	}
 
-	if (stime == 0) {
-		utime = rtime;
+	if (utime == 0) {
+		stime = rtime;
 		goto update;
 	}
 
 	stime = scale_stime((__force u64)stime, (__force u64)rtime,
 			    (__force u64)(stime + utime));
 
+update:
 	/*
 	 * Make sure stime doesn't go backwards; this preserves monotonicity
 	 * for utime because rtime is monotonic.
@@ -649,7 +660,6 @@ static void cputime_adjust(struct task_cputime *curr,
 		stime = rtime - utime;
 	}
 
-update:
 	prev->stime = stime;
 	prev->utime = utime;
 out:
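cputime_adjust() distributes the measured rtime in the sampled stime:utime ratio, and with the relocated update: label every early-out path now flows through the monotonicity fixup. A standalone arithmetic sketch of the proportional split (plain 64-bit division; the kernel's scale_stime() guards against multiplication overflow):

#include <stdio.h>
#include <stdint.h>

static uint64_t scale_stime(uint64_t stime, uint64_t rtime, uint64_t total)
{
	return stime * rtime / total;	/* naive version, can overflow */
}

int main(void)
{
	uint64_t stime = 300, utime = 100, rtime = 800;

	/* rtime is split in the observed 3:1 stime:utime ratio */
	uint64_t new_stime = scale_stime(stime, rtime, stime + utime);

	printf("stime=%llu utime=%llu\n",	/* stime=600 utime=200 */
	       (unsigned long long)new_stime,
	       (unsigned long long)(rtime - new_stime));
	return 0;
}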
@@ -694,6 +704,13 @@ static cputime_t get_vtime_delta(struct task_struct *tsk)
 	unsigned long now = READ_ONCE(jiffies);
 	cputime_t delta, other;
 
+	/*
+	 * Unlike tick based timing, vtime based timing never has lost
+	 * ticks, and no need for steal time accounting to make up for
+	 * lost ticks. Vtime accounts a rounded version of actual
+	 * elapsed time. Limit account_other_time to prevent rounding
+	 * errors from causing elapsed vtime to go negative.
+	 */
 	delta = jiffies_to_cputime(now - tsk->vtime_snap);
 	other = account_other_time(delta);
 	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2140,6 +2140,21 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp,
 	return 0;
 }
 
+static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp,
+				  int *valp,
+				  int write, void *data)
+{
+	if (write) {
+		if (*negp)
+			return -EINVAL;
+		*valp = *lvalp;
+	} else {
+		unsigned int val = *valp;
+		*lvalp = (unsigned long)val;
+	}
+	return 0;
+}
+
 static const char proc_wspace_sep[] = { ' ', '\t', '\n' };
 
 static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
@@ -2259,8 +2274,27 @@ static int do_proc_dointvec(struct ctl_table *table, int write,
 int proc_dointvec(struct ctl_table *table, int write,
 		  void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	return do_proc_dointvec(table,write,buffer,lenp,ppos,
-			        NULL,NULL);
+	return do_proc_dointvec(table, write, buffer, lenp, ppos, NULL, NULL);
 }
 
+/**
+ * proc_douintvec - read a vector of unsigned integers
+ * @table: the sysctl table
+ * @write: %TRUE if this is a write to the sysctl file
+ * @buffer: the user buffer
+ * @lenp: the size of the user buffer
+ * @ppos: file position
+ *
+ * Reads/writes up to table->maxlen/sizeof(unsigned int) unsigned integer
+ * values from/to the user buffer, treated as an ASCII string.
+ *
+ * Returns 0 on success.
+ */
+int proc_douintvec(struct ctl_table *table, int write,
+		   void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	return do_proc_dointvec(table, write, buffer, lenp, ppos,
+				do_proc_douintvec_conv, NULL);
+}
+
 /*
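proc_douintvec() reuses the existing do_proc_dointvec() machinery and only swaps in a different per-value conversion hook; do_proc_douintvec_conv() rejects negative writes outright. A userspace sketch of that parser-plus-callback shape (all names here are hypothetical, not the kernel's):

#include <stdio.h>
#include <stdbool.h>

typedef int (*conv_fn)(bool neg, unsigned long lval, int *valp);

static int conv_int(bool neg, unsigned long lval, int *valp)
{
	*valp = neg ? -(int)lval : (int)lval;
	return 0;
}

static int conv_uint(bool neg, unsigned long lval, int *valp)
{
	if (neg)
		return -1;	/* mirrors do_proc_douintvec_conv() */
	*valp = (int)lval;
	return 0;
}

/* Shared parsing core: tokenize once, convert per table type. */
static int store(const char *s, conv_fn conv, int *valp)
{
	bool neg = (*s == '-');
	unsigned long lval;

	if (sscanf(neg ? s + 1 : s, "%lu", &lval) != 1)
		return -1;
	return conv(neg, lval, valp);
}

int main(void)
{
	int v;

	printf("%d\n", store("-3", conv_int, &v));	/* 0, v == -3 */
	printf("%d\n", store("-3", conv_uint, &v));	/* -1, rejected */
	return 0;
}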
@@ -2858,6 +2892,12 @@ int proc_dointvec(struct ctl_table *table, int write,
 	return -ENOSYS;
 }
 
+int proc_douintvec(struct ctl_table *table, int write,
+		   void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	return -ENOSYS;
+}
+
 int proc_dointvec_minmax(struct ctl_table *table, int write,
 		    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -2903,6 +2943,7 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
  * exception granted :-)
  */
 EXPORT_SYMBOL(proc_dointvec);
+EXPORT_SYMBOL(proc_douintvec);
 EXPORT_SYMBOL(proc_dointvec_jiffies);
 EXPORT_SYMBOL(proc_dointvec_minmax);
 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -401,7 +401,10 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 	do {
 		seq = raw_read_seqcount_latch(&tkf->seq);
 		tkr = tkf->base + (seq & 0x01);
-		now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
+		now = ktime_to_ns(tkr->base);
+
+		now += clocksource_delta(tkr->read(tkr->clock),
+					 tkr->cycle_last, tkr->mask);
 	} while (read_seqcount_retry(&tkf->seq, seq));
 
 	return now;
--- a/kernel/time/timekeeping_debug.c
+++ b/kernel/time/timekeeping_debug.c
@@ -23,7 +23,9 @@
 
 #include "timekeeping_internal.h"
 
-static unsigned int sleep_time_bin[32] = {0};
+#define NUM_BINS 32
+
+static unsigned int sleep_time_bin[NUM_BINS] = {0};
 
 static int tk_debug_show_sleep_time(struct seq_file *s, void *data)
 {
@@ -69,6 +71,9 @@ late_initcall(tk_debug_sleep_time_init);
 
 void tk_debug_account_sleep_time(struct timespec64 *t)
 {
-	sleep_time_bin[fls(t->tv_sec)]++;
+	/* Cap bin index so we don't overflow the array */
+	int bin = min(fls(t->tv_sec), NUM_BINS-1);
+
+	sleep_time_bin[bin]++;
 }
 
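tk_debug_account_sleep_time() buckets sleep lengths by power of two via fls(), and large tv_sec values could previously produce an index one past the end of the 32-entry array; the fix clamps the bin. A standalone sketch (fls() open-coded since it is not standard C; 64-bit unsigned long assumed):

#include <stdio.h>

#define NUM_BINS 32

static unsigned int sleep_time_bin[NUM_BINS];

/* 1-based index of the highest set bit; 0 when x == 0. */
static int fls_ul(unsigned long x)
{
	int bit = 0;

	while (x) {
		x >>= 1;
		bit++;
	}
	return bit;
}

static void account_sleep(unsigned long sec)
{
	int bin = fls_ul(sec);

	if (bin > NUM_BINS - 1)
		bin = NUM_BINS - 1;	/* the clamp added above */
	sleep_time_bin[bin]++;
}

int main(void)
{
	account_sleep(5);		/* fls(5) == 3: the 4..7 s bucket */
	account_sleep(1UL << 40);	/* would index 41 without the clamp */
	printf("bin3=%u bin31=%u\n", sleep_time_bin[3], sleep_time_bin[31]);
	return 0;
}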
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -223,7 +223,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	what |= MASK_TC_BIT(op_flags, META);
 	what |= MASK_TC_BIT(op_flags, PREFLUSH);
 	what |= MASK_TC_BIT(op_flags, FUA);
-	if (op == REQ_OP_DISCARD)
+	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
 		what |= BLK_TC_ACT(BLK_TC_DISCARD);
 	if (op == REQ_OP_FLUSH)
 		what |= BLK_TC_ACT(BLK_TC_FLUSH);