Merge branch 'perf/urgent' into perf/core, to pick up dependency
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -172,8 +172,10 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	err = -EAGAIN;
 	ptep = page_check_address(page, mm, addr, &ptl, 0);
-	if (!ptep)
+	if (!ptep) {
+		mem_cgroup_cancel_charge(kpage, memcg, false);
 		goto unlock;
+	}
 
 	get_page(kpage);
 	page_add_new_anon_rmap(kpage, vma, addr, false);
@@ -200,7 +202,6 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 
 	err = 0;
  unlock:
-	mem_cgroup_cancel_charge(kpage, memcg, false);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	unlock_page(page);
 	return err;
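
For reference, a standalone userspace sketch of the cleanup pattern the uprobes hunk above restores: the pre-charged resource is released only on the failure branch, because the success path hands ownership on. All names here are illustrative, not the uprobes/memcg APIs.

/* Hypothetical demo of "cancel only on the error path"; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

static int replace_page_demo(int lookup_fails, char **out)
{
	char *charge = malloc(64);	/* stands in for the memcg pre-charge */
	int err = -1;

	if (!charge)
		return err;

	if (lookup_fails) {		/* stands in for page_check_address() returning NULL */
		free(charge);		/* cancel the charge on this path only */
		goto unlock;
	}

	*out = charge;			/* success: ownership moves to the caller */
	err = 0;
unlock:
	/* common exit path: no charge cancelling here any more */
	return err;
}

int main(void)
{
	char *page = NULL;

	printf("lookup ok:   %d\n", replace_page_demo(0, &page));
	free(page);
	page = NULL;
	printf("lookup fail: %d\n", replace_page_demo(1, &page));
	return 0;
}
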
@@ -179,7 +179,15 @@ int __read_mostly futex_cmpxchg_enabled;
  * Futex flags used to encode options to functions and preserve them across
  * restarts.
  */
-#define FLAGS_SHARED		0x01
+#ifdef CONFIG_MMU
+# define FLAGS_SHARED		0x01
+#else
+/*
+ * NOMMU does not have per process address space. Let the compiler optimize
+ * code away.
+ */
+# define FLAGS_SHARED		0x00
+#endif
 #define FLAGS_CLOCKRT		0x02
 #define FLAGS_HAS_TIMEOUT	0x04
 
@@ -405,6 +413,16 @@ static void get_futex_key_refs(union futex_key *key)
 	if (!key->both.ptr)
 		return;
 
+	/*
+	 * On MMU less systems futexes are always "private" as there is no per
+	 * process address space. We need the smp wmb nevertheless - yes,
+	 * arch/blackfin has MMU less SMP ...
+	 */
+	if (!IS_ENABLED(CONFIG_MMU)) {
+		smp_mb(); /* explicit smp_mb(); (B) */
+		return;
+	}
+
 	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
 	case FUT_OFF_INODE:
 		ihold(key->shared.inode); /* implies smp_mb(); (B) */
@@ -436,6 +454,9 @@ static void drop_futex_key_refs(union futex_key *key)
 		return;
 	}
 
+	if (!IS_ENABLED(CONFIG_MMU))
+		return;
+
 	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
 	case FUT_OFF_INODE:
 		iput(key->shared.inode);
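
A minimal userspace sketch of the IS_ENABLED(CONFIG_MMU) pattern used in the futex hunks above. IS_ENABLED() here is a trivial stand-in, not the kernel's <linux/kconfig.h> implementation; flip CONFIG_MMU_DEMO to 0 to take the !MMU path.

#include <stdio.h>

#define CONFIG_MMU_DEMO 1
#define IS_ENABLED(option) (option)

static void get_key_refs_demo(void)
{
	if (!IS_ENABLED(CONFIG_MMU_DEMO)) {
		/* Compile-time constant condition: on a !MMU build the
		 * compiler keeps only this early-return branch and drops
		 * the shared/private handling below as dead code. */
		puts("no per-process address space, treat futex as private");
		return;
	}
	puts("walk the shared/private key cases");
}

int main(void)
{
	get_key_refs_demo();
	return 0;
}
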
@@ -359,6 +359,17 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
 		else
 			dev_dbg(dev, "irq [%d-%d] for MSI\n",
 				virq, virq + desc->nvec_used - 1);
+		/*
+		 * This flag is set by the PCI layer as we need to activate
+		 * the MSI entries before the PCI layer enables MSI in the
+		 * card. Otherwise the card latches a random msi message.
+		 */
+		if (info->flags & MSI_FLAG_ACTIVATE_EARLY) {
+			struct irq_data *irq_data;
+
+			irq_data = irq_domain_get_irq_data(domain, desc->irq);
+			irq_domain_activate_irq(irq_data);
+		}
 	}
 
 	return 0;
@@ -450,7 +450,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 				goto gotlock;
 			}
 		}
-		WRITE_ONCE(pn->state, vcpu_halted);
+		WRITE_ONCE(pn->state, vcpu_hashed);
 		qstat_inc(qstat_pv_wait_head, true);
 		qstat_inc(qstat_pv_wait_again, waitcnt);
 		pv_wait(&l->locked, _Q_SLOW_VAL);
@@ -153,7 +153,6 @@ static ssize_t qstat_read(struct file *file, char __user *user_buf,
 	 */
 	if ((counter == qstat_pv_latency_kick) ||
 	    (counter == qstat_pv_latency_wake)) {
-		stat = 0;
 		if (kicks)
 			stat = DIV_ROUND_CLOSEST_ULL(stat, kicks);
 	}
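
qstat_read() reports the average kick latency as the accumulated latency divided by the number of kicks, rounded to nearest. A standalone sketch of that arithmetic; the macro below mirrors the kernel's DIV_ROUND_CLOSEST_ULL for unsigned operands, and the values are made up.

#include <stdio.h>

#define DIV_ROUND_CLOSEST_ULL(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long long latency_sum = 123456789ULL;	/* accumulated ns across CPUs */
	unsigned long long kicks = 1000ULL;		/* number of pv kicks */
	unsigned long long avg = 0;

	if (kicks)					/* guard against division by zero */
		avg = DIV_ROUND_CLOSEST_ULL(latency_sum, kicks);

	printf("average kick latency: %llu ns\n", avg);	/* 123457, rounded from 123456.789 */
	return 0;
}
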
@@ -300,12 +300,12 @@ static int create_image(int platform_mode)
 	save_processor_state();
 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
 	error = swsusp_arch_suspend();
+	/* Restore control flow magically appears here */
+	restore_processor_state();
 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
 	if (error)
 		printk(KERN_ERR "PM: Error %d creating hibernation image\n",
 			error);
-	/* Restore control flow magically appears here */
-	restore_processor_state();
 	if (!in_suspend)
 		events_check_enabled = false;
 
@@ -16,11 +16,9 @@
  */
 #include <linux/percpu.h>
 
-typedef __printf(2, 0) int (*printk_func_t)(int level, const char *fmt,
-					    va_list args);
+typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
 
-__printf(2, 0)
-int vprintk_default(int level, const char *fmt, va_list args);
+int __printf(1, 0) vprintk_default(const char *fmt, va_list args);
 
 #ifdef CONFIG_PRINTK_NMI
 
@@ -33,10 +31,9 @@ extern raw_spinlock_t logbuf_lock;
  * via per-CPU variable.
  */
 DECLARE_PER_CPU(printk_func_t, printk_func);
-__printf(2, 0)
-static inline int vprintk_func(int level, const char *fmt, va_list args)
+static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
 {
-	return this_cpu_read(printk_func)(level, fmt, args);
+	return this_cpu_read(printk_func)(fmt, args);
 }
 
 extern atomic_t nmi_message_lost;
@@ -47,10 +44,9 @@ static inline int get_nmi_message_lost(void)
 
 #else /* CONFIG_PRINTK_NMI */
 
-__printf(2, 0)
-static inline int vprintk_func(int level, const char *fmt, va_list args)
+static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
 {
-	return vprintk_default(level, fmt, args);
+	return vprintk_default(fmt, args);
 }
 
 static inline int get_nmi_message_lost(void)
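
A small userspace sketch of the printk_func_t indirection these hunks touch: a context-dependent function pointer selects the printk backend (default vs. deferred NMI path). A plain global stands in for the kernel's per-CPU variable, and the demo functions are not the kernel's.

#include <stdarg.h>
#include <stdio.h>

typedef int (*printk_func_t)(const char *fmt, va_list args);

static int vprintk_default_demo(const char *fmt, va_list args)
{
	return vprintf(fmt, args);
}

static int vprintk_nmi_demo(const char *fmt, va_list args)
{
	fputs("[deferred] ", stdout);		/* pretend we queue into a seq buffer */
	return vprintf(fmt, args);
}

static printk_func_t printk_func = vprintk_default_demo;

static int printk_demo(const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = printk_func(fmt, args);		/* one indirection picks the backend */
	va_end(args);
	return r;
}

int main(void)
{
	printk_demo("normal context %d\n", 1);
	printk_func = vprintk_nmi_demo;		/* enter "NMI" context */
	printk_demo("nmi context %d\n", 2);
	printk_func = vprintk_default_demo;	/* and leave it again */
	return 0;
}
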
@@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
  * one writer running. But the buffer might get flushed from another
  * CPU, so we need to be careful.
  */
-static int vprintk_nmi(int level, const char *fmt, va_list args)
+static int vprintk_nmi(const char *fmt, va_list args)
 {
 	struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
 	int add = 0;
@@ -79,16 +79,7 @@ again:
 	if (!len)
 		smp_rmb();
 
-	if (level != LOGLEVEL_DEFAULT) {
-		add = snprintf(s->buffer + len, sizeof(s->buffer) - len,
-			       KERN_SOH "%c", '0' + level);
-		add += vsnprintf(s->buffer + len + add,
-				 sizeof(s->buffer) - len - add,
-				 fmt, args);
-	} else {
-		add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len,
-				fmt, args);
-	}
+	add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);
 
 	/*
 	 * Do it once again if the buffer has been flushed in the meantime.
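
The block removed above encoded the log level as a KERN_SOH prefix in front of the formatted message. A small userspace sketch of that encoding; the buffer and values are illustrative, not the kernel's nmi_seq_buf.

#include <stdio.h>

#define KERN_SOH "\001"		/* ASCII Start-Of-Header, as in <linux/kern_levels.h> */

int main(void)
{
	char buf[128];
	int level = 4;		/* e.g. a warning-level message */
	int add;

	/* Two-step write: level prefix first, then the payload right after it. */
	add = snprintf(buf, sizeof(buf), KERN_SOH "%c", '0' + level);
	add += snprintf(buf + add, sizeof(buf) - add, "page fault on CPU %d\n", 1);

	printf("stored %d bytes; payload after the 2-byte prefix: %s", add, buf + 2);
	return 0;
}
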
@@ -1930,26 +1930,7 @@ asmlinkage int printk_emit(int facility, int level,
 }
 EXPORT_SYMBOL(printk_emit);
 
-#define define_pr_level(func, loglevel)			\
-asmlinkage __visible void func(const char *fmt, ...)	\
-{							\
-	va_list args;					\
-							\
-	va_start(args, fmt);				\
-	vprintk_default(loglevel, fmt, args);		\
-	va_end(args);					\
-}							\
-EXPORT_SYMBOL(func)
-
-define_pr_level(__pr_emerg, LOGLEVEL_EMERG);
-define_pr_level(__pr_alert, LOGLEVEL_ALERT);
-define_pr_level(__pr_crit, LOGLEVEL_CRIT);
-define_pr_level(__pr_err, LOGLEVEL_ERR);
-define_pr_level(__pr_warn, LOGLEVEL_WARNING);
-define_pr_level(__pr_notice, LOGLEVEL_NOTICE);
-define_pr_level(__pr_info, LOGLEVEL_INFO);
-
-int vprintk_default(int level, const char *fmt, va_list args)
+int vprintk_default(const char *fmt, va_list args)
 {
 	int r;
 
@@ -1959,7 +1940,7 @@ int vprintk_default(int level, const char *fmt, va_list args)
 		return r;
 	}
 #endif
-	r = vprintk_emit(0, level, NULL, 0, fmt, args);
+	r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
 
 	return r;
 }
@@ -1992,7 +1973,7 @@ asmlinkage __visible int printk(const char *fmt, ...)
 	int r;
 
 	va_start(args, fmt);
-	r = vprintk_func(LOGLEVEL_DEFAULT, fmt, args);
+	r = vprintk_func(fmt, args);
 	va_end(args);
 
 	return r;
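
The reverted define_pr_level() block above generated one variadic wrapper per log level, each forwarding to a vprintf-style backend. A userspace sketch of the same macro pattern, with vlog() standing in for vprintk_default() and demo names throughout; invocations are written without a trailing semicolon since the macro already ends the function definition.

#include <stdarg.h>
#include <stdio.h>

static int vlog(int level, const char *fmt, va_list args)
{
	printf("<%d> ", level);		/* prepend the log level */
	return vprintf(fmt, args);
}

#define define_pr_level(func, loglevel)		\
void func(const char *fmt, ...)			\
{						\
	va_list args;				\
						\
	va_start(args, fmt);			\
	vlog(loglevel, fmt, args);		\
	va_end(args);				\
}

define_pr_level(pr_err_demo, 3)
define_pr_level(pr_info_demo, 6)

int main(void)
{
	pr_err_demo("error %d\n", -5);
	pr_info_demo("hello %s\n", "world");
	return 0;
}
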
@@ -74,6 +74,7 @@
 #include <linux/context_tracking.h>
 #include <linux/compiler.h>
 #include <linux/frame.h>
+#include <linux/prefetch.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -2971,6 +2972,23 @@ DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
+/*
+ * The function fair_sched_class.update_curr accesses the struct curr
+ * and its field curr->exec_start; when called from task_sched_runtime(),
+ * we observe a high rate of cache misses in practice.
+ * Prefetching this data results in improved performance.
+ */
+static inline void prefetch_curr_exec_start(struct task_struct *p)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct sched_entity *curr = (&p->se)->cfs_rq->curr;
+#else
+	struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
+#endif
+	prefetch(curr);
+	prefetch(&curr->exec_start);
+}
+
 /*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
@@ -3005,6 +3023,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 	 * thread, breaking clock_gettime().
 	 */
 	if (task_current(rq, p) && task_on_rq_queued(p)) {
+		prefetch_curr_exec_start(p);
 		update_rq_clock(rq);
 		p->sched_class->update_curr(rq);
 	}
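
The prefetch_curr_exec_start() helper added above issues prefetches for data that update_curr() is about to read, so the loads overlap other work. A standalone sketch of the idea using GCC/Clang's __builtin_prefetch(), which is what the kernel's prefetch() maps to on most architectures; the struct is an illustrative stand-in.

#include <stdio.h>

struct sched_entity_demo {			/* stand-in, not the kernel struct */
	unsigned long long exec_start;
	unsigned long long sum_exec_runtime;
};

static unsigned long long read_runtime(struct sched_entity_demo *curr)
{
	/* Hint the CPU to start fetching the lines holding curr early ... */
	__builtin_prefetch(curr);
	__builtin_prefetch(&curr->exec_start);

	/* ... unrelated work would go here, then the reads hit warm cache. */
	return curr->sum_exec_runtime + curr->exec_start;
}

int main(void)
{
	struct sched_entity_demo se = { .exec_start = 100, .sum_exec_runtime = 5000 };

	printf("runtime: %llu\n", read_runtime(&se));
	return 0;
}
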
@@ -168,7 +168,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 
 	if (old_idx == IDX_INVALID) {
 		cp->size++;
-		cp->elements[cp->size - 1].dl = 0;
+		cp->elements[cp->size - 1].dl = dl;
 		cp->elements[cp->size - 1].cpu = cpu;
 		cp->elements[cpu].idx = cp->size - 1;
 		cpudl_change_key(cp, cp->size - 1, dl);
@@ -508,13 +508,21 @@ void account_process_tick(struct task_struct *p, int user_tick)
  */
 void account_idle_ticks(unsigned long ticks)
 {
+	cputime_t cputime, steal;
 
 	if (sched_clock_irqtime) {
 		irqtime_account_idle_ticks(ticks);
 		return;
 	}
 
-	account_idle_time(jiffies_to_cputime(ticks));
+	cputime = jiffies_to_cputime(ticks);
+	steal = steal_account_process_time(cputime);
+
+	if (steal >= cputime)
+		return;
+
+	cputime -= steal;
+	account_idle_time(cputime);
 }
 
 /*
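
The account_idle_ticks() change above subtracts hypervisor steal time from the idle time before accounting it. A toy arithmetic sketch with plain integers standing in for cputime_t; the values are made up.

#include <stdio.h>

int main(void)
{
	unsigned long long cputime = 10;	/* idle ticks converted to cputime units */
	unsigned long long steal = 3;		/* time the hypervisor ran someone else */

	if (steal >= cputime) {
		/* Everything was stolen: nothing left to account as idle. */
		puts("account 0 idle");
		return 0;
	}

	cputime -= steal;
	printf("account %llu idle, %llu steal\n", cputime, steal);
	return 0;
}
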
@@ -658,8 +658,11 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 		 *
 		 * XXX figure out if select_task_rq_dl() deals with offline cpus.
 		 */
-		if (unlikely(!rq->online))
+		if (unlikely(!rq->online)) {
+			lockdep_unpin_lock(&rq->lock, rf.cookie);
 			rq = dl_task_offline_migration(rq, p);
+			rf.cookie = lockdep_pin_lock(&rq->lock);
+		}
 
 		/*
 		 * Queueing this task back might have overloaded rq, check if we need
@@ -4269,7 +4269,7 @@ static void sync_throttle(struct task_group *tg, int cpu)
 	pcfs_rq = tg->parent->cfs_rq[cpu];
 
 	cfs_rq->throttle_count = pcfs_rq->throttle_count;
-	pcfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
+	cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
 }
 
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
@@ -1496,6 +1496,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 	u64 expires = KTIME_MAX;
 	unsigned long nextevt;
+	bool is_max_delta;
 
 	/*
 	 * Pretend that there is no timer pending if the cpu is offline.
@@ -1506,6 +1507,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 
 	spin_lock(&base->lock);
 	nextevt = __next_timer_interrupt(base);
+	is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
 	base->next_expiry = nextevt;
 	/*
 	 * We have a fresh next event. Check whether we can forward the base:
@@ -1519,7 +1521,8 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 		expires = basem;
 		base->is_idle = false;
 	} else {
-		expires = basem + (nextevt - basej) * TICK_NSEC;
+		if (!is_max_delta)
+			expires = basem + (nextevt - basej) * TICK_NSEC;
 		/*
 		 * If we expect to sleep more than a tick, mark the base idle:
 		 */
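
The get_next_timer_interrupt() change above only converts the jiffy delta into an absolute nanosecond expiry when a real timer is pending (is_max_delta false); otherwise the KTIME_MAX default is kept. A standalone sketch of that computation; the TICK_NSEC/KTIME_MAX constants and input values are illustrative for an HZ=250 build, not taken from a running kernel.

#include <stdio.h>

#define TICK_NSEC 4000000ULL			/* 4 ms tick, i.e. HZ=250 */
#define KTIME_MAX 0x7fffffffffffffffULL

int main(void)
{
	unsigned long long basej = 100000;		/* jiffies "now" */
	unsigned long long basem = 400000000000ULL;	/* ktime of basej, in ns */
	unsigned long long nextevt = 100025;		/* next pending timer, in jiffies */
	int is_max_delta = 0;				/* a timer is actually queued */
	unsigned long long expires = KTIME_MAX;

	if (!is_max_delta)
		expires = basem + (nextevt - basej) * TICK_NSEC;

	printf("next interrupt at %llu ns (in %llu ns)\n", expires, expires - basem);
	return 0;
}
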