Merge tag 'v4.19-rc5' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -1844,7 +1844,7 @@ static int btf_check_all_metas(struct btf_verifier_env *env)
 
 	hdr = &btf->hdr;
 	cur = btf->nohdr_data + hdr->type_off;
-	end = btf->nohdr_data + hdr->type_len;
+	end = cur + hdr->type_len;
 
 	env->log_type_id = 1;
 	while (cur < end) {

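The hunk above fixes an end-boundary calculation: the type section starts at
nohdr_data + type_off, so it ends at that start plus type_len, not at
nohdr_data + type_len. A minimal userspace sketch of the same arithmetic,
using made-up offsets rather than the kernel's BTF structures:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		char buf[64];			/* stands in for nohdr_data */
		uint32_t type_off = 16;		/* hypothetical section offset */
		uint32_t type_len = 32;		/* hypothetical section length */

		char *cur = buf + type_off;		/* section start */
		char *end_old = buf + type_len;		/* old formula: short by type_off bytes */
		char *end_new = cur + type_len;		/* fixed formula: start + length */

		assert(end_new - cur == type_len);
		assert(end_old - cur == type_len - type_off);
		return 0;
	}

With the old formula, the while (cur < end) loop would stop type_off bytes
early and never check the records at the tail of the section.
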
@@ -3163,7 +3163,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		 * an arbitrary scalar. Disallow all math except
 		 * pointer subtraction
 		 */
-		if (opcode == BPF_SUB){
+		if (opcode == BPF_SUB && env->allow_ptr_leaks) {
 			mark_reg_unknown(env, regs, insn->dst_reg);
 			return 0;
 		}

@@ -5943,6 +5943,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
 	unsigned long sp;
 	unsigned int rem;
 	u64 dyn_size;
+	mm_segment_t fs;
 
 	/*
 	 * We dump:
@@ -5960,7 +5961,10 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
 
 	/* Data. */
 	sp = perf_user_stack_pointer(regs);
+	fs = get_fs();
+	set_fs(USER_DS);
 	rem = __output_copy_user(handle, (void *) sp, dump_size);
+	set_fs(fs);
 	dyn_size = dump_size - rem;
 
 	perf_output_skip(handle, rem);

@@ -678,7 +678,7 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
 	case MODULE_STATE_COMING:
 		ret = jump_label_add_module(mod);
 		if (ret) {
-			WARN(1, "Failed to allocatote memory: jump_label may not work properly.\n");
+			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
 			jump_label_del_module(mod);
 		}
 		break;

@@ -55,7 +55,6 @@
 
 #include "lockdep_internals.h"
 
-#include <trace/events/preemptirq.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/lock.h>
 
@@ -389,7 +389,7 @@ static bool __ww_mutex_wound(struct mutex *lock,
 		/*
 		 * wake_up_process() paired with set_current_state()
 		 * inserts sufficient barriers to make sure @owner either sees
-		 * it's wounded in __ww_mutex_lock_check_stamp() or has a
+		 * it's wounded in __ww_mutex_check_kill() or has a
 		 * wakeup pending to re-read the wounded state.
 		 */
 		if (owner != current)

@@ -946,7 +946,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	}
 
-	debug_mutex_lock_common(lock, &waiter);
 	debug_mutex_add_waiter(lock, &waiter, current);
 
 	lock_contended(&lock->dep_map, ip);
 
@@ -324,7 +324,7 @@ static int __test_cycle(unsigned int nthreads)
 		if (!cycle->result)
 			continue;
 
-		pr_err("cylic deadlock not resolved, ret[%d/%d] = %d\n",
+		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
 		       n, nthreads, cycle->result);
 		ret = -EINVAL;
 		break;

@@ -195,7 +195,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
 	idr_preload_end();
 
 	if (nr < 0) {
-		retval = nr;
+		retval = (nr == -ENOSPC) ? -EAGAIN : nr;
 		goto out_free;
 	}
 
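The alloc_pid() hunk above maps the IDR's -ENOSPC (pid range exhausted) onto
-EAGAIN, the errno fork(2) is documented to return when the system cannot
create another process. A userspace view of the contract this preserves
(illustrative only, not kernel code):

	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid = fork();

		if (pid < 0) {
			/* EAGAIN marks a transient limit (pid space,
			 * RLIMIT_NPROC); callers may back off and retry. */
			if (errno == EAGAIN)
				fprintf(stderr, "fork: resource temporarily unavailable\n");
			else
				perror("fork");
			return 1;
		}
		if (pid == 0)
			_exit(0);	/* child exits immediately */
		return 0;		/* parent */
	}
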
@@ -351,7 +351,6 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
  */
 
 enum log_flags {
-	LOG_NOCONS	= 1,	/* suppress print, do not print to console */
 	LOG_NEWLINE	= 2,	/* text ended with a newline */
 	LOG_PREFIX	= 4,	/* text started with a prefix */
 	LOG_CONT	= 8,	/* text is a fragment of a continuation line */

@@ -1881,9 +1880,6 @@ int vprintk_store(int facility, int level,
 	if (dict)
 		lflags |= LOG_PREFIX|LOG_NEWLINE;
 
-	if (suppress_message_printing(level))
-		lflags |= LOG_NOCONS;
-
 	return log_output(facility, level, lflags,
 			  dict, dictlen, text, text_len);
 }

@@ -2032,6 +2028,7 @@ static void call_console_drivers(const char *ext_text, size_t ext_len,
 				 const char *text, size_t len) {}
 static size_t msg_print_text(const struct printk_log *msg,
 			     bool syslog, char *buf, size_t size) { return 0; }
+static bool suppress_message_printing(int level) { return false; }
 
 #endif /* CONFIG_PRINTK */
 
@@ -2368,10 +2365,11 @@ skip:
 			break;
 
 		msg = log_from_idx(console_idx);
-		if (msg->flags & LOG_NOCONS) {
+		if (suppress_message_printing(msg->level)) {
 			/*
-			 * Skip record if !ignore_loglevel, and
-			 * record has level above the console loglevel.
+			 * Skip record we have buffered and already printed
+			 * directly to the console when we received it, and
+			 * record that has level above the console loglevel.
 			 */
 			console_idx = log_next(console_idx);
 			console_seq++;

@@ -89,12 +89,12 @@ struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 
 static void sched_feat_disable(int i)
 {
-	static_key_disable(&sched_feat_keys[i]);
+	static_key_disable_cpuslocked(&sched_feat_keys[i]);
 }
 
 static void sched_feat_enable(int i)
 {
-	static_key_enable(&sched_feat_keys[i]);
+	static_key_enable_cpuslocked(&sched_feat_keys[i]);
 }
 #else
 static void sched_feat_disable(int i) { };

@@ -146,9 +146,11 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 
 	/* Ensure the static_key remains in a consistent state */
 	inode = file_inode(filp);
+	cpus_read_lock();
 	inode_lock(inode);
 	ret = sched_feat_set(cmp);
 	inode_unlock(inode);
+	cpus_read_unlock();
 	if (ret < 0)
 		return ret;
 
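Both sched_feat hunks are two halves of one fix: static_key_enable() and
static_key_disable() take the CPU-hotplug lock internally, so now that
sched_feat_write() wraps the update in cpus_read_lock() (taken before
inode_lock() to keep the lock ordering consistent), the helpers must use the
*_cpuslocked variants, which assume the caller already holds the hotplug
lock. A userspace analogy of that API split, with a plain mutex standing in
for the hotplug lock (an analogy, not the kernel implementation):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
	static int key_enabled;

	/* Caller must already hold hotplug_lock (the _cpuslocked flavor). */
	static void key_enable_locked(void)
	{
		key_enabled = 1;
	}

	/* Takes the lock itself (the plain flavor). */
	static void key_enable(void)
	{
		pthread_mutex_lock(&hotplug_lock);
		key_enable_locked();
		pthread_mutex_unlock(&hotplug_lock);
	}

	int main(void)
	{
		/* A writer that holds the lock across a larger critical
		 * section must call the _locked flavor; key_enable() here
		 * would deadlock on the non-recursive mutex. */
		pthread_mutex_lock(&hotplug_lock);
		key_enable_locked();
		pthread_mutex_unlock(&hotplug_lock);
		printf("key_enabled=%d\n", key_enabled);
		return 0;
	}
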
@@ -3362,6 +3362,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * attach_entity_load_avg - attach this entity to its cfs_rq load avg
  * @cfs_rq: cfs_rq to attach to
  * @se: sched_entity to attach
+ * @flags: migration hints
  *
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.

@@ -7263,6 +7264,7 @@ static void update_blocked_averages(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct cfs_rq *cfs_rq, *pos;
+	const struct sched_class *curr_class;
 	struct rq_flags rf;
 	bool done = true;
 
@@ -7299,8 +7301,10 @@ static void update_blocked_averages(int cpu)
 		if (cfs_rq_has_blocked(cfs_rq))
 			done = false;
 	}
-	update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
-	update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
+	curr_class = rq->curr->sched_class;
+	update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
+	update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
 	/* Don't need periodic decay once load/util_avg are null */
 	if (others_have_blocked(rq))

@@ -7365,13 +7369,16 @@ static inline void update_blocked_averages(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct cfs_rq *cfs_rq = &rq->cfs;
+	const struct sched_class *curr_class;
 	struct rq_flags rf;
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
-	update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
-	update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
+	curr_class = rq->curr->sched_class;
+	update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
+	update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
 #ifdef CONFIG_NO_HZ_COMMON
 	rq->last_blocked_load_update_tick = jiffies;

@@ -7482,10 +7489,10 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
 	return load_idx;
 }
 
-static unsigned long scale_rt_capacity(int cpu)
+static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
+	unsigned long max = arch_scale_cpu_capacity(sd, cpu);
 	unsigned long used, free;
 	unsigned long irq;
 
@@ -7507,7 +7514,7 @@ static unsigned long scale_rt_capacity(int cpu)
 
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-	unsigned long capacity = scale_rt_capacity(cpu);
+	unsigned long capacity = scale_rt_capacity(sd, cpu);
 	struct sched_group *sdg = sd->groups;
 
 	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);

@@ -8269,7 +8276,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 force_balance:
 	/* Looks like there is an imbalance. Compute it */
 	calculate_imbalance(env, &sds);
-	return sds.busiest;
+	return env->imbalance ? sds.busiest : NULL;
 
 out_balanced:
 	env->imbalance = 0;

@@ -9638,7 +9645,8 @@ static inline bool vruntime_normalized(struct task_struct *p)
 	 * - A task which has been woken up by try_to_wake_up() and
 	 *   waiting for actually being woken up by sched_ttwu_pending().
 	 */
-	if (!se->sum_exec_runtime || p->state == TASK_WAKING)
+	if (!se->sum_exec_runtime ||
+	    (p->state == TASK_WAKING && p->sched_remote_wakeup))
 		return true;
 
 	return false;

@@ -1295,7 +1295,7 @@ static void init_numa_topology_type(void)
 
 	n = sched_max_numa_distance;
 
-	if (sched_domains_numa_levels <= 1) {
+	if (sched_domains_numa_levels <= 2) {
 		sched_numa_topology_type = NUMA_DIRECT;
 		return;
 	}

@@ -1380,9 +1380,6 @@ void sched_init_numa(void)
 			break;
 	}
 
-	if (!level)
-		return;
-
 	/*
 	 * 'level' contains the number of unique distances
 	 *

@@ -71,9 +71,6 @@
 #include <asm/io.h>
 #include <asm/unistd.h>
 
-/* Hardening for Spectre-v1 */
-#include <linux/nospec.h>
-
 #include "uid16.h"
 
 #ifndef SET_UNALIGN_CTL

@@ -1546,6 +1546,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
 	tmp_iter_page = first_page;
 
 	do {
+		cond_resched();
+
 		to_remove_page = tmp_iter_page;
 		rb_inc_page(cpu_buffer, &tmp_iter_page);
 
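The ring-buffer hunk adds a cond_resched() to a loop that may walk a large
number of pages, so that page removal does not hog the CPU on non-preemptible
kernels. The same courtesy-yield pattern in a userspace sketch (sched_yield()
standing in for cond_resched(); the loop bound and batch size are arbitrary):

	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		/* Long-running teardown loop: yield periodically so this
		 * thread does not monopolize its CPU. */
		for (long i = 0; i < 10000000; i++) {
			if ((i & 0xFFFF) == 0)
				sched_yield();	/* analogue of cond_resched() */
		}
		puts("done");
		return 0;
	}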