Merge tag 'v4.16-rc6' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1356,6 +1356,13 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
 	return reg->type == PTR_TO_CTX;
 }
 
+static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
+{
+	const struct bpf_reg_state *reg = cur_regs(env) + regno;
+
+	return type_is_pkt_pointer(reg->type);
+}
+
 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
 				   const struct bpf_reg_state *reg,
 				   int off, int size, bool strict)
@@ -1416,10 +1423,10 @@ static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
 }
 
 static int check_ptr_alignment(struct bpf_verifier_env *env,
-			       const struct bpf_reg_state *reg,
-			       int off, int size)
+			       const struct bpf_reg_state *reg, int off,
+			       int size, bool strict_alignment_once)
 {
-	bool strict = env->strict_alignment;
+	bool strict = env->strict_alignment || strict_alignment_once;
 	const char *pointer_desc = "";
 
 	switch (reg->type) {
@@ -1576,9 +1583,9 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
  * if t==write && value_regno==-1, some unknown value is stored into memory
  * if t==read && value_regno==-1, don't care what we read from memory
  */
-static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
-			    int bpf_size, enum bpf_access_type t,
-			    int value_regno)
+static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
+			    int off, int bpf_size, enum bpf_access_type t,
+			    int value_regno, bool strict_alignment_once)
 {
 	struct bpf_reg_state *regs = cur_regs(env);
 	struct bpf_reg_state *reg = regs + regno;
@@ -1590,7 +1597,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 		return size;
 
 	/* alignment checks will add in reg->off themselves */
-	err = check_ptr_alignment(env, reg, off, size);
+	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
 	if (err)
 		return err;
 
@@ -1735,21 +1742,23 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
 		return -EACCES;
 	}
 
-	if (is_ctx_reg(env, insn->dst_reg)) {
-		verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
-			insn->dst_reg);
+	if (is_ctx_reg(env, insn->dst_reg) ||
+	    is_pkt_reg(env, insn->dst_reg)) {
+		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
+			insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ?
+			"context" : "packet");
 		return -EACCES;
 	}
 
 	/* check whether atomic_add can read the memory */
 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-			       BPF_SIZE(insn->code), BPF_READ, -1);
+			       BPF_SIZE(insn->code), BPF_READ, -1, true);
 	if (err)
 		return err;
 
 	/* check whether atomic_add can write into the same memory */
 	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-				BPF_SIZE(insn->code), BPF_WRITE, -1);
+				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
 }
 
 /* when register 'regno' is passed into function that will read 'access_size'
@@ -2388,7 +2397,8 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 		 * is inferred from register state.
 		 */
 		for (i = 0; i < meta.access_size; i++) {
-			err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
+			err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
+					       BPF_WRITE, -1, false);
 			if (err)
 				return err;
 		}
@@ -4632,7 +4642,7 @@ static int do_check(struct bpf_verifier_env *env)
 			 */
 			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_READ,
-					       insn->dst_reg);
+					       insn->dst_reg, false);
 			if (err)
 				return err;
 
@@ -4684,7 +4694,7 @@ static int do_check(struct bpf_verifier_env *env)
 			/* check that memory (dst_reg + off) is writeable */
 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_WRITE,
-					       insn->src_reg);
+					       insn->src_reg, false);
 			if (err)
 				return err;
 
@@ -4719,7 +4729,7 @@ static int do_check(struct bpf_verifier_env *env)
 			/* check that memory (dst_reg + off) is writeable */
 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_WRITE,
-					       -1);
+					       -1, false);
 			if (err)
 				return err;
 
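
The net effect of the verifier changes above: BPF_XADD is rejected outright on
context and packet pointers, and every other BPF_XADD operand is checked with
strict alignment regardless of the env-wide setting. A minimal,
userspace-compilable sketch of the flag propagation — the _demo names and the
bare off % size test are invented simplifications, not the verifier's real
logic:

/*
 * Sketch only: _demo names are invented, and off % size stands in for
 * the verifier's much richer alignment checking.
 */
#include <stdbool.h>
#include <stdio.h>

static bool env_strict_alignment = false;	/* ~ env->strict_alignment */

static int check_ptr_alignment_demo(int off, int size, bool strict_once)
{
	/* the per-call flag can tighten checking, never loosen it */
	bool strict = env_strict_alignment || strict_once;

	if (strict && (off % size) != 0)
		return -1;	/* misaligned access rejected */
	return 0;
}

static int check_mem_access_demo(int off, int size, bool strict_once)
{
	return check_ptr_alignment_demo(off, size, strict_once);
}

int main(void)
{
	/* ordinary load/store: strict only if the whole env is strict */
	printf("plain off=2 size=4 -> %d\n", check_mem_access_demo(2, 4, false));
	/* BPF_XADD-style access: always strict (callers pass true) */
	printf("xadd  off=2 size=4 -> %d\n", check_mem_access_demo(2, 4, true));
	return 0;
}

The key point is the OR in check_ptr_alignment_demo(): passing true for one
access can only make checking stricter, never relax an already-strict env.
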
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -488,25 +488,6 @@ get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat)
 }
 EXPORT_SYMBOL_GPL(get_compat_sigset);
 
-int
-put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
-		  unsigned int size)
-{
-	/* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
-#ifdef __BIG_ENDIAN
-	compat_sigset_t v;
-	switch (_NSIG_WORDS) {
-	case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
-	case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
-	case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
-	case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
-	}
-	return copy_to_user(compat, &v, size) ? -EFAULT : 0;
-#else
-	return copy_to_user(compat, set, size) ? -EFAULT : 0;
-#endif
-}
-
 #ifdef CONFIG_NUMA
 COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
 		       compat_uptr_t __user *, pages32,
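
The deleted put_compat_sigset() splits each 64-bit sigset_t word into two
32-bit compat words on big-endian kernels, the switch cases deliberately
falling through from the highest _NSIG_WORDS downward. A standalone sketch of
that word split (plain C, not the kernel code):

/* Plain C sketch of the big-endian split, not kernel code. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t word = 0x1122334455667788ULL;	/* one sigset_t word */
	uint32_t lo = (uint32_t)word;		/* -> v.sig[0] */
	uint32_t hi = (uint32_t)(word >> 32);	/* -> v.sig[1] */

	printf("lo=%#" PRIx32 " hi=%#" PRIx32 "\n", lo, hi);
	return 0;
}
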
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2432,7 +2432,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 			struct perf_event_context *task_ctx,
 			enum event_type_t event_type)
 {
-	enum event_type_t ctx_event_type = event_type & EVENT_ALL;
+	enum event_type_t ctx_event_type;
 	bool cpu_event = !!(event_type & EVENT_CPU);
 
 	/*
@@ -2442,6 +2442,8 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 	if (event_type & EVENT_PINNED)
 		event_type |= EVENT_FLEXIBLE;
 
+	ctx_event_type = event_type & EVENT_ALL;
+
 	perf_pmu_disable(cpuctx->ctx.pmu);
 	if (task_ctx)
 		task_ctx_sched_out(cpuctx, task_ctx, event_type);
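
The events/core.c fix is an ordering bug: ctx_event_type was masked out of
event_type before the PINNED-implies-FLEXIBLE widening, so its FLEXIBLE bit
could be missing. A toy reproduction — the flag values here are illustrative,
not the kernel's enum:

/* Illustrative flag values; the kernel's enum differs. */
#include <stdio.h>

enum {
	EVENT_FLEXIBLE	= 0x1,
	EVENT_PINNED	= 0x2,
	EVENT_CPU	= 0x4,
	EVENT_ALL	= EVENT_FLEXIBLE | EVENT_PINNED,
};

int main(void)
{
	int event_type = EVENT_PINNED | EVENT_CPU;

	int early = event_type & EVENT_ALL;	/* old code: masks too soon */

	if (event_type & EVENT_PINNED)		/* pinned implies flexible */
		event_type |= EVENT_FLEXIBLE;

	int late = event_type & EVENT_ALL;	/* new code: masks after */

	printf("early=%#x late=%#x\n", early, late);	/* 0x2 vs 0x3 */
	return 0;
}
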
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -373,7 +373,8 @@ static void __jump_label_update(struct static_key *key,
 			if (kernel_text_address(entry->code))
 				arch_jump_label_transform(entry, jump_label_type(entry));
 			else
-				WARN_ONCE(1, "can't patch jump_label at %pS", (void *)entry->code);
+				WARN_ONCE(1, "can't patch jump_label at %pS",
+					  (void *)(unsigned long)entry->code);
 		}
 	}
 }
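
The jump_label change only adds an intermediate (unsigned long) cast,
presumably because entry->code is a fixed-width integer that can be wider than
a pointer on some configurations, and a direct integer-to-pointer cast of a
different size draws a compiler warning. A small illustration with a
hypothetical value:

/* Hypothetical value; compile for a 32-bit target to see the warning. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t code = 0x12345678u;	/* like a 64-bit jump_entry field */

	/* void *bad = (void *)code;	would warn on 32-bit targets:
	 *				cast to pointer from integer of
	 *				different size			*/
	void *ok = (void *)(unsigned long)code;	/* narrow explicitly first */

	printf("%p\n", ok);
	return 0;
}
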
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1616,11 +1616,12 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
 void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
 {
 	DEFINE_WAKE_Q(wake_q);
+	unsigned long flags;
 	bool postunlock;
 
-	raw_spin_lock_irq(&lock->wait_lock);
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
-	raw_spin_unlock_irq(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	if (postunlock)
 		rt_mutex_postunlock(&wake_q);
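
Switching rt_mutex_futex_unlock() from the _irq to the _irqsave/_irqrestore
lock variants means it no longer unconditionally re-enables interrupts on
unlock, so it becomes safe to call from contexts that already have interrupts
disabled. A toy model of the difference — irqs_enabled and the lock_* helpers
are stand-ins, not kernel APIs:

/* Toy model; the helpers are stand-ins, not kernel APIs. */
#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;	/* pretend per-CPU interrupt flag */

static void lock_irq(void)	{ irqs_enabled = false; }
static void unlock_irq(void)	{ irqs_enabled = true; }  /* unconditional */

static void lock_irqsave(bool *flags)
{
	*flags = irqs_enabled;		/* remember the caller's state */
	irqs_enabled = false;
}

static void unlock_irqrestore(bool flags)
{
	irqs_enabled = flags;		/* hand it back unchanged */
}

int main(void)
{
	bool flags;

	irqs_enabled = false;		/* caller already disabled "irqs" */
	lock_irq();
	unlock_irq();
	printf("after _irq:     enabled=%d (wrongly re-enabled)\n", irqs_enabled);

	irqs_enabled = false;
	lock_irqsave(&flags);
	unlock_irqrestore(flags);
	printf("after _irqsave: enabled=%d (caller state preserved)\n", irqs_enabled);
	return 0;
}
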
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -640,7 +640,7 @@ device_initcall(register_warn_debugfs);
  */
 __visible void __stack_chk_fail(void)
 {
-	panic("stack-protector: Kernel stack is corrupted in: %p\n",
+	panic("stack-protector: Kernel stack is corrupted in: %pB\n",
 	      __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__stack_chk_fail);
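
The panic.c change swaps %p for %pB, so the stack-protector panic prints the
return address as symbol+offset (backtrace style) rather than a raw pointer
value, which plain %p would hash anyway since v4.15. A userspace analogue of
that symbolization using dladdr(3) — illustrative only, not how printk
implements %pB:

/*
 * Userspace analogue only; printk's %pB uses kallsyms, not dladdr().
 * Build with: cc -rdynamic demo.c -ldl
 */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>

static void report(void)
{
	void *ret = __builtin_return_address(0);
	Dl_info info;

	if (dladdr(ret, &info) && info.dli_sname)	/* symbol+offset, like %pB */
		printf("corrupted in: %s+%#tx\n", info.dli_sname,
		       (char *)ret - (char *)info.dli_saddr);
	else
		printf("corrupted in: %p\n", ret);	/* raw pointer, like %p */
}

int main(void)
{
	report();
	return 0;
}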