Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Lots of overlapping changes.  Also on the net-next side the XDP state
management is handled more in the generic layers, so undo the 'net' nfp
fix which isn't applicable in net-next.

Include a necessary change by Jakub Kicinski, with log message:

====================
cls_bpf no longer takes care of offload tracking. Make sure netdevsim
performs necessary checks. This fixes a warning caused by TC trying
to remove a filter it has not added.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
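As background on the netdevsim change referenced above: once cls_bpf stops tracking offload state, the driver must remember what it offloaded and reject destruction of anything else. Below is a minimal sketch of that kind of ownership check in plain C; the struct and function names are invented for illustration and this is not the actual netdevsim code.

#include <errno.h>
#include <stddef.h>

/* Hypothetical driver-side offload tracking (illustrative names only):
 * remember the single filter we accepted, and refuse to destroy a
 * filter we never added. */
struct demo_offload {
	const void *filter;	/* currently offloaded filter, or NULL */
};

static int demo_offload_add(struct demo_offload *st, const void *f)
{
	if (st->filter)
		return -EBUSY;	/* already offloading something else */
	st->filter = f;
	return 0;
}

static int demo_offload_del(struct demo_offload *st, const void *f)
{
	/* Without this check, a request to remove a filter that was never
	 * offloaded would corrupt the tracking state, producing the kind
	 * of warning the commit message describes. */
	if (st->filter != f)
		return -ENOENT;
	st->filter = NULL;
	return 0;
}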
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1417,6 +1417,11 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 		break;
 	case PTR_TO_STACK:
 		pointer_desc = "stack ";
+		/* The stack spill tracking logic in check_stack_write()
+		 * and check_stack_read() relies on stack accesses being
+		 * aligned.
+		 */
+		strict = true;
 		break;
 	default:
 		break;
@@ -1473,6 +1478,29 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env,
 	return env->subprog_stack_depth[subprog];
 }

+/* truncate register to smaller size (in bytes)
+ * must be called with size < BPF_REG_SIZE
+ */
+static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
+{
+	u64 mask;
+
+	/* clear high bits in bit representation */
+	reg->var_off = tnum_cast(reg->var_off, size);
+
+	/* fix arithmetic bounds */
+	mask = ((u64)1 << (size * 8)) - 1;
+	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
+		reg->umin_value &= mask;
+		reg->umax_value &= mask;
+	} else {
+		reg->umin_value = 0;
+		reg->umax_value = mask;
+	}
+	reg->smin_value = reg->umin_value;
+	reg->smax_value = reg->umax_value;
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
@@ -1608,9 +1636,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
 	    regs[value_regno].type == SCALAR_VALUE) {
 		/* b/h/w load zero-extends, mark upper bits as known 0 */
-		regs[value_regno].var_off =
-			tnum_cast(regs[value_regno].var_off, size);
-		__update_reg_bounds(&regs[value_regno]);
+		coerce_reg_to_size(&regs[value_regno], size);
 	}
 	return err;
 }
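The new coerce_reg_to_size() helper replaces the open-coded tnum_cast() plus __update_reg_bounds() pair at this call site, and additionally repairs the arithmetic bounds. The bounds rule is easy to sanity-check outside the kernel; here is a userspace restatement of just the mask arithmetic (plain C, valid for size < 8 as the helper's comment requires):

#include <stdint.h>
#include <stdio.h>

/* Truncating to 'size' bytes keeps the unsigned range exact only when
 * umin and umax agree on every bit above the new width; otherwise the
 * truncated value may wrap and nothing is known. */
static void coerce_bounds(uint64_t *umin, uint64_t *umax, int size)
{
	uint64_t mask = ((uint64_t)1 << (size * 8)) - 1;

	if ((*umin & ~mask) == (*umax & ~mask)) {
		*umin &= mask;
		*umax &= mask;
	} else {
		*umin = 0;
		*umax = mask;
	}
}

int main(void)
{
	uint64_t a = 0x100000005ULL, b = 0x100000007ULL; /* same high bits */
	uint64_t c = 0xfffffffeULL, d = 0x100000002ULL;  /* high bits differ */

	coerce_bounds(&a, &b, 4); /* becomes [5, 7] */
	coerce_bounds(&c, &d, 4); /* becomes [0, 0xffffffff] */
	printf("[%llu, %llu] [%llu, %llu]\n",
	       (unsigned long long)a, (unsigned long long)b,
	       (unsigned long long)c, (unsigned long long)d);
	return 0;
}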
@@ -1684,6 +1710,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
 		verbose(env, "invalid variable stack read R%d var_off=%s\n",
 			regno, tn_buf);
+		return -EACCES;
 	}
 	off = reg->off + reg->var_off.value;
 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
@@ -2206,7 +2233,13 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 		return -EINVAL;
 	}

+	/* With LD_ABS/IND some JITs save/restore skb from r1. */
 	changes_data = bpf_helper_changes_pkt_data(fn->func);
+	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
+		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
+			func_id_name(func_id), func_id);
+		return -EINVAL;
+	}

 	memset(&meta, 0, sizeof(meta));
 	meta.pkt_access = fn->pkt_access;
@@ -2298,14 +2331,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 	return 0;
 }

-static void coerce_reg_to_32(struct bpf_reg_state *reg)
-{
-	/* clear high 32 bits */
-	reg->var_off = tnum_cast(reg->var_off, 4);
-	/* Update bounds */
-	__update_reg_bounds(reg);
-}
-
 static bool signed_add_overflows(s64 a, s64 b)
 {
 	/* Do the add in u64, where overflow is well-defined */
@@ -2326,6 +2351,41 @@ static bool signed_sub_overflows(s64 a, s64 b)
 	return res > a;
 }

+static bool check_reg_sane_offset(struct bpf_verifier_env *env,
+				  const struct bpf_reg_state *reg,
+				  enum bpf_reg_type type)
+{
+	bool known = tnum_is_const(reg->var_off);
+	s64 val = reg->var_off.value;
+	s64 smin = reg->smin_value;
+
+	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
+		verbose(env, "math between %s pointer and %lld is not allowed\n",
+			reg_type_str[type], val);
+		return false;
+	}
+
+	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
+		verbose(env, "%s pointer offset %d is not allowed\n",
+			reg_type_str[type], reg->off);
+		return false;
+	}
+
+	if (smin == S64_MIN) {
+		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
+			reg_type_str[type]);
+		return false;
+	}
+
+	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
+		verbose(env, "value %lld makes %s pointer be out of bounds\n",
+			smin, reg_type_str[type]);
+		return false;
+	}
+
+	return true;
+}
+
 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
  * Caller should also handle BPF_MOV case separately.
  * If we return -EACCES, caller may want to try again treating pointer as a
@@ -2364,29 +2424,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,

 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
-		if (!env->allow_ptr_leaks)
-			verbose(env,
-				"R%d 32-bit pointer arithmetic prohibited\n",
-				dst);
+		verbose(env,
+			"R%d 32-bit pointer arithmetic prohibited\n",
+			dst);
 		return -EACCES;
 	}

 	if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
-		if (!env->allow_ptr_leaks)
-			verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
-				dst);
+		verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
+			dst);
 		return -EACCES;
 	}
 	if (ptr_reg->type == CONST_PTR_TO_MAP) {
-		if (!env->allow_ptr_leaks)
-			verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
-				dst);
+		verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
+			dst);
 		return -EACCES;
 	}
 	if (ptr_reg->type == PTR_TO_PACKET_END) {
-		if (!env->allow_ptr_leaks)
-			verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
-				dst);
+		verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
+			dst);
 		return -EACCES;
 	}

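These prohibitions work together with check_reg_sane_offset() and the signed_add_overflows()/signed_sub_overflows() helpers above: 64-bit pointer math can wrap silently, and wrapped bounds no longer describe the pointer. A userspace restatement of the overflow-test idiom (do the addition in unsigned arithmetic, where wraparound is well-defined, then compare; this assumes the usual two's-complement conversion back to signed):

#include <stdint.h>
#include <stdio.h>

/* Same pattern as the kernel's signed_add_overflows(): perform the
 * addition in u64, then detect whether the signed result moved in the
 * wrong direction. */
static int add_overflows(int64_t a, int64_t b)
{
	int64_t res = (int64_t)((uint64_t)a + (uint64_t)b);

	if (b < 0)
		return res > a;
	return res < a;
}

int main(void)
{
	printf("%d\n", add_overflows(INT64_MAX, 1)); /* 1: wraps to INT64_MIN */
	printf("%d\n", add_overflows(1000, 24));     /* 0: fits comfortably */
	return 0;
}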
@@ -2396,6 +2452,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	dst_reg->type = ptr_reg->type;
 	dst_reg->id = ptr_reg->id;

+	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
+	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
+		return -EINVAL;
+
 	switch (opcode) {
 	case BPF_ADD:
 		/* We can take a fixed offset as long as it doesn't overflow
@@ -2449,9 +2509,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	case BPF_SUB:
 		if (dst_reg == off_reg) {
 			/* scalar -= pointer. Creates an unknown scalar */
-			if (!env->allow_ptr_leaks)
-				verbose(env, "R%d tried to subtract pointer from scalar\n",
-					dst);
+			verbose(env, "R%d tried to subtract pointer from scalar\n",
+				dst);
 			return -EACCES;
 		}
 		/* We don't allow subtraction from FP, because (according to
@@ -2459,9 +2518,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		 * be able to deal with it.
 		 */
 		if (ptr_reg->type == PTR_TO_STACK) {
-			if (!env->allow_ptr_leaks)
-				verbose(env, "R%d subtraction from stack pointer prohibited\n",
-					dst);
+			verbose(env, "R%d subtraction from stack pointer prohibited\n",
+				dst);
 			return -EACCES;
 		}
 		if (known && (ptr_reg->off - smin_val ==
@@ -2510,28 +2568,30 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	case BPF_AND:
 	case BPF_OR:
 	case BPF_XOR:
-		/* bitwise ops on pointers are troublesome, prohibit for now.
-		 * (However, in principle we could allow some cases, e.g.
-		 * ptr &= ~3 which would reduce min_value by 3.)
-		 */
-		if (!env->allow_ptr_leaks)
-			verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
-				dst, bpf_alu_string[opcode >> 4]);
+		/* bitwise ops on pointers are troublesome, prohibit. */
+		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
+			dst, bpf_alu_string[opcode >> 4]);
 		return -EACCES;
 	default:
 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
-		if (!env->allow_ptr_leaks)
-			verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
-				dst, bpf_alu_string[opcode >> 4]);
+		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
+			dst, bpf_alu_string[opcode >> 4]);
 		return -EACCES;
 	}

+	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
+		return -EINVAL;
+
 	__update_reg_bounds(dst_reg);
 	__reg_deduce_bounds(dst_reg);
 	__reg_bound_offset(dst_reg);
 	return 0;
 }

+/* WARNING: This function does calculations on 64-bit values, but the actual
+ * execution may occur on 32-bit values. Therefore, things like bitshifts
+ * need extra checks in the 32-bit case.
+ */
 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 				      struct bpf_insn *insn,
 				      struct bpf_reg_state *dst_reg,
@@ -2542,12 +2602,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	bool src_known, dst_known;
 	s64 smin_val, smax_val;
 	u64 umin_val, umax_val;
+	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;

-	if (BPF_CLASS(insn->code) != BPF_ALU64) {
-		/* 32-bit ALU ops are (32,32)->64 */
-		coerce_reg_to_32(dst_reg);
-		coerce_reg_to_32(&src_reg);
-	}
 	smin_val = src_reg.smin_value;
 	smax_val = src_reg.smax_value;
 	umin_val = src_reg.umin_value;
@@ -2555,6 +2611,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	src_known = tnum_is_const(src_reg.var_off);
 	dst_known = tnum_is_const(dst_reg->var_off);

+	if (!src_known &&
+	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
+		__mark_reg_unknown(dst_reg);
+		return 0;
+	}
+
 	switch (opcode) {
 	case BPF_ADD:
 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
@@ -2683,9 +2745,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		__update_reg_bounds(dst_reg);
 		break;
 	case BPF_LSH:
-		if (umax_val > 63) {
-			/* Shifts greater than 63 are undefined. This includes
-			 * shifts by a negative number.
+		if (umax_val >= insn_bitness) {
+			/* Shifts greater than 31 or 63 are undefined.
+			 * This includes shifts by a negative number.
 			 */
 			mark_reg_unknown(env, regs, insn->dst_reg);
 			break;
@@ -2711,27 +2773,29 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		__update_reg_bounds(dst_reg);
 		break;
 	case BPF_RSH:
-		if (umax_val > 63) {
-			/* Shifts greater than 63 are undefined. This includes
-			 * shifts by a negative number.
+		if (umax_val >= insn_bitness) {
+			/* Shifts greater than 31 or 63 are undefined.
+			 * This includes shifts by a negative number.
 			 */
 			mark_reg_unknown(env, regs, insn->dst_reg);
 			break;
 		}
-		/* BPF_RSH is an unsigned shift, so make the appropriate casts */
-		if (dst_reg->smin_value < 0) {
-			if (umin_val) {
-				/* Sign bit will be cleared */
-				dst_reg->smin_value = 0;
-			} else {
-				/* Lost sign bit information */
-				dst_reg->smin_value = S64_MIN;
-				dst_reg->smax_value = S64_MAX;
-			}
-		} else {
-			dst_reg->smin_value =
-				(u64)(dst_reg->smin_value) >> umax_val;
-		}
+		/* BPF_RSH is an unsigned shift. If the value in dst_reg might
+		 * be negative, then either:
+		 * 1) src_reg might be zero, so the sign bit of the result is
+		 *    unknown, so we lose our signed bounds
+		 * 2) it's known negative, thus the unsigned bounds capture the
+		 *    signed bounds
+		 * 3) the signed bounds cross zero, so they tell us nothing
+		 *    about the result
+		 * If the value in dst_reg is known nonnegative, then again the
+		 * unsigned bounds capture the signed bounds.
+		 * Thus, in all cases it suffices to blow away our signed bounds
+		 * and rely on inferring new ones from the unsigned bounds and
+		 * var_off of the result.
+		 */
+		dst_reg->smin_value = S64_MIN;
+		dst_reg->smax_value = S64_MAX;
 		if (src_known)
 			dst_reg->var_off = tnum_rshift(dst_reg->var_off,
 						       umin_val);
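The rewritten comment above compresses a subtle argument: if the value in dst_reg might be negative and the shift amount might be zero, a logical right shift can land at either extreme of the signed range, so keeping any signed bound would be wrong. A two-case userspace illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t v = -4;

	/* Shift by zero keeps the value negative... */
	printf("%lld\n", (long long)(int64_t)((uint64_t)v >> 0)); /* -4 */
	/* ...while shifting by one produces a huge positive value,
	 * close to INT64_MAX. */
	printf("%lld\n", (long long)(int64_t)((uint64_t)v >> 1));
	return 0;
}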
@@ -2747,6 +2811,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		break;
 	}

+	if (BPF_CLASS(insn->code) != BPF_ALU64) {
+		/* 32-bit ALU ops are (32,32)->32 */
+		coerce_reg_to_size(dst_reg, 4);
+		coerce_reg_to_size(&src_reg, 4);
+	}
+
 	__reg_deduce_bounds(dst_reg);
 	__reg_bound_offset(dst_reg);
 	return 0;
@@ -2763,7 +2833,6 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
 	u8 opcode = BPF_OP(insn->code);
-	int rc;

 	dst_reg = &regs[insn->dst_reg];
 	src_reg = NULL;
@@ -2774,43 +2843,29 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		if (src_reg->type != SCALAR_VALUE) {
 			if (dst_reg->type != SCALAR_VALUE) {
 				/* Combining two pointers by any ALU op yields
-				 * an arbitrary scalar.
+				 * an arbitrary scalar. Disallow all math except
+				 * pointer subtraction
 				 */
-				if (!env->allow_ptr_leaks) {
-					verbose(env, "R%d pointer %s pointer prohibited\n",
-						insn->dst_reg,
-						bpf_alu_string[opcode >> 4]);
-					return -EACCES;
+				if (opcode == BPF_SUB){
+					mark_reg_unknown(env, regs, insn->dst_reg);
+					return 0;
 				}
-				mark_reg_unknown(env, regs, insn->dst_reg);
-				return 0;
+				verbose(env, "R%d pointer %s pointer prohibited\n",
+					insn->dst_reg,
+					bpf_alu_string[opcode >> 4]);
+				return -EACCES;
 			} else {
 				/* scalar += pointer
 				 * This is legal, but we have to reverse our
 				 * src/dest handling in computing the range
 				 */
-				rc = adjust_ptr_min_max_vals(env, insn,
-							     src_reg, dst_reg);
-				if (rc == -EACCES && env->allow_ptr_leaks) {
-					/* scalar += unknown scalar */
-					__mark_reg_unknown(&off_reg);
-					return adjust_scalar_min_max_vals(
-							env, insn,
-							dst_reg, off_reg);
-				}
-				return rc;
+				return adjust_ptr_min_max_vals(env, insn,
+							       src_reg, dst_reg);
 			}
 		} else if (ptr_reg) {
 			/* pointer += scalar */
-			rc = adjust_ptr_min_max_vals(env, insn,
-						     dst_reg, src_reg);
-			if (rc == -EACCES && env->allow_ptr_leaks) {
-				/* unknown scalar += scalar */
-				__mark_reg_unknown(dst_reg);
-				return adjust_scalar_min_max_vals(
-						env, insn, dst_reg, *src_reg);
-			}
-			return rc;
+			return adjust_ptr_min_max_vals(env, insn,
+						       dst_reg, src_reg);
 		}
 	} else {
 		/* Pretend the src is a reg with a known value, since we only
@@ -2819,17 +2874,9 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		off_reg.type = SCALAR_VALUE;
 		__mark_reg_known(&off_reg, insn->imm);
 		src_reg = &off_reg;
-		if (ptr_reg) { /* pointer += K */
-			rc = adjust_ptr_min_max_vals(env, insn,
-						     ptr_reg, src_reg);
-			if (rc == -EACCES && env->allow_ptr_leaks) {
-				/* unknown scalar += K */
-				__mark_reg_unknown(dst_reg);
-				return adjust_scalar_min_max_vals(
-						env, insn, dst_reg, off_reg);
-			}
-			return rc;
-		}
+		if (ptr_reg) /* pointer += K */
+			return adjust_ptr_min_max_vals(env, insn,
+						       ptr_reg, src_reg);
 	}

 	/* Got here implies adding two SCALAR_VALUEs */
@@ -2926,17 +2973,20 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 					return -EACCES;
 				}
 				mark_reg_unknown(env, regs, insn->dst_reg);
-				/* high 32 bits are known zero. */
-				regs[insn->dst_reg].var_off = tnum_cast(
-					regs[insn->dst_reg].var_off, 4);
-				__update_reg_bounds(&regs[insn->dst_reg]);
+				coerce_reg_to_size(&regs[insn->dst_reg], 4);
 			}
 		} else {
 			/* case: R = imm
 			 * remember the value we stored into this reg
 			 */
 			regs[insn->dst_reg].type = SCALAR_VALUE;
-			__mark_reg_known(regs + insn->dst_reg, insn->imm);
+			if (BPF_CLASS(insn->code) == BPF_ALU64) {
+				__mark_reg_known(regs + insn->dst_reg,
+						 insn->imm);
+			} else {
+				__mark_reg_known(regs + insn->dst_reg,
+						 (u32)insn->imm);
+			}
 		}

 	} else if (opcode > BPF_END) {
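The check_alu_op() hunk above fixes immediate loads under 32-bit ALU: a BPF_MOV of a negative immediate must be tracked as the zero-extended 32-bit value, not the sign-extended 64-bit one. The distinction in plain C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t imm = -1;

	/* What a 64-bit move records... */
	printf("%lld\n", (long long)imm);                    /* -1 */
	/* ...versus what a 32-bit move must record. */
	printf("%llu\n", (unsigned long long)(uint32_t)imm); /* 4294967295 */
	return 0;
}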
@@ -4013,15 +4063,14 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 			return range_within(rold, rcur) &&
 			       tnum_in(rold->var_off, rcur->var_off);
 		} else {
-			/* if we knew anything about the old value, we're not
-			 * equal, because we can't know anything about the
-			 * scalar value of the pointer in the new value.
+			/* We're trying to use a pointer in place of a scalar.
+			 * Even if the scalar was unbounded, this could lead to
+			 * pointer leaks because scalars are allowed to leak
+			 * while pointers are not. We could make this safe in
+			 * special cases if root is calling us, but it's
+			 * probably not worth the hassle.
 			 */
-			return rold->umin_value == 0 &&
-			       rold->umax_value == U64_MAX &&
-			       rold->smin_value == S64_MIN &&
-			       rold->smax_value == S64_MAX &&
-			       tnum_is_unknown(rold->var_off);
+			return false;
 		}
 	case PTR_TO_MAP_VALUE:
 		/* If the new min/max/var_off satisfy the old ones and
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -434,17 +434,22 @@ static struct pid *good_sigevent(sigevent_t * event)
 {
 	struct task_struct *rtn = current->group_leader;

-	if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
-		(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
-		 !same_thread_group(rtn, current) ||
-		 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
+	switch (event->sigev_notify) {
+	case SIGEV_SIGNAL | SIGEV_THREAD_ID:
+		rtn = find_task_by_vpid(event->sigev_notify_thread_id);
+		if (!rtn || !same_thread_group(rtn, current))
+			return NULL;
+		/* FALLTHRU */
+	case SIGEV_SIGNAL:
+	case SIGEV_THREAD:
+		if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
+			return NULL;
+		/* FALLTHRU */
+	case SIGEV_NONE:
+		return task_pid(rtn);
+	default:
 		return NULL;
-
-	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
-		((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
-		return NULL;
-
-	return task_pid(rtn);
+	}
 }

 static struct k_itimer * alloc_posix_timer(void)
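From userspace, good_sigevent() is what vets the sigevent passed to timer_create(2), so the switch above defines exactly which notification modes a timer may use. A small example exercising the SIGEV_SIGNAL | SIGEV_THREAD_ID path (Linux-specific; link with -lrt on older glibc; the sigev_notify_thread_id accessor is defined by hand here because glibc does not always expose that macro):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

#ifndef sigev_notify_thread_id
#define sigev_notify_thread_id _sigev_un._tid
#endif

int main(void)
{
	struct sigevent sev = { 0 };
	timer_t timer;

	/* Direct the signal at this specific thread; good_sigevent()
	 * verifies the tid exists and is in our thread group. */
	sev.sigev_notify = SIGEV_SIGNAL | SIGEV_THREAD_ID;
	sev.sigev_signo = SIGRTMIN;
	sev.sigev_notify_thread_id = syscall(SYS_gettid);

	if (timer_create(CLOCK_MONOTONIC, &sev, &timer) == -1) {
		perror("timer_create"); /* e.g. EINVAL for a bad sigev_notify */
		return 1;
	}
	timer_delete(timer);
	return 0;
}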
@@ -669,7 +674,7 @@ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
 	struct timespec64 ts64;
 	bool sig_none;

-	sig_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;
+	sig_none = timr->it_sigev_notify == SIGEV_NONE;
 	iv = timr->it_interval;

 	/* interval timer ? */
@@ -856,7 +861,7 @@ int common_timer_set(struct k_itimer *timr, int flags,

 	timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
 	expires = timespec64_to_ktime(new_setting->it_value);
-	sigev_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;
+	sigev_none = timr->it_sigev_notify == SIGEV_NONE;

 	kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
 	timr->it_active = !sigev_none;