Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:

====================
pull-request: bpf 2018-12-15

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) fix liveness propagation of callee saved registers, from Jakub.

2) fix overflow in bpf_jit_limit knob, from Daniel.

3) bpf_flow_dissector api fix, from Stanislav.

4) bpf_perf_event api fix on powerpc, from Sandipan.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -365,13 +365,11 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
 }
 
 #ifdef CONFIG_BPF_JIT
-# define BPF_JIT_LIMIT_DEFAULT	(PAGE_SIZE * 40000)
-
 /* All BPF JIT sysctl knobs here. */
 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
 int bpf_jit_harden   __read_mostly;
 int bpf_jit_kallsyms __read_mostly;
-int bpf_jit_limit    __read_mostly = BPF_JIT_LIMIT_DEFAULT;
+long bpf_jit_limit   __read_mostly;
 
 static __always_inline void
 bpf_get_prog_addr_region(const struct bpf_prog *prog,
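For context on why the knob type changed: with 64K pages, the old default of PAGE_SIZE * 40000 is 2,621,440,000 bytes, which already exceeds INT_MAX (2,147,483,647), so the signed-int default itself overflowed. A standalone sanity check of the arithmetic (illustrative only, not part of the commit):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long long page_size = 65536;       /* a 64K-page system */
            unsigned long long def = page_size * 40000; /* old BPF_JIT_LIMIT_DEFAULT */

            /* Prints: default = 2621440000, INT_MAX = 2147483647 -- the old
             * "int bpf_jit_limit" could not hold the intended default. */
            printf("default = %llu, INT_MAX = %d\n", def, INT_MAX);
            return 0;
    }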
@@ -580,16 +578,27 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 
 static atomic_long_t bpf_jit_current;
 
+/* Can be overridden by an arch's JIT compiler if it has a custom,
+ * dedicated BPF backend memory area, or if neither of the two
+ * below apply.
+ */
+u64 __weak bpf_jit_alloc_exec_limit(void)
+{
+#if defined(MODULES_VADDR)
+	return MODULES_END - MODULES_VADDR;
+#else
+	return VMALLOC_END - VMALLOC_START;
+#endif
+}
+
 static int __init bpf_jit_charge_init(void)
 {
 	/* Only used as heuristic here to derive limit. */
-	bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
-					    PAGE_SIZE), INT_MAX);
+	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
+					    PAGE_SIZE), LONG_MAX);
 	return 0;
 }
 pure_initcall(bpf_jit_charge_init);
 #endif
 
 static int bpf_jit_charge_modmem(u32 pages)
 {
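The new bpf_jit_alloc_exec_limit() is a __weak symbol, so an architecture whose JIT emits into its own dedicated region can supply a strong definition that overrides the generic MODULES/VMALLOC-range fallback above. A minimal sketch of such an override, with purely hypothetical region symbols (MYARCH_BPF_JIT_REGION_START/END are not real kernel names):

    /* arch/myarch/net/bpf_jit_comp.c (hypothetical) */
    u64 bpf_jit_alloc_exec_limit(void)
    {
            /* Report the size of this arch's dedicated BPF JIT region, so
             * the global limit heuristic (a quarter of it, page-rounded by
             * bpf_jit_charge_init() above) matches where JIT images are
             * actually allocated from. */
            return MYARCH_BPF_JIT_REGION_END - MYARCH_BPF_JIT_REGION_START;
    }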
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5102,9 +5102,16 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	}
 	new_sl->next = env->explored_states[insn_idx];
 	env->explored_states[insn_idx] = new_sl;
-	/* connect new state to parentage chain */
-	for (i = 0; i < BPF_REG_FP; i++)
-		cur_regs(env)[i].parent = &new->frame[new->curframe]->regs[i];
+	/* connect new state to parentage chain. Current frame needs all
+	 * registers connected. Only r6 - r9 of the callers are alive (pushed
+	 * to the stack implicitly by JITs) so in callers' frames connect just
+	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
+	 * the state of the call instruction (with WRITTEN set), and r0 comes
+	 * from callee with its full parentage chain, anyway.
+	 */
+	for (j = 0; j <= cur->curframe; j++)
+		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
+			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
 	/* clear write marks in current state: the writes we did are not writes
 	 * our child did, so they don't screen off its reads from us.
 	 * (There are no read marks in current state, because reads always mark
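For context on the verifier change: the pattern it has to keep sound is a caller that parks a value in a callee-saved register, calls a subprogram, and reads that register again after the call. A minimal sketch of such a program using the kernel's insn-building macros (illustrative only, not taken from the commit or its selftests):

    #include <linux/filter.h>

    static const struct bpf_insn callee_saved_read[] = {
            BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),          /* r6 = ctx (callee-saved) */
            BPF_CALL_REL(2),                              /* call subprog at insn 4 */
            BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),  /* read r6 after the call */
            BPF_EXIT_INSN(),
            /* subprog: */
            BPF_MOV64_IMM(BPF_REG_0, 0),
            BPF_EXIT_INSN(),
    };

Without connecting the caller frame's r6 - r9 into the parentage chain, the read of r6 after the call could fail to propagate a liveness mark into the explored caller state, allowing states that differ in r6 to be pruned unsoundly.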