Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:

   1) Support SPI based w5100 devices, from Akinobu Mita.

   2) Partial Segmentation Offload, from Alexander Duyck.

   3) Add GMAC4 support to stmmac driver, from Alexandre TORGUE.

   4) Allow cls_flower stats offload, from Amir Vadai.

   5) Implement bpf blinding, from Daniel Borkmann.

   6) Optimize _ASYNC_ bit twiddling on sockets, unless the socket is
      actually using FASYNC these atomics are superfluous. From Eric
      Dumazet.

   7) Run TCP more preemptibly, also from Eric Dumazet.

   8) Support LED blinking, EEPROM dumps, and rxvlan offloading in mlx5e
      driver, from Gal Pressman.

   9) Allow creating ppp devices via rtnetlink, from Guillaume Nault.

  10) Improve BPF usage documentation, from Jesper Dangaard Brouer.

  11) Support tunneling offloads in qed, from Manish Chopra.

  12) aRFS offloading in mlx5e, from Maor Gottlieb.

  13) Add RFS and RPS support to SCTP protocol, from Marcelo Ricardo
      Leitner.

  14) Add MSG_EOR support to TCP, this allows controlling packet
      coalescing on application record boundaries for more accurate
      socket timestamp sampling. From Martin KaFai Lau. (Usage sketch
      below.)

  15) Fix alignment of 64-bit netlink attributes across the board, from
      Nicolas Dichtel.

  16) Per-vlan stats in bridging, from Nikolay Aleksandrov.

  17) Several conversions of drivers to ethtool ksettings, from Philippe
      Reynes.

  18) Checksum neutral ILA in ipv6, from Tom Herbert.

  19) Factorize all of the various marvell dsa drivers into one, from
      Vivien Didelot.

  20) Add VF support to qed driver, from Yuval Mintz"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1649 commits)
  Revert "phy dp83867: Fix compilation with CONFIG_OF_MDIO=m"
  Revert "phy dp83867: Make rgmii parameters optional"
  r8169: default to 64-bit DMA on recent PCIe chips
  phy dp83867: Make rgmii parameters optional
  phy dp83867: Fix compilation with CONFIG_OF_MDIO=m
  bpf: arm64: remove callee-save registers use for tmp registers
  asix: Fix offset calculation in asix_rx_fixup() causing slow transmissions
  switchdev: pass pointer to fib_info instead of copy
  net_sched: close another race condition in tcf_mirred_release()
  tipc: fix nametable publication field in nl compat
  drivers: net: Don't print unpopulated net_device name
  qed: add support for dcbx.
  ravb: Add missing free_irq() calls to ravb_close()
  qed: Remove a stray tab
  net: ethernet: fec-mpc52xx: use phy_ethtool_{get|set}_link_ksettings
  net: ethernet: fec-mpc52xx: use phydev from struct net_device
  bpf, doc: fix typo on bpf_asm descriptions
  stmmac: hardware TX COE doesn't work when force_thresh_dma_mode is set
  net: ethernet: fs-enet: use phy_ethtool_{get|set}_link_ksettings
  net: ethernet: fs-enet: use phydev from struct net_device
  ...
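An illustrative user-space sketch (not part of this pull request) of the MSG_EOR usage mentioned in highlight 14: the application marks the end of a record so TCP does not coalesce its tail with data from later writes, which keeps TX timestamps aligned to record boundaries. The helper name and the surrounding socket setup are assumptions.

#include <sys/socket.h>
#include <sys/types.h>

/* Hypothetical helper: send one application record and flag its end with
 * MSG_EOR so the last segment of this write is not merged with later data.
 */
static ssize_t send_record(int sock, const void *rec, size_t len)
{
        return send(sock, rec, len, MSG_EOR);
}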
@@ -129,14 +129,83 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,

        return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_realloc);

void __bpf_prog_free(struct bpf_prog *fp)
{
        kfree(fp->aux);
        vfree(fp);
}
EXPORT_SYMBOL_GPL(__bpf_prog_free);

static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
{
        return BPF_CLASS(insn->code) == BPF_JMP &&
               /* Call and Exit are both special jumps with no
                * target inside the BPF instruction image.
                */
               BPF_OP(insn->code) != BPF_CALL &&
               BPF_OP(insn->code) != BPF_EXIT;
}

static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
{
        struct bpf_insn *insn = prog->insnsi;
        u32 i, insn_cnt = prog->len;

        for (i = 0; i < insn_cnt; i++, insn++) {
                if (!bpf_is_jmp_and_has_target(insn))
                        continue;

                /* Adjust offset of jmps if we cross boundaries. */
                if (i < pos && i + insn->off + 1 > pos)
                        insn->off += delta;
                else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
                        insn->off -= delta;
        }
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len)
{
        u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
        struct bpf_prog *prog_adj;

        /* Since our patchlet doesn't expand the image, we're done. */
        if (insn_delta == 0) {
                memcpy(prog->insnsi + off, patch, sizeof(*patch));
                return prog;
        }

        insn_adj_cnt = prog->len + insn_delta;

        /* Several new instructions need to be inserted. Make room
         * for them. Likely, there's no need for a new allocation as
         * last page could have large enough tailroom.
         */
        prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
                                    GFP_USER);
        if (!prog_adj)
                return NULL;

        prog_adj->len = insn_adj_cnt;

        /* Patching happens in 3 steps:
         *
         * 1) Move over tail of insnsi from next instruction onwards,
         *    so we can patch the single target insn with one or more
         *    new ones (patching is always from 1 to n insns, n > 0).
         * 2) Inject new instructions at the target location.
         * 3) Adjust branch offsets if necessary.
         */
        insn_rest = insn_adj_cnt - off - len;

        memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
                sizeof(*patch) * insn_rest);
        memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

        bpf_adj_branches(prog_adj, off, insn_delta);

        return prog_adj;
}

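Below is a small, self-contained illustration (not kernel code) of the branch-offset rule in bpf_adj_branches() above: after the single instruction at `pos` is replaced by `len` instructions (delta = len - 1), a jump whose target lies on the far side of the patched location must have its relative offset corrected. The toy_insn type and the sample program are invented for the demo and only exercise the forward-jump case.

#include <stdio.h>

struct toy_insn {
        int is_jmp;     /* does this insn carry a relative jump offset? */
        int off;        /* relative offset, BPF-style: target = i + off + 1 */
};

static void toy_adj_branches(struct toy_insn *insn, int cnt, int pos, int delta)
{
        for (int i = 0; i < cnt; i++) {
                if (!insn[i].is_jmp)
                        continue;
                /* same conditions as bpf_adj_branches() above */
                if (i < pos && i + insn[i].off + 1 > pos)
                        insn[i].off += delta;
                else if (i > pos + delta && i + insn[i].off + 1 <= pos + delta)
                        insn[i].off -= delta;
        }
}

int main(void)
{
        /* insn 0 jumps over insns 1..2 to insn 3; the insn at pos = 1 is
         * about to be expanded from 1 to 3 insns, i.e. delta = 2.
         */
        struct toy_insn prog[4] = { { 1, 2 }, { 0, 0 }, { 0, 0 }, { 0, 0 } };

        toy_adj_branches(prog, 4, 1, 2);
        printf("jump offset after patching: %d\n", prog[0].off); /* prints 4 */
        return 0;
}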
#ifdef CONFIG_BPF_JIT
struct bpf_binary_header *
@@ -174,6 +243,209 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
        module_memfree(hdr);
}

int bpf_jit_harden __read_mostly;

static int bpf_jit_blind_insn(const struct bpf_insn *from,
                              const struct bpf_insn *aux,
                              struct bpf_insn *to_buff)
{
        struct bpf_insn *to = to_buff;
        u32 imm_rnd = prandom_u32();
        s16 off;

        BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
        BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

        if (from->imm == 0 &&
            (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
             from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
                *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
                goto out;
        }

        switch (from->code) {
        case BPF_ALU | BPF_ADD | BPF_K:
        case BPF_ALU | BPF_SUB | BPF_K:
        case BPF_ALU | BPF_AND | BPF_K:
        case BPF_ALU | BPF_OR | BPF_K:
        case BPF_ALU | BPF_XOR | BPF_K:
        case BPF_ALU | BPF_MUL | BPF_K:
        case BPF_ALU | BPF_MOV | BPF_K:
        case BPF_ALU | BPF_DIV | BPF_K:
        case BPF_ALU | BPF_MOD | BPF_K:
                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
                break;

        case BPF_ALU64 | BPF_ADD | BPF_K:
        case BPF_ALU64 | BPF_SUB | BPF_K:
        case BPF_ALU64 | BPF_AND | BPF_K:
        case BPF_ALU64 | BPF_OR | BPF_K:
        case BPF_ALU64 | BPF_XOR | BPF_K:
        case BPF_ALU64 | BPF_MUL | BPF_K:
        case BPF_ALU64 | BPF_MOV | BPF_K:
        case BPF_ALU64 | BPF_DIV | BPF_K:
        case BPF_ALU64 | BPF_MOD | BPF_K:
                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
                break;

        case BPF_JMP | BPF_JEQ | BPF_K:
        case BPF_JMP | BPF_JNE | BPF_K:
        case BPF_JMP | BPF_JGT | BPF_K:
        case BPF_JMP | BPF_JGE | BPF_K:
        case BPF_JMP | BPF_JSGT | BPF_K:
        case BPF_JMP | BPF_JSGE | BPF_K:
        case BPF_JMP | BPF_JSET | BPF_K:
                /* Accommodate for extra offset in case of a backjump. */
                off = from->off;
                if (off < 0)
                        off -= 2;
                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
                break;

        case BPF_LD | BPF_ABS | BPF_W:
        case BPF_LD | BPF_ABS | BPF_H:
        case BPF_LD | BPF_ABS | BPF_B:
                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
                break;

        case BPF_LD | BPF_IND | BPF_W:
        case BPF_LD | BPF_IND | BPF_H:
        case BPF_LD | BPF_IND | BPF_B:
                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
                *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
                break;

        case BPF_LD | BPF_IMM | BPF_DW:
                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
                *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
                break;
        case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
                break;

        case BPF_ST | BPF_MEM | BPF_DW:
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
        case BPF_ST | BPF_MEM | BPF_B:
                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
                break;
        }
out:
        return to - to_buff;
}

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
                                              gfp_t gfp_extra_flags)
{
        gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
                          gfp_extra_flags;
        struct bpf_prog *fp;

        fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
        if (fp != NULL) {
                kmemcheck_annotate_bitfield(fp, meta);

                /* aux->prog still points to the fp_other one, so
                 * when promoting the clone to the real program,
                 * this still needs to be adapted.
                 */
                memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
        }

        return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
        /* aux was stolen by the other clone, so we cannot free
         * it from this path! It will be freed eventually by the
         * other program on release.
         *
         * At this point, we don't need a deferred release since
         * clone is guaranteed to not be locked.
         */
        fp->aux = NULL;
        __bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
        /* We have to repoint aux->prog to self, as we don't
         * know whether fp here is the clone or the original.
         */
        fp->aux->prog = fp;
        bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
        struct bpf_insn insn_buff[16], aux[2];
        struct bpf_prog *clone, *tmp;
        int insn_delta, insn_cnt;
        struct bpf_insn *insn;
        int i, rewritten;

        if (!bpf_jit_blinding_enabled())
                return prog;

        clone = bpf_prog_clone_create(prog, GFP_USER);
        if (!clone)
                return ERR_PTR(-ENOMEM);

        insn_cnt = clone->len;
        insn = clone->insnsi;

        for (i = 0; i < insn_cnt; i++, insn++) {
                /* We temporarily need to hold the original ld64 insn
                 * so that we can still access the first part in the
                 * second blinding run.
                 */
                if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
                    insn[1].code == 0)
                        memcpy(aux, insn, sizeof(aux));

                rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
                if (!rewritten)
                        continue;

                tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
                if (!tmp) {
                        /* Patching may have repointed aux->prog during
                         * realloc from the original one, so we need to
                         * fix it up here on error.
                         */
                        bpf_jit_prog_release_other(prog, clone);
                        return ERR_PTR(-ENOMEM);
                }

                clone = tmp;
                insn_delta = rewritten - 1;

                /* Walk new program and skip insns we just inserted. */
                insn = clone->insnsi + i + insn_delta;
                insn_cnt += insn_delta;
                i += insn_delta;
        }

        return clone;
}
#endif /* CONFIG_BPF_JIT */

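As a side note on why the constant blinding above is semantics-preserving, here is a tiny user-space illustration (not kernel code): the value stored in the instruction image is imm ^ rnd, and XOR-ing it with rnd at run time recovers the original immediate, so an attacker can no longer plant a chosen constant directly in the JIT image. The constants below are arbitrary.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        uint32_t imm = 0xdeadbeef;              /* attacker-chosen immediate */
        uint32_t rnd = (uint32_t)random();      /* per-instruction random key */
        uint32_t blinded = imm ^ rnd;           /* what lands in the image */

        /* blinded ^ rnd == imm, mirroring the MOV + XOR rewrite above */
        printf("in image: %#x, recovered: %#x\n", blinded, blinded ^ rnd);
        return 0;
}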
/* Base function for offset calculation. Needs to go into .text section,
@@ -692,15 +964,22 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
/**
 * bpf_prog_select_runtime - select exec runtime for BPF program
 * @fp: bpf_prog populated with internal BPF program
 * @err: pointer to error variable
 *
 * Try to JIT eBPF program, if JIT is not available, use interpreter.
 * The BPF program will be executed via BPF_PROG_RUN() macro.
 */
int bpf_prog_select_runtime(struct bpf_prog *fp)
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
        fp->bpf_func = (void *) __bpf_prog_run;

        bpf_int_jit_compile(fp);
        /* eBPF JITs can rewrite the program in case constant
         * blinding is active. However, in case of error during
         * blinding, bpf_int_jit_compile() must always return a
         * valid program, which in this case would simply not
         * be JITed, but falls back to the interpreter.
         */
        fp = bpf_int_jit_compile(fp);
        bpf_prog_lock_ro(fp);

        /* The tail call compatibility check can only be done at
@@ -708,7 +987,9 @@ int bpf_prog_select_runtime(struct bpf_prog *fp)
         * with JITed or non JITed program concatenations and not
         * all eBPF JITs might immediately support all features.
         */
        return bpf_check_tail_call(fp);
        *err = bpf_check_tail_call(fp);

        return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
@@ -764,14 +1045,21 @@ const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
        return NULL;
}

const struct bpf_func_proto * __weak bpf_get_event_output_proto(void)
{
        return NULL;
}

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
        .func = NULL,
@@ -783,8 +1071,14 @@ const struct bpf_func_proto bpf_tail_call_proto = {
};

/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
void __weak bpf_int_jit_compile(struct bpf_prog *prog)
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
        return prog;
}

bool __weak bpf_helper_changes_skb_data(void *func)
{
        return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
@@ -163,17 +163,26 @@ static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5)
        struct task_struct *task = current;
        char *buf = (char *) (long) r1;

        if (!task)
                return -EINVAL;
        if (unlikely(!task))
                goto err_clear;

        strlcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm)));
        strncpy(buf, task->comm, size);

        /* Verifier guarantees that size > 0. For task->comm exceeding
         * size, guarantee that buf is %NUL-terminated. Unconditionally
         * done here to save the size test.
         */
        buf[size - 1] = 0;
        return 0;
err_clear:
        memset(buf, 0, size);
        return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
        .func = bpf_get_current_comm,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_STACK,
        .arg1_type = ARG_PTR_TO_RAW_STACK,
        .arg2_type = ARG_CONST_STACK_SIZE,
};
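An illustrative BPF program sketch in the style of samples/bpf (the include paths, SEC() macro, attach point and buffer size are assumptions, not part of the diff): with ARG_PTR_TO_RAW_STACK the verifier no longer requires the stack buffer to be pre-initialized, because bpf_get_current_comm() now either fills it with a NUL-terminated name or zeroes it on failure.

#include <uapi/linux/bpf.h>
#include <uapi/linux/ptrace.h>
#include "bpf_helpers.h"                /* samples/bpf helper macros */

SEC("kprobe/sys_execve")                /* hypothetical attach point */
int comm_example(struct pt_regs *ctx)
{
        char comm[16];                  /* no "= {}" initializer needed */

        bpf_get_current_comm(&comm, sizeof(comm));
        return 0;
}

char _license[] SEC("license") = "GPL";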
@@ -116,7 +116,7 @@ free_smap:
        return ERR_PTR(err);
}

static u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
{
        struct pt_regs *regs = (struct pt_regs *) (long) r1;
        struct bpf_map *map = (struct bpf_map *) (long) r2;

@@ -762,7 +762,7 @@ static int bpf_prog_load(union bpf_attr *attr)
        fixup_bpf_calls(prog);

        /* eBPF program is ready to be JITed */
        err = bpf_prog_select_runtime(prog);
        prog = bpf_prog_select_runtime(prog, &err);
        if (err < 0)
                goto free_used_maps;

[The web viewer omitted the diffs for several other files here because they were too large to display.]
@@ -7100,7 +7100,7 @@ int perf_swevent_get_recursion_context(void)
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);

inline void perf_swevent_put_recursion_context(int rctx)
void perf_swevent_put_recursion_context(int rctx)
{
        struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);

@@ -7362,7 +7362,26 @@ static int perf_tp_event_match(struct perf_event *event,
        return 1;
}

void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
                               struct trace_event_call *call, u64 count,
                               struct pt_regs *regs, struct hlist_head *head,
                               struct task_struct *task)
{
        struct bpf_prog *prog = call->prog;

        if (prog) {
                *(struct pt_regs **)raw_data = regs;
                if (!trace_call_bpf(prog, raw_data) || hlist_empty(head)) {
                        perf_swevent_put_recursion_context(rctx);
                        return;
                }
        }
        perf_tp_event(call->event.type, count, raw_data, size, regs, head,
                      rctx, task);
}
EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);

void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
                   struct pt_regs *regs, struct hlist_head *head, int rctx,
                   struct task_struct *task)
{
@@ -7374,9 +7393,11 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
                .data = record,
        };

        perf_sample_data_init(&data, addr, 0);
        perf_sample_data_init(&data, 0, 0);
        data.raw = &raw;

        perf_trace_buf_update(record, event_type);

        hlist_for_each_entry_rcu(event, head, hlist_entry) {
                if (perf_tp_event_match(event, &data, regs))
                        perf_swevent_event(event, count, &data, regs);
@@ -7461,6 +7482,7 @@ static void perf_event_free_filter(struct perf_event *event)

static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
{
        bool is_kprobe, is_tracepoint;
        struct bpf_prog *prog;

        if (event->attr.type != PERF_TYPE_TRACEPOINT)
@@ -7469,20 +7491,31 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
        if (event->tp_event->prog)
                return -EEXIST;

        if (!(event->tp_event->flags & TRACE_EVENT_FL_UKPROBE))
                /* bpf programs can only be attached to u/kprobes */
        is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
        is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
        if (!is_kprobe && !is_tracepoint)
                /* bpf programs can only be attached to u/kprobe or tracepoint */
                return -EINVAL;

        prog = bpf_prog_get(prog_fd);
        if (IS_ERR(prog))
                return PTR_ERR(prog);

        if (prog->type != BPF_PROG_TYPE_KPROBE) {
        if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) ||
            (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT)) {
                /* valid fd, but invalid bpf program type */
                bpf_prog_put(prog);
                return -EINVAL;
        }

        if (is_tracepoint) {
                int off = trace_event_get_offsets(event->tp_event);

                if (prog->aux->max_ctx_offset > off) {
                        bpf_prog_put(prog);
                        return -EACCES;
                }
        }
        event->tp_event->prog = prog;

        return 0;
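An illustrative user-space sketch (not part of the diff) of what the change above enables: a BPF_PROG_TYPE_TRACEPOINT program can now be attached to a tracepoint perf event through the existing PERF_EVENT_IOC_SET_BPF ioctl. It assumes prog_fd is a loaded tracepoint program and tp_id was read from /sys/kernel/debug/tracing/events/<category>/<name>/id; error handling is minimal.

#include <linux/perf_event.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int attach_tracepoint_prog(int prog_fd, __u64 tp_id)
{
        struct perf_event_attr attr;
        int efd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.size = sizeof(attr);
        attr.config = tp_id;            /* tracepoint id from tracefs */
        attr.sample_period = 1;

        efd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
                      0 /* cpu */, -1 /* group */, 0);
        if (efd < 0)
                return -1;

        /* The ioctl is unchanged; the kernel now also accepts tracepoint
         * programs here, subject to the max_ctx_offset check above.
         */
        if (ioctl(efd, PERF_EVENT_IOC_SET_BPF, prog_fd) ||
            ioctl(efd, PERF_EVENT_IOC_ENABLE, 0)) {
                close(efd);
                return -1;
        }
        return efd;
}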
@@ -357,10 +357,6 @@ static int parse(struct nlattr *na, struct cpumask *mask)
        return ret;
}

#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TASKSTATS_NEEDS_PADDING 1
#endif

static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
        struct nlattr *na, *ret;
@@ -370,29 +366,6 @@ static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
                        ? TASKSTATS_TYPE_AGGR_PID
                        : TASKSTATS_TYPE_AGGR_TGID;

        /*
         * The taskstats structure is internally aligned on 8 byte
         * boundaries but the layout of the aggregrate reply, with
         * two NLA headers and the pid (each 4 bytes), actually
         * force the entire structure to be unaligned. This causes
         * the kernel to issue unaligned access warnings on some
         * architectures like ia64. Unfortunately, some software out there
         * doesn't properly unroll the NLA packet and assumes that the start
         * of the taskstats structure will always be 20 bytes from the start
         * of the netlink payload. Aligning the start of the taskstats
         * structure breaks this software, which we don't want. So, for now
         * the alignment only happens on architectures that require it
         * and those users will have to update to fixed versions of those
         * packages. Space is reserved in the packet only when needed.
         * This ifdef should be removed in several years e.g. 2012 once
         * we can be confident that fixed versions are installed on most
         * systems. We add the padding before the aggregate since the
         * aggregate is already a defined type.
         */
#ifdef TASKSTATS_NEEDS_PADDING
        if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
                goto err;
#endif
        na = nla_nest_start(skb, aggr);
        if (!na)
                goto err;
@@ -401,7 +374,8 @@ static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
                nla_nest_cancel(skb, na);
                goto err;
        }
        ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
        ret = nla_reserve_64bit(skb, TASKSTATS_TYPE_STATS,
                                sizeof(struct taskstats), TASKSTATS_TYPE_NULL);
        if (!ret) {
                nla_nest_cancel(skb, na);
                goto err;
@@ -500,10 +474,9 @@ static size_t taskstats_packet_size(void)
        size_t size;

        size = nla_total_size(sizeof(u32)) +
               nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
#ifdef TASKSTATS_NEEDS_PADDING
        size += nla_total_size(0); /* Padding for alignment */
#endif
               nla_total_size_64bit(sizeof(struct taskstats)) +
               nla_total_size(0);

        return size;
}

@@ -62,17 +62,21 @@ EXPORT_SYMBOL_GPL(trace_call_bpf);
static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
        void *dst = (void *) (long) r1;
        int size = (int) r2;
        int ret, size = (int) r2;
        void *unsafe_ptr = (void *) (long) r3;

        return probe_kernel_read(dst, unsafe_ptr, size);
        ret = probe_kernel_read(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
                memset(dst, 0, size);

        return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
        .func = bpf_probe_read,
        .gpl_only = true,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_STACK,
        .arg1_type = ARG_PTR_TO_RAW_STACK,
        .arg2_type = ARG_CONST_STACK_SIZE,
        .arg3_type = ARG_ANYTHING,
};
@@ -221,11 +225,12 @@ static const struct bpf_func_proto bpf_perf_event_read_proto = {
        .arg2_type = ARG_ANYTHING,
};

static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
{
        struct pt_regs *regs = (struct pt_regs *) (long) r1;
        struct bpf_map *map = (struct bpf_map *) (long) r2;
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u64 index = flags & BPF_F_INDEX_MASK;
        void *data = (void *) (long) r4;
        struct perf_sample_data sample_data;
        struct perf_event *event;
@@ -235,6 +240,10 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
                .data = data,
        };

        if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
                return -EINVAL;
        if (index == BPF_F_CURRENT_CPU)
                index = raw_smp_processor_id();
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;

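An illustrative BPF program sketch in the style of samples/bpf (map definition, attach point and payload are made up, and the helper prototypes come from the assumed bpf_helpers.h): with the new flags argument, a program can pass BPF_F_CURRENT_CPU instead of looking up the current CPU index itself before calling bpf_perf_event_output().

#include <uapi/linux/bpf.h>
#include <uapi/linux/ptrace.h>
#include "bpf_helpers.h"                /* samples/bpf helper macros */

struct bpf_map_def SEC("maps") events = {
        .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
        .key_size = sizeof(int),
        .value_size = sizeof(__u32),
        .max_entries = 64,              /* >= number of possible CPUs */
};

SEC("kprobe/sys_write")                 /* hypothetical attach point */
int output_example(struct pt_regs *ctx)
{
        __u64 cookie = 0x12345678;

        /* BPF_F_CURRENT_CPU selects the ring buffer of the executing CPU */
        bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
                              &cookie, sizeof(cookie));
        return 0;
}

char _license[] SEC("license") = "GPL";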
@@ -268,7 +277,34 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
        .arg5_type = ARG_CONST_STACK_SIZE,
};

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

static u64 bpf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
{
        struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);

        perf_fetch_caller_regs(regs);

        return bpf_perf_event_output((long)regs, r2, flags, r4, size);
}

static const struct bpf_func_proto bpf_event_output_proto = {
        .func = bpf_event_output,
        .gpl_only = true,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_CONST_MAP_PTR,
        .arg3_type = ARG_ANYTHING,
        .arg4_type = ARG_PTR_TO_STACK,
        .arg5_type = ARG_CONST_STACK_SIZE,
};

const struct bpf_func_proto *bpf_get_event_output_proto(void)
{
        return &bpf_event_output_proto;
}

static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
        switch (func_id) {
        case BPF_FUNC_map_lookup_elem:
@@ -295,12 +331,20 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
                return &bpf_get_smp_processor_id_proto;
        case BPF_FUNC_perf_event_read:
                return &bpf_perf_event_read_proto;
        default:
                return NULL;
        }
}

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
        switch (func_id) {
        case BPF_FUNC_perf_event_output:
                return &bpf_perf_event_output_proto;
        case BPF_FUNC_get_stackid:
                return &bpf_get_stackid_proto;
        default:
                return NULL;
                return tracing_func_proto(func_id);
        }
}

@@ -332,9 +376,82 @@ static struct bpf_prog_type_list kprobe_tl = {
        .type = BPF_PROG_TYPE_KPROBE,
};

static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
{
        /*
         * r1 points to perf tracepoint buffer where first 8 bytes are hidden
         * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
         * from there and call the same bpf_perf_event_output() helper
         */
        u64 ctx = *(long *)(uintptr_t)r1;

        return bpf_perf_event_output(ctx, r2, index, r4, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
        .func = bpf_perf_event_output_tp,
        .gpl_only = true,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_CONST_MAP_PTR,
        .arg3_type = ARG_ANYTHING,
        .arg4_type = ARG_PTR_TO_STACK,
        .arg5_type = ARG_CONST_STACK_SIZE,
};

static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
        u64 ctx = *(long *)(uintptr_t)r1;

        return bpf_get_stackid(ctx, r2, r3, r4, r5);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
        .func = bpf_get_stackid_tp,
        .gpl_only = true,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_CONST_MAP_PTR,
        .arg3_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
        switch (func_id) {
        case BPF_FUNC_perf_event_output:
                return &bpf_perf_event_output_proto_tp;
        case BPF_FUNC_get_stackid:
                return &bpf_get_stackid_proto_tp;
        default:
                return tracing_func_proto(func_id);
        }
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type)
{
        if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
                return false;
        if (type != BPF_READ)
                return false;
        if (off % size != 0)
                return false;
        return true;
}

static const struct bpf_verifier_ops tracepoint_prog_ops = {
        .get_func_proto = tp_prog_func_proto,
        .is_valid_access = tp_prog_is_valid_access,
};

static struct bpf_prog_type_list tracepoint_tl = {
        .ops = &tracepoint_prog_ops,
        .type = BPF_PROG_TYPE_TRACEPOINT,
};

static int __init register_kprobe_prog_ops(void)
{
        bpf_register_prog_type(&kprobe_tl);
        bpf_register_prog_type(&tracepoint_tl);
        return 0;
}
late_initcall(register_kprobe_prog_ops);
@@ -263,42 +263,43 @@ void perf_trace_del(struct perf_event *p_event, int flags)
        tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

void *perf_trace_buf_prepare(int size, unsigned short type,
                             struct pt_regs **regs, int *rctxp)
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
        struct trace_entry *entry;
        unsigned long flags;
        char *raw_data;
        int pc;
        int rctx;

        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                      "perf buffer not large enough"))
                      "perf buffer not large enough"))
                return NULL;

        pc = preempt_count();

        *rctxp = perf_swevent_get_recursion_context();
        if (*rctxp < 0)
        *rctxp = rctx = perf_swevent_get_recursion_context();
        if (rctx < 0)
                return NULL;

        if (regs)
                *regs = this_cpu_ptr(&__perf_regs[*rctxp]);
        raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);
                *regs = this_cpu_ptr(&__perf_regs[rctx]);
        raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

        /* zero the dead bytes from align to not leak stack to user */
        memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
        return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);

void perf_trace_buf_update(void *record, u16 type)
{
        struct trace_entry *entry = record;
        int pc = preempt_count();
        unsigned long flags;

        entry = (struct trace_entry *)raw_data;
        local_save_flags(flags);
        tracing_generic_entry_update(entry, flags, pc);
        entry->type = type;

        return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
NOKPROBE_SYMBOL(perf_trace_buf_prepare);
NOKPROBE_SYMBOL(perf_trace_buf_update);

#ifdef CONFIG_FUNCTION_TRACER
static void
@@ -319,15 +320,16 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,

        BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

        memset(&regs, 0, sizeof(regs));
        perf_fetch_caller_regs(&regs);

        entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
        entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
        if (!entry)
                return;

        entry->ip = ip;
        entry->parent_ip = parent_ip;
        perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
        perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
                              1, &regs, head, NULL);

#undef ENTRY_SIZE
@@ -204,6 +204,24 @@ static void trace_destroy_fields(struct trace_event_call *call)
        }
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
        struct ftrace_event_field *tail;
        struct list_head *head;

        head = trace_get_fields(call);
        /*
         * head->next points to the last field with the largest offset,
         * since it was added last by trace_define_field()
         */
        tail = list_first_entry(head, struct ftrace_event_field, link);
        return tail->offset + tail->size;
}

int trace_event_raw_init(struct trace_event_call *call)
{
        int id;
@@ -1149,14 +1149,15 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return;

        entry->ip = (unsigned long)tk->rp.kp.addr;
        memset(&entry[1], 0, dsize);
        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
        perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
}
NOKPROBE_SYMBOL(kprobe_perf_func);

@@ -1184,14 +1185,15 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return;

        entry->func = (unsigned long)tk->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
        perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
#endif /* CONFIG_PERF_EVENTS */
@@ -587,15 +587,16 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
        size = ALIGN(size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
                        sys_data->enter_event->event.type, NULL, &rctx);
        rec = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!rec)
                return;

        rec->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args,
                              (unsigned long *)&rec->args);
        perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
        perf_trace_buf_submit(rec, size, rctx,
                              sys_data->enter_event->event.type, 1, regs,
                              head, NULL);
}

static int perf_sysenter_enable(struct trace_event_call *call)
@@ -660,14 +661,14 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
        size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
                        sys_data->exit_event->event.type, NULL, &rctx);
        rec = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!rec)
                return;

        rec->nr = syscall_nr;
        rec->ret = syscall_get_return_value(current, regs);
        perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
        perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
                              1, regs, head, NULL);
}

static int perf_sysexit_enable(struct trace_event_call *call)
@@ -1131,7 +1131,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
        if (hlist_empty(head))
                goto out;

        entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                goto out;

@@ -1152,7 +1152,8 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
                memset(data + len, 0, size - esize - len);
        }

        perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
out:
        preempt_enable();
}