bpf, ppc64: remove ld_abs/ld_ind
Since LD_ABS/LD_IND instructions are now removed from the core and reimplemented through a combination of inlined BPF instructions and a slow-path helper, we can get rid of the complexity from the ppc64 JIT.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Tested-by: Sandipan Das <sandipan@linux.vnet.ibm.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit dbf44daf7c
parent 4db25cc988
committed by Alexei Starovoitov
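For context, LD_ABS/LD_IND are classic BPF's packet-load instructions, which the sk_load_* assembly helpers removed below used to back on ppc64. A minimal userspace sketch of a filter that relies on LD_ABS is shown here; it is illustrative only, not part of the patch, and needs CAP_NET_RAW to actually run:

#include <sys/socket.h>
#include <sys/types.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/filter.h>
#include <stdio.h>

int main(void)
{
	/* Classic BPF: LD_ABS loads the halfword at byte offset 12 (the
	 * EtherType) from the packet, then only IPv4 frames are accepted.
	 */
	struct sock_filter code[] = {
		BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, 12),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop   */
	};
	struct sock_fprog bpf = {
		.len	= sizeof(code) / sizeof(code[0]),
		.filter	= code,
	};
	int sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (sock < 0 ||
	    setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf)))
		perror("ld_abs filter demo");
	return 0;
}

With the core now rewriting such loads into ordinary BPF instructions plus a slow-path helper, the JIT changes below can drop the dedicated skb cache registers and the helper-call machinery.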
@@ -20,7 +20,7 @@
  * with our redzone usage.
  *
  *		[	prev sp		] <-------------
- *		[   nv gpr save area	] 8*8		|
+ *		[   nv gpr save area	] 6*8		|
  *		[    tail_call_cnt	] 8		|
  *		[    local_tmp_var	] 8		|
  * fp (r31) -->	[   ebpf stack space	] upto 512	|
@@ -28,8 +28,8 @@
  * sp (r1) --->	[	stack pointer	] --------------
  */

-/* for gpr non volatile registers BPG_REG_6 to 10, plus skb cache registers */
-#define BPF_PPC_STACK_SAVE	(8*8)
+/* for gpr non volatile registers BPG_REG_6 to 10 */
+#define BPF_PPC_STACK_SAVE	(6*8)
 /* for bpf JIT code internal usage */
 #define BPF_PPC_STACK_LOCALS	16
 /* stack frame excluding BPF stack, ensure this is quadword aligned */
@@ -39,10 +39,8 @@
 #ifndef __ASSEMBLY__

 /* BPF register usage */
-#define SKB_HLEN_REG	(MAX_BPF_JIT_REG + 0)
-#define SKB_DATA_REG	(MAX_BPF_JIT_REG + 1)
-#define TMP_REG_1	(MAX_BPF_JIT_REG + 2)
-#define TMP_REG_2	(MAX_BPF_JIT_REG + 3)
+#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
+#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)

 /* BPF to ppc register mappings */
 static const int b2p[] = {
@@ -63,40 +61,23 @@ static const int b2p[] = {
 	[BPF_REG_FP] = 31,
 	/* eBPF jit internal registers */
 	[BPF_REG_AX] = 2,
-	[SKB_HLEN_REG] = 25,
-	[SKB_DATA_REG] = 26,
 	[TMP_REG_1] = 9,
 	[TMP_REG_2] = 10
 };

-/* PPC NVR range -- update this if we ever use NVRs below r24 */
-#define BPF_PPC_NVR_MIN		24
-
-/* Assembly helpers */
-#define DECLARE_LOAD_FUNC(func)	u64 func(u64 r3, u64 r4);			\
-				u64 func##_negative_offset(u64 r3, u64 r4);	\
-				u64 func##_positive_offset(u64 r3, u64 r4);
-
-DECLARE_LOAD_FUNC(sk_load_word);
-DECLARE_LOAD_FUNC(sk_load_half);
-DECLARE_LOAD_FUNC(sk_load_byte);
-
-#define CHOOSE_LOAD_FUNC(imm, func)						\
-			(imm < 0 ?						\
-			(imm >= SKF_LL_OFF ? func##_negative_offset : func) :	\
-			func##_positive_offset)
+/* PPC NVR range -- update this if we ever use NVRs below r27 */
+#define BPF_PPC_NVR_MIN		27

 #define SEEN_FUNC	0x1000 /* might call external helpers */
 #define SEEN_STACK	0x2000 /* uses BPF stack */
-#define SEEN_SKB	0x4000 /* uses sk_buff */
-#define SEEN_TAILCALL	0x8000 /* uses tail calls */
+#define SEEN_TAILCALL	0x4000 /* uses tail calls */

 struct codegen_context {
 	/*
 	 * This is used to track register usage as well
 	 * as calls to external helpers.
 	 * - register usage is tracked with corresponding
-	 *   bits (r3-r10 and r25-r31)
+	 *   bits (r3-r10 and r27-r31)
 	 * - rest of the bits can be used to track other
 	 *   things -- for now, we use bits 16 to 23
 	 *   encoded in SEEN_* macros above