bpf, arm64: remove ld_abs/ld_ind
Since LD_ABS/LD_IND instructions are now removed from the core and
reimplemented through a combination of inlined BPF instructions and a
slow-path helper, we can get rid of the complexity from the arm64 JIT.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Parent: e782bdcf58
Commit: 816d9ef32a
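For context before the diff: the removed arm64 JIT case essentially loaded the requested packet bytes through the bpf_load_pointer() slow path and byte-swapped the result into host order. The stand-alone C sketch below illustrates those LD_ABS load semantics under the simplifying assumption of a flat, linear packet buffer; the names pkt_sketch, load_pointer_sketch() and ld_abs_w_sketch() are invented for illustration and are not kernel interfaces.

/*
 * Minimal sketch (not kernel code) of what BPF_LD | BPF_ABS | BPF_W did:
 * load a 32-bit word at a fixed packet offset, convert it to host byte
 * order, and abort the program with R0 = 0 if the offset is out of bounds.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohl() */

struct pkt_sketch {
	const uint8_t *data;
	uint32_t len;
};

/* Bounds-checked access standing in for the bpf_load_pointer() slow path:
 * copy 'size' bytes at 'offset' into 'buf' and return it, or NULL on error. */
static const void *load_pointer_sketch(const struct pkt_sketch *p, int offset,
				       unsigned int size, void *buf)
{
	if (offset < 0 || (unsigned int)offset + size > p->len)
		return NULL;
	memcpy(buf, p->data + offset, size);
	return buf;
}

/* R0 = ntohl(*(u32 *)(pkt->data + imm)); a non-zero return corresponds to
 * the JIT branching straight to the epilogue with R0 == 0. */
static int ld_abs_w_sketch(const struct pkt_sketch *p, int imm, uint32_t *r0)
{
	uint32_t tmp;
	const void *ptr = load_pointer_sketch(p, imm, sizeof(tmp), &tmp);

	if (!ptr)
		return -1;
	*r0 = ntohl(*(const uint32_t *)ptr);
	return 0;
}

int main(void)
{
	/* Toy usage: 32-bit absolute load at offset 0 of a small buffer. */
	const uint8_t frame[14] = { 0xde, 0xad, 0xbe, 0xef };
	struct pkt_sketch p = { .data = frame, .len = sizeof(frame) };
	uint32_t r0;

	return (ld_abs_w_sketch(&p, 0, &r0) == 0 && r0 == 0xdeadbeefu) ? 0 : 1;
}

On a real sk_buff the data may be non-linear, which is why the removed JIT code reserved a stack bounce buffer (r4 = FP - stack_size) and called a helper instead of emitting a direct load.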
@@ -723,71 +723,6 @@ emit_cond_jmp:
 		emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
 		break;
 
-	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
-	case BPF_LD | BPF_ABS | BPF_W:
-	case BPF_LD | BPF_ABS | BPF_H:
-	case BPF_LD | BPF_ABS | BPF_B:
-	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
-	case BPF_LD | BPF_IND | BPF_W:
-	case BPF_LD | BPF_IND | BPF_H:
-	case BPF_LD | BPF_IND | BPF_B:
-	{
-		const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
-		const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
-		const u8 fp = bpf2a64[BPF_REG_FP];
-		const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
-		const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
-		const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
-		const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
-		const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
-		int size;
-
-		emit(A64_MOV(1, r1, r6), ctx);
-		emit_a64_mov_i(0, r2, imm, ctx);
-		if (BPF_MODE(code) == BPF_IND)
-			emit(A64_ADD(0, r2, r2, src), ctx);
-		switch (BPF_SIZE(code)) {
-		case BPF_W:
-			size = 4;
-			break;
-		case BPF_H:
-			size = 2;
-			break;
-		case BPF_B:
-			size = 1;
-			break;
-		default:
-			return -EINVAL;
-		}
-		emit_a64_mov_i64(r3, size, ctx);
-		emit(A64_SUB_I(1, r4, fp, ctx->stack_size), ctx);
-		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
-		emit(A64_BLR(r5), ctx);
-		emit(A64_MOV(1, r0, A64_R(0)), ctx);
-
-		jmp_offset = epilogue_offset(ctx);
-		check_imm19(jmp_offset);
-		emit(A64_CBZ(1, r0, jmp_offset), ctx);
-		emit(A64_MOV(1, r5, r0), ctx);
-		switch (BPF_SIZE(code)) {
-		case BPF_W:
-			emit(A64_LDR32(r0, r5, A64_ZR), ctx);
-#ifndef CONFIG_CPU_BIG_ENDIAN
-			emit(A64_REV32(0, r0, r0), ctx);
-#endif
-			break;
-		case BPF_H:
-			emit(A64_LDRH(r0, r5, A64_ZR), ctx);
-#ifndef CONFIG_CPU_BIG_ENDIAN
-			emit(A64_REV16(0, r0, r0), ctx);
-#endif
-			break;
-		case BPF_B:
-			emit(A64_LDRB(r0, r5, A64_ZR), ctx);
-			break;
-		}
-		break;
-	}
 	default:
 		pr_err_once("unknown opcode %02x\n", code);
 		return -EINVAL;