powerpc/bpf: Use _Rn macros for GPRs

Use _Rn macros to specify register names to make their usage clear.
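
For context: each _Rn macro is simply an alias for GPR number n, so this
change is purely cosmetic and the generated code is identical before and
after. A minimal sketch of what such definitions look like (illustrative
only -- the exact header and formatting in the powerpc tree may differ):

	/* Illustrative sketch only: in the kernel these live in the
	 * powerpc headers. _Rn expands to the plain GPR number n, so
	 * [BPF_REG_1] = _R4 and [BPF_REG_1] = 4 are equivalent.
	 */
	#define _R0	0
	#define _R1	1
	#define _R2	2
	/* ... one define per GPR ... */
	#define _R31	31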

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/7df626b8cdc6141d4295ac16137c82ad570b6637.1644834730.git.naveen.n.rao@linux.vnet.ibm.com
Author: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Date: 2022-02-14 16:11:49 +05:30
Committer: Michael Ellerman
Parent: 576a6c3a00
Commit: 036d559c0b
2 changed files with 49 additions and 49 deletions

--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c

@@ -41,23 +41,23 @@
 /* BPF to ppc register mappings */
 const int b2p[MAX_BPF_JIT_REG + 1] = {
 	/* function return value */
-	[BPF_REG_0] = 12,
+	[BPF_REG_0] = _R12,
 	/* function arguments */
-	[BPF_REG_1] = 4,
-	[BPF_REG_2] = 6,
-	[BPF_REG_3] = 8,
-	[BPF_REG_4] = 10,
-	[BPF_REG_5] = 22,
+	[BPF_REG_1] = _R4,
+	[BPF_REG_2] = _R6,
+	[BPF_REG_3] = _R8,
+	[BPF_REG_4] = _R10,
+	[BPF_REG_5] = _R22,
 	/* non volatile registers */
-	[BPF_REG_6] = 24,
-	[BPF_REG_7] = 26,
-	[BPF_REG_8] = 28,
-	[BPF_REG_9] = 30,
+	[BPF_REG_6] = _R24,
+	[BPF_REG_7] = _R26,
+	[BPF_REG_8] = _R28,
+	[BPF_REG_9] = _R30,
 	/* frame pointer aka BPF_REG_10 */
-	[BPF_REG_FP] = 18,
+	[BPF_REG_FP] = _R18,
 	/* eBPF jit internal registers */
-	[BPF_REG_AX] = 20,
-	[TMP_REG] = 31,		/* 32 bits */
+	[BPF_REG_AX] = _R20,
+	[TMP_REG] = _R31,	/* 32 bits */
 };
 
 static int bpf_to_ppc(struct codegen_context *ctx, int reg)
@@ -66,8 +66,8 @@ static int bpf_to_ppc(struct codegen_context *ctx, int reg)
 }
 
 /* PPC NVR range -- update this if we ever use NVRs below r17 */
-#define BPF_PPC_NVR_MIN	17
-#define BPF_PPC_TC	16
+#define BPF_PPC_NVR_MIN	_R17
+#define BPF_PPC_TC	_R16
 
 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
 {

--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c

@@ -48,28 +48,28 @@
 /* BPF to ppc register mappings */
 const int b2p[MAX_BPF_JIT_REG + 2] = {
 	/* function return value */
-	[BPF_REG_0] = 8,
+	[BPF_REG_0] = _R8,
 	/* function arguments */
-	[BPF_REG_1] = 3,
-	[BPF_REG_2] = 4,
-	[BPF_REG_3] = 5,
-	[BPF_REG_4] = 6,
-	[BPF_REG_5] = 7,
+	[BPF_REG_1] = _R3,
+	[BPF_REG_2] = _R4,
+	[BPF_REG_3] = _R5,
+	[BPF_REG_4] = _R6,
+	[BPF_REG_5] = _R7,
 	/* non volatile registers */
-	[BPF_REG_6] = 27,
-	[BPF_REG_7] = 28,
-	[BPF_REG_8] = 29,
-	[BPF_REG_9] = 30,
+	[BPF_REG_6] = _R27,
+	[BPF_REG_7] = _R28,
+	[BPF_REG_8] = _R29,
+	[BPF_REG_9] = _R30,
 	/* frame pointer aka BPF_REG_10 */
-	[BPF_REG_FP] = 31,
+	[BPF_REG_FP] = _R31,
 	/* eBPF jit internal registers */
-	[BPF_REG_AX] = 12,
-	[TMP_REG_1] = 9,
-	[TMP_REG_2] = 10
+	[BPF_REG_AX] = _R12,
+	[TMP_REG_1] = _R9,
+	[TMP_REG_2] = _R10
 };
 
 /* PPC NVR range -- update this if we ever use NVRs below r27 */
-#define BPF_PPC_NVR_MIN	27
+#define BPF_PPC_NVR_MIN	_R27
 
 static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
 {
@@ -136,7 +136,7 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 	if (ctx->seen & SEEN_TAILCALL) {
 		EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0));
 		/* this goes in the redzone */
-		EMIT(PPC_RAW_STD(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8)));
+		EMIT(PPC_RAW_STD(b2p[TMP_REG_1], _R1, -(BPF_PPC_STACK_SAVE + 8)));
 	} else {
 		EMIT(PPC_RAW_NOP());
 		EMIT(PPC_RAW_NOP());
@@ -149,10 +149,10 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 	 */
 	if (ctx->seen & SEEN_FUNC) {
 		EMIT(PPC_RAW_MFLR(_R0));
-		EMIT(PPC_RAW_STD(0, 1, PPC_LR_STKOFF));
+		EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
 	}
 
-	EMIT(PPC_RAW_STDU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
+	EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
 }
 
 /*
@@ -162,11 +162,11 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 	 */
 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
 		if (bpf_is_seen_register(ctx, b2p[i]))
-			EMIT(PPC_RAW_STD(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i])));
+			EMIT(PPC_RAW_STD(b2p[i], _R1, bpf_jit_stack_offsetof(ctx, b2p[i])));
 
 	/* Setup frame pointer to point to the bpf stack area */
 	if (bpf_is_seen_register(ctx, b2p[BPF_REG_FP]))
-		EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1,
+		EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], _R1,
 				STACK_FRAME_MIN_SIZE + ctx->stack_size));
 }
@@ -177,14 +177,14 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
 	/* Restore NVRs */
 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
 		if (bpf_is_seen_register(ctx, b2p[i]))
-			EMIT(PPC_RAW_LD(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i])));
+			EMIT(PPC_RAW_LD(b2p[i], _R1, bpf_jit_stack_offsetof(ctx, b2p[i])));
 
 	/* Tear down our stack frame */
 	if (bpf_has_stack_frame(ctx)) {
-		EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size));
+		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
 		if (ctx->seen & SEEN_FUNC) {
-			EMIT(PPC_RAW_LD(0, 1, PPC_LR_STKOFF));
-			EMIT(PPC_RAW_MTLR(0));
+			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
+			EMIT(PPC_RAW_MTLR(_R0));
 		}
 	}
 }
@@ -194,7 +194,7 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	bpf_jit_emit_common_epilogue(image, ctx);
 
 	/* Move result to r3 */
-	EMIT(PPC_RAW_MR(3, b2p[BPF_REG_0]));
+	EMIT(PPC_RAW_MR(_R3, b2p[BPF_REG_0]));
 	EMIT(PPC_RAW_BLR());
 }
@@ -232,7 +232,7 @@ int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func
 		func += FUNCTION_DESCR_SIZE;
 
 	/* Load function address into r12 */
-	PPC_LI64(12, func);
+	PPC_LI64(_R12, func);
 
 	/* For bpf-to-bpf function calls, the callee's address is unknown
 	 * until the last extra pass. As seen above, we use PPC_LI64() to
@@ -247,7 +247,7 @@ int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func
 	for (i = ctx->idx - ctx_idx; i < 5; i++)
 		EMIT(PPC_RAW_NOP());
 
-	EMIT(PPC_RAW_MTCTR(12));
+	EMIT(PPC_RAW_MTCTR(_R12));
 	EMIT(PPC_RAW_BCTRL());
 
 	return 0;
@@ -281,7 +281,7 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
 	 *   goto out;
 	 */
-	EMIT(PPC_RAW_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)));
+	EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R1, bpf_jit_stack_tailcallcnt(ctx)));
 	EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT));
 	PPC_BCC_SHORT(COND_GE, out);
@@ -289,7 +289,7 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 	 * tail_call_cnt++;
 	 */
 	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1));
-	EMIT(PPC_RAW_STD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)));
+	EMIT(PPC_RAW_STD(b2p[TMP_REG_1], _R1, bpf_jit_stack_tailcallcnt(ctx)));
 
 	/* prog = array->ptrs[index]; */
 	EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8));
@@ -680,8 +680,8 @@ bpf_alu32_trunc:
 			break;
 		case 64:
 			/* Store the value to stack and then use byte-reverse loads */
-			EMIT(PPC_RAW_STD(dst_reg, 1, bpf_jit_stack_local(ctx)));
-			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
+			EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
+			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], _R1, bpf_jit_stack_local(ctx)));
 			if (cpu_has_feature(CPU_FTR_ARCH_206)) {
 				EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
 			} else {
@@ -736,8 +736,8 @@ emit_clear:
 			break;
 		case STF_BARRIER_FALLBACK:
 			ctx->seen |= SEEN_FUNC;
-			PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
-			EMIT(PPC_RAW_MTCTR(12));
+			PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
+			EMIT(PPC_RAW_MTCTR(_R12));
 			EMIT(PPC_RAW_BCTRL());
 			break;
 		case STF_BARRIER_NONE:
@@ -952,7 +952,7 @@ emit_clear:
 				return ret;
 
 			/* move return value from r3 to BPF_REG_0 */
-			EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
+			EMIT(PPC_RAW_MR(b2p[BPF_REG_0], _R3));
 			break;
 
 			/*