libbpf: Remove gcc support for bpf_tail_call_static for now

This reverts commit 14e5ce7994 ("libbpf: Add GCC support for
bpf_tail_call_static"). The reason is that GCC invented its own BPF asm
syntax which does not conform to the LLVM one, and going forward this
would be more painful to maintain here and in other areas of the
library. Thus remove it; the ask to the GCC folks is to align with the
LLVM one and use the exact same syntax.

Fixes: 14e5ce7994 ("libbpf: Add GCC support for bpf_tail_call_static")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: James Hilliard <james.hilliard1@gmail.com>
Cc: Jose E. Marchesi <jose.marchesi@oracle.com>
This commit is contained in:
Daniel Borkmann 2022-09-09 16:15:11 +02:00
parent b239da3420
commit 665f5d3577
1 changed file with 6 additions and 13 deletions

View File

@@ -131,7 +131,7 @@
/*
* Helper function to perform a tail call with a constant/immediate map slot.
*/
#if (!defined(__clang__) || __clang_major__ >= 8) && defined(__bpf__)
#if __clang_major__ >= 8 && defined(__bpf__)
static __always_inline void
bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
{
@@ -139,8 +139,8 @@ bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
__bpf_unreachable();
/*
* Provide a hard guarantee that the compiler won't optimize setting r2
* (map pointer) and r3 (constant map index) from _different paths_ ending
* Provide a hard guarantee that LLVM won't optimize setting r2 (map
* pointer) and r3 (constant map index) from _different paths_ ending
* up at the _same_ call insn as otherwise we won't be able to use the
* jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel
* given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key
@@ -148,19 +148,12 @@ bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
*
* Note on clobber list: we need to stay in-line with BPF calling
* convention, so even if we don't end up using r0, r4, r5, we need
* to mark them as clobber so that the compiler doesn't end up using
* them before / after the call.
* to mark them as clobber so that LLVM doesn't end up using them
* before / after the call.
*/
asm volatile(
#ifdef __clang__
"r1 = %[ctx]\n\t"
asm volatile("r1 = %[ctx]\n\t"
"r2 = %[map]\n\t"
"r3 = %[slot]\n\t"
#else
"mov %%r1,%[ctx]\n\t"
"mov %%r2,%[map]\n\t"
"mov %%r3,%[slot]\n\t"
#endif
"call 12"
:: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
: "r0", "r1", "r2", "r3", "r4", "r5");