Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
commit b8af417e4d
Daniel Borkmann says:

====================
pull-request: bpf-next 2021-02-16

The following pull-request contains BPF updates for your *net-next* tree.

There's a small merge conflict between 7eeba1706e ("tcp: Add receive timestamp
support for receive zerocopy.") from the net-next tree and 9cacf81f81 ("bpf:
Remove extra lock_sock for TCP_ZEROCOPY_RECEIVE") from the bpf-next tree.
Resolve as follows:

  [...]
  lock_sock(sk);
  err = tcp_zerocopy_receive(sk, &zc, &tss);
  err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname,
                                            &zc, &len, err);
  release_sock(sk);
  [...]

We've added 116 non-merge commits during the last 27 day(s) which contain
a total of 156 files changed, 5662 insertions(+), 1489 deletions(-).

The main changes are:

1) Add support for pointers to types with known size among global function
   args to overcome the limit on the max number of allowed args, from
   Dmitrii Banshchikov.

2) Add bpf_iter for task_vma which can be used to generate information
   similar to /proc/pid/maps, from Song Liu.

3) Enable bpf_{g,s}etsockopt() from all sock_addr related program hooks.
   Allow rewriting bind user ports from the BPF side below the
   ip_unprivileged_port_start range, both from Stanislav Fomichev.

4) Prevent recursion on fentry/fexit & sleepable programs and allow
   map-in-map as well as per-cpu maps for the latter, from Alexei Starovoitov.

5) Add a selftest script to run BPF CI locally. Also enable the BPF ring
   buffer for sleepable programs, both from KP Singh.

6) Extend the verifier to enable variable-offset read/write access to the
   BPF program stack, from Andrei Matei.

7) Improve tc & XDP MTU handling and add a new bpf_check_mtu() helper to
   query device MTU from programs, from Jesper Dangaard Brouer.

8) Allow the bpf_get_socket_cookie() helper to also be called from
   [sleepable] BPF tracing programs, from Florent Revest.

9) Extend the x86 JIT to pad JMPs with NOPs to help the image converge when
   otherwise too many passes are required, from Gary Lin.

10) Verifier fixes on atomics with BPF_FETCH as well as function-by-function
    verification, both related to zero-extension handling, from Ilya
    Leoshkevich.

11) Better kernel build integration of the resolve_btfids tool, from Jiri Olsa.

12) Batch of AF_XDP selftest cleanups and a small performance improvement in
    libbpf's xsk map redirect for newer kernels, from Björn Töpel.

13) Follow-up BPF doc and verifier improvements around atomics with
    BPF_FETCH, from Brendan Jackman.

14) Permit zero-sized data sections, e.g. if an ELF .rodata section contains
    read-only data from local variables, from Yonghong Song.

15) veth driver skb bulk-allocation for ndo_xdp_xmit, from Lorenzo Bianconi.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/indirect_call_wrapper.h (71 lines, 2.2 KiB, C)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_INDIRECT_CALL_WRAPPER_H
#define _LINUX_INDIRECT_CALL_WRAPPER_H

#ifdef CONFIG_RETPOLINE

/*
 * INDIRECT_CALL_$NR - wrapper for indirect calls with $NR known builtin
 * @f: function pointer
 * @f$NR: builtin function names, up to $NR of them
 * @__VA_ARGS__: arguments for @f
 *
 * Avoid retpoline overhead for known builtins, checking @f vs each of them
 * and eventually invoking the builtin function directly. The functions are
 * checked in the given order; fall back to the indirect call otherwise.
 */
#define INDIRECT_CALL_1(f, f1, ...)					\
	({								\
		likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__);	\
	})
#define INDIRECT_CALL_2(f, f2, f1, ...)					\
	({								\
		likely(f == f2) ? f2(__VA_ARGS__) :			\
				  INDIRECT_CALL_1(f, f1, __VA_ARGS__);	\
	})
#define INDIRECT_CALL_3(f, f3, f2, f1, ...)				\
	({								\
		likely(f == f3) ? f3(__VA_ARGS__) :			\
				  INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \
	})
#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...)				\
	({								\
		likely(f == f4) ? f4(__VA_ARGS__) :			\
				  INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \
	})
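
/*
 * Usage sketch (illustrative only; rcv, my_tcp_rcv and my_udp_rcv are
 * made-up names, not taken from this header or any real call site):
 *
 *	int (*rcv)(struct sk_buff *skb);
 *	...
 *	ret = INDIRECT_CALL_2(rcv, my_tcp_rcv, my_udp_rcv, skb);
 *
 * With CONFIG_RETPOLINE this expands to a chain of pointer comparisons:
 * if rcv points to my_tcp_rcv or my_udp_rcv the call is made directly and
 * only an unknown pointer falls back to the (retpolined) indirect call
 * rcv(skb). Without CONFIG_RETPOLINE it is simply rcv(skb).
 */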

#define INDIRECT_CALLABLE_DECLARE(f)	f
#define INDIRECT_CALLABLE_SCOPE
#define EXPORT_INDIRECT_CALLABLE(f)	EXPORT_SYMBOL(f)

#else
#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__)
#define INDIRECT_CALL_3(f, f3, f2, f1, ...) f(__VA_ARGS__)
#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__)
#define INDIRECT_CALLABLE_DECLARE(f)
#define INDIRECT_CALLABLE_SCOPE		static
#define EXPORT_INDIRECT_CALLABLE(f)
#endif
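
/*
 * Usage sketch for the INDIRECT_CALLABLE_* helpers (my_demux is a made-up
 * example function, not part of this header):
 *
 *	in a header, so call sites can name it in INDIRECT_CALL_$NR():
 *	INDIRECT_CALLABLE_DECLARE(int my_demux(struct sk_buff *skb));
 *
 *	in the .c file that defines it:
 *	INDIRECT_CALLABLE_SCOPE int my_demux(struct sk_buff *skb)
 *	{
 *		return 0;
 *	}
 *	EXPORT_INDIRECT_CALLABLE(my_demux);
 *
 * With CONFIG_RETPOLINE the declaration and EXPORT_SYMBOL() are emitted and
 * the function gets external linkage, so call sites can compare against its
 * address. Without retpolines no comparison is needed: the declaration
 * disappears and the definition becomes static.
 */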

/*
 * We can use INDIRECT_CALL_$NR for ipv6 related functions only if ipv6 is
 * builtin; this macro simplifies dealing with indirect calls that have only
 * ipv4/ipv6 alternatives.
 */
#if IS_BUILTIN(CONFIG_IPV6)
#define INDIRECT_CALL_INET(f, f2, f1, ...) \
	INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#elif IS_ENABLED(CONFIG_INET)
#define INDIRECT_CALL_INET(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
#endif
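
/*
 * Usage sketch (illustrative; f, my_ip6_input and my_ip4_input are made-up
 * names for a function pointer, its ipv6 alternative (@f2) and its ipv4
 * alternative (@f1)):
 *
 *	INDIRECT_CALL_INET(f, my_ip6_input, my_ip4_input, skb);
 *
 * With CONFIG_RETPOLINE, if ipv6 is builtin both candidates are compared
 * (ipv6 first); with only CONFIG_INET enabled just the ipv4 candidate is
 * compared; in all remaining cases this is a plain indirect call f(skb).
 */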

#if IS_ENABLED(CONFIG_INET)
#define INDIRECT_CALL_INET_1(f, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_INET_1(f, f1, ...) f(__VA_ARGS__)
#endif
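
/*
 * Single-candidate variant of the above (my_inet_hook is a made-up name):
 *
 *	INDIRECT_CALL_INET_1(f, my_inet_hook, sk);
 *
 * expands to INDIRECT_CALL_1() only when CONFIG_INET is enabled, so the
 * direct-call comparison happens just for kernels with both INET and
 * retpolines; everywhere else it is a plain f(sk).
 */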

#endif