Merge branch 'bpf: Allow reads from uninit stack'

Eduard Zingerman says:

====================

This patch-set modifies the BPF verifier to accept programs that read from
uninitialized stack locations, but only when loaded in privileged mode.
This provides significant verification performance gains: 30% to 70% fewer
processed states for a large number of test programs.
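For context, the relaxation is gated per verification run. Below is a minimal
sketch of how such gating could be wired up; the allow_uninit_stack field name
comes from the hunks in this diff, while the helper name init_env_privileges()
and the bpf_capable() check are assumptions modeled on how the verifier's other
privilege-derived flags (e.g. allow_ptr_leaks) are initialized.

    /* Sketch, not the exact patch: a per-env flag gates the relaxation. */
    struct bpf_verifier_env {
            /* ... existing fields ... */
            bool allow_uninit_stack; /* tolerate reads from uninit stack slots */
    };

    /* Hypothetical helper: set once when verification starts, mirroring how
     * other privilege-derived flags (allow_ptr_leaks, etc.) are set up.
     */
    static void init_env_privileges(struct bpf_verifier_env *env)
    {
            env->allow_uninit_stack = bpf_capable();
    }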

The performance gains come from treating STACK_MISC and STACK_INVALID as
compatible when a cached state is compared to the current state in
verifier.c:stacksafe().

The change should not affect safety, because any value read from a STACK_MISC
location already has the full binary range (e.g. 0x00-0xff for byte-sized reads).
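To make the pruning argument concrete, here is a small self-contained sketch
(plain C, not the kernel code; the real hunks are in the stacksafe() diff below)
of the per-slot compatibility rule: a slot that is STACK_MISC in the cached
state already covers every possible content, so the current state's slot is
acceptable even if it is still STACK_INVALID.

    #include <stdbool.h>

    enum slot_type { STACK_INVALID, STACK_SPILL, STACK_MISC, STACK_ZERO };

    /* Simplified stand-in for the per-byte check in stacksafe(): is the
     * current slot covered by a cached (already verified) state whose
     * corresponding slot type was 'old'?
     */
    static bool slot_covered(enum slot_type old, enum slot_type cur,
                             bool allow_uninit_stack)
    {
            if (old == STACK_INVALID)
                    return true;    /* cached state never relied on this slot */
            if (allow_uninit_stack && old == STACK_MISC)
                    return true;    /* MISC already means "any byte value", so
                                     * it also covers a slot that is still
                                     * uninitialized in the current state */
            return old == cur;      /* otherwise demand an exact match
                                     * (the real check is more involved) */
    }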

Details and measurements are provided in the description of patch #1.

The change was suggested by Andrii Nakryiko, and the initial patch was created
by Alexei Starovoitov. The discussion can be found at [1].

Changes v1 -> v2 (v1 available at [2]):
- Calls to helper functions now convert STACK_INVALID to STACK_MISC
  (suggested by Andrii);
- The test case progs/test_global_func10.c is updated to expect a new
  error message. Before the recent commit [3], the exact content of error
  messages was not verified for this test;
- Replaced incorrect '//'-style comments in test case asm blocks with
  '/*...*/'-style comments in order to fix compilation issues;
- Changed the tag from "Suggested-by" to "Co-developed-by" for Alexei
  on patch #1; please let me know if this is an appropriate use of the tag.

[1] https://lore.kernel.org/bpf/CAADnVQKs2i1iuZ5SUGuJtxWVfGYR9kDgYKhq3rNV+kBLQCu7rA@mail.gmail.com/
[2] https://lore.kernel.org/bpf/20230216183606.2483834-1-eddyz87@gmail.com/
[3] 95ebb37617 ("selftests/bpf: Convert test_global_funcs test to test_loader framework")
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Commit bf9bec4cb3 by Alexei Starovoitov, 2023-02-22 12:34:51 -08:00
11 changed files with 204 additions and 136 deletions

@@ -3826,6 +3826,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
continue;
if (type == STACK_MISC)
continue;
if (type == STACK_INVALID && env->allow_uninit_stack)
continue;
verbose(env, "invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
@@ -3863,6 +3865,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
continue;
if (type == STACK_ZERO)
continue;
if (type == STACK_INVALID && env->allow_uninit_stack)
continue;
verbose(env, "invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
@@ -5754,7 +5758,8 @@ static int check_stack_range_initialized(
stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
if (*stype == STACK_MISC)
goto mark;
if (*stype == STACK_ZERO) {
if ((*stype == STACK_ZERO) ||
(*stype == STACK_INVALID && env->allow_uninit_stack)) {
if (clobber) {
/* helper can write anything into the stack */
*stype = STACK_MISC;
@@ -13936,6 +13941,10 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
continue;
if (env->allow_uninit_stack &&
old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC)
continue;
/* explored stack has more populated slots than current stack
* and these slots were used
*/

@@ -0,0 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "uninit_stack.skel.h"
void test_uninit_stack(void)
{
RUN_TESTS(uninit_stack);
}
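Assuming the usual BPF selftests workflow, this prog_tests entry can presumably
be run with './test_progs -t uninit_stack' from tools/testing/selftests/bpf; it
loads the programs from progs/uninit_stack.c, listed further below.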

@@ -5,12 +5,12 @@
#include "bpf_misc.h"
struct Small {
int x;
long x;
};
struct Big {
int x;
int y;
long x;
long y;
};
__noinline int foo(const struct Big *big)
@@ -22,7 +22,7 @@ __noinline int foo(const struct Big *big)
}
SEC("cgroup_skb/ingress")
__failure __msg("invalid indirect read from stack")
__failure __msg("invalid indirect access to stack")
int global_func10(struct __sk_buff *skb)
{
const struct Small small = {.x = skb->len };

@@ -0,0 +1,87 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
/* Read an uninitialized value from stack at a fixed offset */
SEC("socket")
__naked int read_uninit_stack_fixed_off(void *ctx)
{
asm volatile (" \
r0 = 0; \
/* force stack depth to be 128 */ \
*(u64*)(r10 - 128) = r1; \
r1 = *(u8 *)(r10 - 8 ); \
r0 += r1; \
r1 = *(u8 *)(r10 - 11); \
r1 = *(u8 *)(r10 - 13); \
r1 = *(u8 *)(r10 - 15); \
r1 = *(u16*)(r10 - 16); \
r1 = *(u32*)(r10 - 32); \
r1 = *(u64*)(r10 - 64); \
/* read from a spill of a wrong size, it is a separate \
* branch in check_stack_read_fixed_off() \
*/ \
*(u32*)(r10 - 72) = r1; \
r1 = *(u64*)(r10 - 72); \
r0 = 0; \
exit; \
"
::: __clobber_all);
}
/* Read an uninitialized value from stack at a variable offset */
SEC("socket")
__naked int read_uninit_stack_var_off(void *ctx)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
/* force stack depth to be 64 */ \
*(u64*)(r10 - 64) = r0; \
r0 = -r0; \
/* give r0 a range [-31, -1] */ \
if r0 s<= -32 goto exit_%=; \
if r0 s>= 0 goto exit_%=; \
/* access stack using r0 */ \
r1 = r10; \
r1 += r0; \
r2 = *(u8*)(r1 + 0); \
exit_%=: r0 = 0; \
exit; \
"
:
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
static __noinline void dummy(void) {}
/* Pass a pointer to uninitialized stack memory to a helper.
* Passed memory block should be marked as STACK_MISC after helper call.
*/
SEC("socket")
__log_level(7) __msg("fp-104=mmmmmmmm")
__naked int helper_uninit_to_misc(void *ctx)
{
asm volatile (" \
/* force stack depth to be 128 */ \
*(u64*)(r10 - 128) = r1; \
r1 = r10; \
r1 += -128; \
r2 = 32; \
call %[bpf_trace_printk]; \
/* Call to dummy() forces print_verifier_state(..., true), \
* thus showing the stack state, matched by __msg(). \
*/ \
call %[dummy]; \
r0 = 0; \
exit; \
"
:
: __imm(bpf_trace_printk),
__imm(dummy)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";

@@ -2221,19 +2221,22 @@
* that fp-8 stack slot was unused in the fall-through
* branch and will accept the program incorrectly
*/
BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 2, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 6 },
.errstr = "invalid indirect read from stack R2 off -8+0 size 8",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.fixup_map_hash_48b = { 7 },
.errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8",
.result_unpriv = REJECT,
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
{
"calls: ctx read at start of subprog",

@@ -29,19 +29,30 @@
{
"helper access to variable memory: stack, bitwise AND, zero included",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
/* set max stack size */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
/* set r3 to a random value */
BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
/* use bitwise AND to limit r3 range to [0, 64] */
BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 64),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
BPF_MOV64_IMM(BPF_REG_4, 0),
/* Call bpf_ringbuf_output(), it is one of a few helper functions with
* ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
* For unpriv this should signal an error, because memory at &fp[-64] is
* not initialized.
*/
BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
BPF_EXIT_INSN(),
},
.errstr = "invalid indirect read from stack R1 off -64+0 size 64",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.fixup_map_ringbuf = { 4 },
.errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64",
.result_unpriv = REJECT,
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
{
"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
@@ -183,20 +194,31 @@
{
"helper access to variable memory: stack, JMP, no min check",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
/* set max stack size */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
/* set r3 to a random value */
BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
/* use JMP to limit r3 range to [0, 64] */
BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 64, 6),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
BPF_MOV64_IMM(BPF_REG_4, 0),
/* Call bpf_ringbuf_output(), it is one of a few helper functions with
* ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
* For unpriv this should signal an error, because memory at &fp[-64] is
* not initialized.
*/
BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid indirect read from stack R1 off -64+0 size 64",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.fixup_map_ringbuf = { 4 },
.errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64",
.result_unpriv = REJECT,
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
{
"helper access to variable memory: stack, JMP (signed), no min check",
@@ -564,29 +586,41 @@
{
"helper access to variable memory: 8 bytes leak",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
/* set max stack size */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
/* set r3 to a random value */
BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
/* Note: fp[-32] left uninitialized */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
/* Limit r3 range to [1, 64] */
BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 63),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 1),
BPF_MOV64_IMM(BPF_REG_4, 0),
/* Call bpf_ringbuf_output(), it is one of a few helper functions with
* ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
* For unpriv this should signal an error, because memory region [1, 64]
* at &fp[-64] is not fully initialized.
*/
BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid indirect read from stack R1 off -64+32 size 64",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.fixup_map_ringbuf = { 3 },
.errstr_unpriv = "invalid indirect read from stack R2 off -64+32 size 64",
.result_unpriv = REJECT,
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
{
"helper access to variable memory: 8 bytes no leak (init memory)",

@@ -54,12 +54,13 @@
/* bpf_strtoul() */
BPF_EMIT_CALL(BPF_FUNC_strtoul),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
.errstr = "invalid indirect read from stack R4 off -16+4 size 8",
.result_unpriv = REJECT,
.errstr_unpriv = "invalid indirect read from stack R4 off -16+4 size 8",
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
{
"ARG_PTR_TO_LONG misaligned",

@@ -128,9 +128,10 @@
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
.errstr = "invalid read from stack off -16+0 size 8",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr_unpriv = "invalid read from stack off -16+0 size 8",
.result_unpriv = REJECT,
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
{
"precision tracking for u32 spill/fill",
@@ -258,6 +259,8 @@
BPF_EXIT_INSN(),
},
.flags = BPF_F_TEST_STATE_FREQ,
.errstr = "invalid read from stack off -8+1 size 8",
.result = REJECT,
.errstr_unpriv = "invalid read from stack off -8+1 size 8",
.result_unpriv = REJECT,
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},

@@ -530,33 +530,6 @@
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"sk_storage_get(map, skb->sk, &stack_value, 1): partially init stack_value",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_4, 1),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_sk_storage_map = { 14 },
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "invalid indirect read from stack",
},
{
"bpf_map_lookup_elem(smap, &key)",
.insns = {

@@ -171,9 +171,10 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid read from stack off -4+0 size 4",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result_unpriv = REJECT,
.errstr_unpriv = "invalid read from stack off -4+0 size 4",
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
{
"Spill a u32 const scalar. Refill as u16. Offset to skb->data",

@@ -212,31 +212,6 @@
.result = REJECT,
.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
"indirect variable-offset stack access, max_off+size > max_initialized",
.insns = {
/* Fill only the second from top 8 bytes of the stack. */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
/* Get an unknown value. */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
/* Make it small and 4-byte aligned. */
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
/* Add it to fp. We now have either fp-12 or fp-16, but we don't know
* which. fp-12 size 8 is partially uninitialized stack.
*/
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
/* Dereference it indirectly. */
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 5 },
.errstr = "invalid indirect read from stack R2 var_off",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
"indirect variable-offset stack access, min_off < min_initialized",
.insns = {
@@ -289,33 +264,6 @@
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"indirect variable-offset stack access, uninitialized",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 6),
BPF_MOV64_IMM(BPF_REG_3, 28),
/* Fill the top 16 bytes of the stack. */
BPF_ST_MEM(BPF_W, BPF_REG_10, -16, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
/* Get an unknown value. */
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 0),
/* Make it small and 4-byte aligned. */
BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 4),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_4, 16),
/* Add it to fp. We now have either fp-12 or fp-16, we don't know
* which, but either way it points to initialized stack.
*/
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_10),
BPF_MOV64_IMM(BPF_REG_5, 8),
/* Dereference it indirectly. */
BPF_EMIT_CALL(BPF_FUNC_getsockopt),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid indirect read from stack R4 var_off",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SOCK_OPS,
},
{
"indirect variable-offset stack access, ok",
.insns = {