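/*
 * Basic __sk_buff field reads: word-sized loads of len, mark, pkt_type,
 * queue_mapping, protocol, vlan_present, vlan_tci and napi_id are accepted.
 * The "bad" variants load from a negative context offset (bad1) or arrange
 * for R1 to hold either the context pointer or a map value at the point of
 * the load (bad2-bad4), which the verifier rejects as "different pointers".
 */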
{
	"access skb fields ok",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, pkt_type)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, queue_mapping)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, protocol)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, vlan_present)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, vlan_tci)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, napi_id)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"access skb fields bad1",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"access skb fields bad2",
	.insns = {
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, pkt_type)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 4 },
	.errstr = "different pointers",
	.errstr_unpriv = "R1 pointer comparison",
	.result = REJECT,
},
{
	"access skb fields bad3",
	.insns = {
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, pkt_type)),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -12),
	},
	.fixup_map_hash_8b = { 6 },
	.errstr = "different pointers",
	.errstr_unpriv = "R1 pointer comparison",
	.result = REJECT,
},
{
	"access skb fields bad4",
	.insns = {
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -13),
	},
	.fixup_map_hash_8b = { 7 },
	.errstr = "different pointers",
	.errstr_unpriv = "R1 pointer comparison",
	.result = REJECT,
},
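/*
 * Socket-level fields (family, remote/local addresses and ports) are not
 * exposed to the plain socket-filter programs these tests load by default,
 * so loads of them must fail as invalid bpf_context access.
 */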
{
	"invalid access __sk_buff family",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, family)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff remote_ip4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip4)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff local_ip4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip4)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff remote_ip6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff local_ip6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff remote_port",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_port)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff local_port",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_port)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
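/*
 * The same socket-level fields are readable once the program type is
 * BPF_PROG_TYPE_SK_SKB; the IPv6 addresses are read one 32-bit word at
 * a time.
 */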
{
	"valid access __sk_buff family",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, family)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff remote_ip4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip4)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff local_ip4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip4)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff remote_ip6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[3])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff local_ip6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[3])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff remote_port",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_port)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff local_port",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_port)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
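/*
 * SK_SKB restrictions: tc_classid and mark are not accessible at all and
 * mark cannot be written, while tc_index and priority may be written and
 * direct packet access through data/data_end is allowed after a proper
 * bounds check.
 */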
{
	"invalid access of tc_classid for SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_classid)),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.errstr = "invalid bpf_context access",
},
{
	"invalid access of skb->mark for SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.errstr = "invalid bpf_context access",
},
{
	"check skb->mark is not writeable by SK_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.errstr = "invalid bpf_context access",
},
{
	"check skb->tc_index is writeable by SK_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"check skb->priority is writeable by SK_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, priority)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"direct packet read for SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"direct packet write for SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"overlapping checks for direct packet access SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
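/*
 * Socket filter programs may read mark and tc_index but must not store to
 * them; here the store operand is the context pointer itself, so the
 * unprivileged error is reported as "R1 leaks addr".
 */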
{
	"check skb->mark is not writeable by sockets",
	.insns = {
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "R1 leaks addr",
	.result = REJECT,
},
{
	"check skb->tc_index is not writeable by sockets",
	.insns = {
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "R1 leaks addr",
	.result = REJECT,
},
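/*
 * skb->cb[] is scratch memory: byte-granular stores and loads anywhere in
 * cb[0..4] are accepted.  Sub-word stores to other fields (hash, tc_index)
 * are rejected.
 */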
{
	"check cb access: byte",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 3),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1]) + 3),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2]) + 3),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3]) + 3),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4]) + 3),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"__sk_buff->hash, offset 0, byte store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, hash)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"__sk_buff->tc_index, offset 3, byte store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index) + 3),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
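/*
 * Narrow (byte) loads of the read-only hash field are accepted at every
 * byte offset.  The endianness guards below pick the offsets so that the
 * same logical bytes are exercised on little- and big-endian hosts.
 */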
{
	"check skb->hash byte load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#else
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash byte load permitted 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash byte load permitted 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 2),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash byte load permitted 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#else
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
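/*
 * "wrong type" variants issue the same cb[] store from a program type
 * whose context is not __sk_buff (BPF_PROG_TYPE_CGROUP_SOCK), which must
 * fail.  The following group repeats the cb[] and hash coverage with
 * half-word accesses.
 */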
{
	"check cb access: byte, wrong type",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
	"check cb access: half",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1]) + 2),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2]) + 2),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3]) + 2),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check cb access: half, unaligned",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 1),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check __sk_buff->hash, offset 0, half store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, hash)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check __sk_buff->tc_index, offset 2, half store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index) + 2),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check skb->hash half load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 2),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash half load permitted 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 2),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash half load not permitted, unaligned 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 1),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#endif
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"check skb->hash half load not permitted, unaligned 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 1),
#endif
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"check cb access: half, wrong type",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
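/*
 * Word-sized cb[] accesses: aligned loads and stores of cb[0..4] are
 * accepted; with F_LOAD_WITH_STRICT_ALIGNMENT the verifier must flag any
 * offset that is not 4-byte aligned as a misaligned context access.
 */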
{
	"check cb access: word",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check cb access: word, unaligned 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: word, unaligned 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 1),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: word, unaligned 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: word, unaligned 4",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 3),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
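/*
 * Double-word cb[] accesses must be 8-byte aligned and fit inside the
 * array, so a DW access at cb[4] runs out of bounds.  ifindex is a 32-bit
 * field and rejects DW loads and stores.
 */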
{
	"check cb access: double",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check cb access: double, unaligned 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: double, unaligned 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: double, oob 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check cb access: double, oob 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check __sk_buff->ifindex dw store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, ifindex)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check __sk_buff->ifindex dw load not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, ifindex)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check cb access: double, wrong type",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
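/*
 * Offsets far past cb[] are invalid even for SCHED_ACT.  The write tests
 * below store the context pointer itself into cb[], which is fine for a
 * privileged socket program but is rejected for unprivileged users as a
 * pointer leak; tc_cls_act may additionally write mark, tc_index and
 * tstamp.
 */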
{
	"check out of range skb->cb access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 256),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SCHED_ACT,
},
{
	"write skb fields from socket prog",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.errstr_unpriv = "R1 leaks addr",
	.result_unpriv = REJECT,
},
{
	"write skb fields from tc_cls_act prog",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
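/*
 * data and data_end may only be read as full 32-bit words; a half-word
 * load of skb->data is rejected regardless of byte order.
 */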
{
	"check skb->data half load not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data) + 2),
#endif
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid bpf_context access",
},
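/*
 * gso_segs is readable from CGROUP_SKB (whether loaded into a scratch
 * register or into the context register itself) and from SCHED_CLS, but
 * any store to it must be rejected.
 */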
{
	"read gso_segs from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_segs from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"write gso_segs from CGROUP_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access off=164 size=4",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_segs from CLS",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
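/*
 * gso_size follows the same rules as gso_segs: readable from CGROUP_SKB
 * and SCHED_CLS, never writable.
 */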
{
	"read gso_size from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_size from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"write gso_size from CGROUP_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access off=176 size=4",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_size from CLS",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
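/*
 * wire_len is invisible to socket filters, readable by the tc classifier,
 * and not writable by any program type covered here.
 */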
{
	"check wire_len is not readable by sockets",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, wire_len)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check wire_len is readable by tc classifier",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, wire_len)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"check wire_len is not writable by tc classifier",
	.insns = {
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, wire_len)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "R1 leaks addr",
	.result = REJECT,
},