linux-stable/kernel/bpf/bloom_filter.c
Eric Dumazet ad10c381d1 bpf: Add missing map_delete_elem method to bloom filter map
Without it, the kernel crashes in map_delete_elem(), as reported by syzbot.

BUG: kernel NULL pointer dereference, address: 0000000000000000
PGD 72c97067 P4D 72c97067 PUD 1e20c067 PMD 0
Oops: 0010 [#1] PREEMPT SMP KASAN
CPU: 0 PID: 6518 Comm: syz-executor196 Not tainted 5.15.0-rc3-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
RIP: 0010:0x0
Code: Unable to access opcode bytes at RIP 0xffffffffffffffd6.
RSP: 0018:ffffc90002bafcb8 EFLAGS: 00010246
RAX: dffffc0000000000 RBX: 1ffff92000575f9f RCX: 0000000000000000
RDX: 1ffffffff1327aba RSI: 0000000000000000 RDI: ffff888025a30c00
RBP: ffffc90002baff08 R08: 0000000000000000 R09: 0000000000000001
R10: ffffffff818525d8 R11: 0000000000000000 R12: ffffffff8993d560
R13: ffff888025a30c00 R14: ffff888024bc0000 R15: 0000000000000000
FS:  0000555557491300(0000) GS:ffff8880b9c00000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: ffffffffffffffd6 CR3: 0000000070189000 CR4: 00000000003506f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
 map_delete_elem kernel/bpf/syscall.c:1220 [inline]
 __sys_bpf+0x34f1/0x5ee0 kernel/bpf/syscall.c:4606
 __do_sys_bpf kernel/bpf/syscall.c:4719 [inline]
 __se_sys_bpf kernel/bpf/syscall.c:4717 [inline]
 __x64_sys_bpf+0x75/0xb0 kernel/bpf/syscall.c:4717
 do_syscall_x64 arch/x86/entry/common.c:50 [inline]

Fixes: 9330986c03 ("bpf: Add bloom filter map implementation")
Reported-by: syzbot <syzkaller@googlegroups.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20211031171353.4092388-1-eric.dumazet@gmail.com
2021-11-01 14:22:44 -07:00
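
The crash is reachable directly from userspace: a BPF_MAP_DELETE_ELEM command on
a bloom filter map jumps through the NULL ->map_delete_elem pointer. A minimal
trigger might look like the sketch below (a hypothetical repro, not taken from
the commit; it assumes UAPI headers that define BPF_MAP_TYPE_BLOOM_FILTER and
CAP_BPF or root privileges). With this patch the syscall fails cleanly with
EOPNOTSUPP instead of oopsing.

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	union bpf_attr attr;
	int fd;

	/* Create a bloom filter map: keyless, u32 values. */
	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_BLOOM_FILTER;
	attr.key_size = 0;
	attr.value_size = sizeof(__u32);
	attr.max_entries = 100;
	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0)
		return 1;

	/* Before the fix this call oopsed the kernel; after it,
	 * bpf(2) returns -1 with errno == EOPNOTSUPP.
	 */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	return syscall(__NR_bpf, BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}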

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */

#include <linux/bitmap.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/jhash.h>
#include <linux/random.h>

#define BLOOM_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK)

struct bpf_bloom_filter {
	struct bpf_map map;
	u32 bitset_mask;
	u32 hash_seed;
	/* If the size of the values in the bloom filter is u32 aligned,
	 * then it is more performant to use jhash2 as the underlying hash
	 * function, else we use jhash. This tracks the number of u32s
	 * in an u32-aligned value size. If the value size is not u32 aligned,
	 * this will be 0.
	 */
	u32 aligned_u32_count;
	u32 nr_hash_funcs;
	unsigned long bitset[];
};

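/* Compute the index-th hash of @value, using jhash2() for u32-aligned
 * values and jhash() otherwise (see aligned_u32_count above). Adding
 * @index to the seed derives nr_hash_funcs distinct hash functions from
 * one seed; ANDing with bitset_mask (a power-of-two size minus 1) maps
 * the hash into the bit array without a modulo.
 */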
static u32 hash(struct bpf_bloom_filter *bloom, void *value,
		u32 value_size, u32 index)
{
	u32 h;

	if (bloom->aligned_u32_count)
		h = jhash2(value, bloom->aligned_u32_count,
			   bloom->hash_seed + index);
	else
		h = jhash(value, value_size, bloom->hash_seed + index);

	return h & bloom->bitset_mask;
}

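/* "Contains" check: returns 0 if every probed bit is set (the value is
 * possibly in the filter, subject to false positives) and -ENOENT if
 * any bit is clear (the value is definitely not in the filter).
 */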
static int bloom_map_peek_elem(struct bpf_map *map, void *value)
{
	struct bpf_bloom_filter *bloom =
		container_of(map, struct bpf_bloom_filter, map);
	u32 i, h;

	for (i = 0; i < bloom->nr_hash_funcs; i++) {
		h = hash(bloom, value, map->value_size, i);
		if (!test_bit(h, bloom->bitset))
			return -ENOENT;
	}

	return 0;
}

static int bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags)
{
	struct bpf_bloom_filter *bloom =
		container_of(map, struct bpf_bloom_filter, map);
	u32 i, h;

	if (flags != BPF_ANY)
		return -EINVAL;

	for (i = 0; i < bloom->nr_hash_funcs; i++) {
		h = hash(bloom, value, map->value_size, i);
		set_bit(h, bloom->bitset);
	}

	return 0;
}

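/* A bloom filter cannot have elements removed, so pop and delete just
 * report -EOPNOTSUPP. Wiring up ->map_delete_elem still matters: the
 * BPF_MAP_DELETE_ELEM syscall path invokes the op unconditionally, and
 * the previously-NULL pointer is the crash this commit fixes.
 */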
static int bloom_map_pop_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}

static int bloom_map_delete_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}

static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
{
	u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits;
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_bloom_filter *bloom;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	if (attr->key_size != 0 || attr->value_size == 0 ||
	    attr->max_entries == 0 ||
	    attr->map_flags & ~BLOOM_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    /* The lower 4 bits of map_extra (0xF) specify the number
	     * of hash functions
	     */
	    (attr->map_extra & ~0xF))
		return ERR_PTR(-EINVAL);

	nr_hash_funcs = attr->map_extra;
	if (nr_hash_funcs == 0)
		/* Default to using 5 hash functions if unspecified */
		nr_hash_funcs = 5;

	/* For the bloom filter, the optimal bit array size that minimizes the
	 * false positive probability is n * k / ln(2) where n is the number of
	 * expected entries in the bloom filter and k is the number of hash
	 * functions. We use 7 / 5 to approximate 1 / ln(2).
	 *
	 * We round this up to the nearest power of two to enable more efficient
	 * hashing using bitmasks. The bitmask will be the bit array size - 1.
	 *
	 * If this overflows a u32, the bit array size will have 2^32
	 * (~4 billion) bits.
	 */
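	/* For example: with max_entries = 1000 and the default 5 hash
	 * functions, nr_bits = 1000 * 5 * 7 / 5 = 7000, which rounds up
	 * to 8192 bits (1 KiB of bit array), so bitset_mask = 8191.
	 */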
	if (check_mul_overflow(attr->max_entries, nr_hash_funcs, &nr_bits) ||
	    check_mul_overflow(nr_bits / 5, (u32)7, &nr_bits) ||
	    nr_bits > (1UL << 31)) {
		/* The bit array size is 2^32 bits but to avoid overflowing the
		 * u32, we use U32_MAX, which will round up to the equivalent
		 * number of bytes
		 */
		bitset_bytes = BITS_TO_BYTES(U32_MAX);
		bitset_mask = U32_MAX;
	} else {
		if (nr_bits <= BITS_PER_LONG)
			nr_bits = BITS_PER_LONG;
		else
			nr_bits = roundup_pow_of_two(nr_bits);
		bitset_bytes = BITS_TO_BYTES(nr_bits);
		bitset_mask = nr_bits - 1;
	}

	bitset_bytes = roundup(bitset_bytes, sizeof(unsigned long));
	bloom = bpf_map_area_alloc(sizeof(*bloom) + bitset_bytes, numa_node);
	if (!bloom)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&bloom->map, attr);

	bloom->nr_hash_funcs = nr_hash_funcs;
	bloom->bitset_mask = bitset_mask;

	/* Check whether the value size is u32-aligned */
	if ((attr->value_size & (sizeof(u32) - 1)) == 0)
		bloom->aligned_u32_count =
			attr->value_size / sizeof(u32);

	if (!(attr->map_flags & BPF_F_ZERO_SEED))
		bloom->hash_seed = get_random_int();

	return &bloom->map;
}

static void bloom_map_free(struct bpf_map *map)
{
	struct bpf_bloom_filter *bloom =
		container_of(map, struct bpf_bloom_filter, map);

	bpf_map_area_free(bloom);
}

static void *bloom_map_lookup_elem(struct bpf_map *map, void *key)
{
	/* The eBPF program should use map_peek_elem instead */
	return ERR_PTR(-EINVAL);
}

static int bloom_map_update_elem(struct bpf_map *map, void *key,
				 void *value, u64 flags)
{
	/* The eBPF program should use map_push_elem instead */
	return -EINVAL;
}

static int bloom_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	/* Bloom filter maps are keyless */
	return btf_type_is_void(key_type) ? 0 : -EINVAL;
}

static int bpf_bloom_map_btf_id;
const struct bpf_map_ops bloom_filter_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bloom_map_alloc,
	.map_free = bloom_map_free,
	.map_push_elem = bloom_map_push_elem,
	.map_peek_elem = bloom_map_peek_elem,
	.map_pop_elem = bloom_map_pop_elem,
	.map_lookup_elem = bloom_map_lookup_elem,
	.map_update_elem = bloom_map_update_elem,
	.map_delete_elem = bloom_map_delete_elem,
	.map_check_btf = bloom_map_check_btf,
	.map_btf_name = "bpf_bloom_filter",
	.map_btf_id = &bpf_bloom_map_btf_id,
};
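
Since lookup and update are rejected with -EINVAL, a BPF program reads and
populates the filter through the push/peek ops, i.e. the bpf_map_push_elem()
and bpf_map_peek_elem() helpers. A minimal BPF-side sketch (assuming libbpf's
BTF-defined map syntax with map_extra support; the tracepoint hook and the
value 42 are arbitrary choices for illustration):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_BLOOM_FILTER);
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, 1000);
	__uint(map_extra, 3);	/* lower 4 bits: use 3 hash functions */
} bloom SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_getpgid")
int bloom_demo(void *ctx)
{
	__u32 val = 42;

	/* Add val to the filter; the only accepted flag is BPF_ANY. */
	bpf_map_push_elem(&bloom, &val, BPF_ANY);

	/* 0 means "possibly present" (false positives are possible),
	 * -ENOENT means "definitely absent".
	 */
	if (bpf_map_peek_elem(&bloom, &val) == 0)
		bpf_printk("42 may be in the filter");

	return 0;
}

char _license[] SEC("license") = "GPL";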