bpf: Use arch_bpf_trampoline_size

Instead of blindly allocating PAGE_SIZE for each trampoline, check the size
of the trampoline with arch_bpf_trampoline_size(). The size is saved in
bpf_tramp_image->size and used for the modmem charge/uncharge. The fallback
arch_alloc_bpf_trampoline() still allocates a whole page because we need to
use set_memory_* to protect the memory.

The struct_ops trampoline still uses a whole page, as it holds multiple trampolines.

With this size check at the callers (regular trampoline and struct_ops
trampoline), remove the arch_bpf_trampoline_size() check from
arch_prepare_bpf_trampoline() in the arch implementations.

Also, update bpf_image_ksym_add() to handle symbols of different sizes.

Signed-off-by: Song Liu <song@kernel.org>
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>  # on s390x
Acked-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Björn Töpel <bjorn@rivosinc.com>
Tested-by: Björn Töpel <bjorn@rivosinc.com> # on riscv
Link: https://lore.kernel.org/r/20231206224054.492250-7-song@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit 26ef208c20 (parent 96d1b7c081)
Song Liu, 2023-12-06 14:40:53 -08:00; committed by Alexei Starovoitov
6 changed files with 44 additions and 37 deletions
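
For orientation before the per-file hunks, here is the new allocation flow in
bpf_trampoline_update() after this patch, assembled from the kernel/bpf/trampoline.c
hunks below (surrounding code elided, comments added here for the reader):

	/* Ask the arch JIT how many bytes this trampoline needs. */
	size = arch_bpf_trampoline_size(&tr->func.model, tr->flags,
					tlinks, tr->func.addr);
	if (size < 0) {
		err = size;
		goto out;
	}

	/* A single trampoline image is still capped at one page. */
	if (size > PAGE_SIZE) {
		err = -E2BIG;
		goto out;
	}

	/* Charge modmem and allocate only the reported size;
	 * im->size remembers it for the later uncharge/free.
	 */
	im = bpf_tramp_image_alloc(tr->key, size);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		goto out;
	}

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + size,
					  &tr->func.model, tr->flags, tlinks,
					  tr->func.addr);
	if (err < 0)
		goto out_free;

	arch_protect_bpf_trampoline(im->image, im->size);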

arch/arm64/net/bpf_jit_comp.c

@@ -2079,13 +2079,6 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
 	if (nregs > 8)
 		return -ENOTSUPP;
-	ret = arch_bpf_trampoline_size(m, flags, tlinks, func_addr);
-	if (ret < 0)
-		return ret;
-	if (ret > ((long)image_end - (long)image))
-		return -EFBIG;
 	jit_fill_hole(image, (unsigned int)(image_end - image));
 	ret = prepare_trampoline(&ctx, im, tlinks, func_addr, nregs, flags);

arch/riscv/net/bpf_jit_comp64.c

@@ -1052,13 +1052,6 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
 	int ret;
 	struct rv_jit_context ctx;
-	ret = arch_bpf_trampoline_size(im, m, flags, tlinks, func_addr);
-	if (ret < 0)
-		return ret;
-	if (ret > (long)image_end - (long)image)
-		return -EFBIG;
 	ctx.ninsns = 0;
 	/*
 	 * The bpf_int_jit_compile() uses a RW buffer (ctx.insns) to write the

include/linux/bpf.h

@@ -1141,6 +1141,7 @@ enum bpf_tramp_prog_type {
 struct bpf_tramp_image {
 	void *image;
+	int size;
 	struct bpf_ksym ksym;
 	struct percpu_ref pcref;
 	void *ip_after_call;
@@ -1325,7 +1326,7 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
 void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
 				struct bpf_prog *to);
 /* Called only from JIT-enabled code, so there's no need for stubs. */
-void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
+void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym);
 void bpf_image_ksym_del(struct bpf_ksym *ksym);
 void bpf_ksym_add(struct bpf_ksym *ksym);
 void bpf_ksym_del(struct bpf_ksym *ksym);

kernel/bpf/bpf_struct_ops.c

@@ -355,6 +355,7 @@ int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
 				      void *image, void *image_end)
 {
 	u32 flags;
+	int size;
 	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
 	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;
@@ -362,6 +363,12 @@ int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
 	 * and it must be used alone.
 	 */
 	flags = model->ret_size > 0 ? BPF_TRAMP_F_RET_FENTRY_RET : 0;
+	size = arch_bpf_trampoline_size(model, flags, tlinks, NULL);
+	if (size < 0)
+		return size;
+	if (size > (unsigned long)image_end - (unsigned long)image)
+		return -E2BIG;
 	return arch_prepare_bpf_trampoline(NULL, image, image_end,
 					   model, flags, tlinks, NULL);
 }

kernel/bpf/dispatcher.c

@@ -154,7 +154,7 @@ void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
 			d->image = NULL;
 			goto out;
 		}
-		bpf_image_ksym_add(d->image, &d->ksym);
+		bpf_image_ksym_add(d->image, PAGE_SIZE, &d->ksym);
 	}
 	prev_num_progs = d->num_progs;

kernel/bpf/trampoline.c

@@ -115,10 +115,10 @@ bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
 		(ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC);
 }
-void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
+void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym)
 {
 	ksym->start = (unsigned long) data;
-	ksym->end = ksym->start + PAGE_SIZE;
+	ksym->end = ksym->start + size;
 	bpf_ksym_add(ksym);
 	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
 			   PAGE_SIZE, false, ksym->name);
@@ -254,8 +254,8 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_a
 static void bpf_tramp_image_free(struct bpf_tramp_image *im)
 {
 	bpf_image_ksym_del(&im->ksym);
-	arch_free_bpf_trampoline(im->image, PAGE_SIZE);
-	bpf_jit_uncharge_modmem(PAGE_SIZE);
+	arch_free_bpf_trampoline(im->image, im->size);
+	bpf_jit_uncharge_modmem(im->size);
 	percpu_ref_exit(&im->pcref);
 	kfree_rcu(im, rcu);
 }
@@ -349,7 +349,7 @@ static void bpf_tramp_image_put(struct bpf_tramp_image *im)
 	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
 }
-static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key)
+static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, int size)
 {
 	struct bpf_tramp_image *im;
 	struct bpf_ksym *ksym;
@@ -360,12 +360,13 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key)
 	if (!im)
 		goto out;
-	err = bpf_jit_charge_modmem(PAGE_SIZE);
+	err = bpf_jit_charge_modmem(size);
 	if (err)
 		goto out_free_im;
+	im->size = size;
 	err = -ENOMEM;
-	im->image = image = arch_alloc_bpf_trampoline(PAGE_SIZE);
+	im->image = image = arch_alloc_bpf_trampoline(size);
 	if (!image)
 		goto out_uncharge;
@@ -376,13 +377,13 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key)
 	ksym = &im->ksym;
 	INIT_LIST_HEAD_RCU(&ksym->lnode);
 	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", key);
-	bpf_image_ksym_add(image, ksym);
+	bpf_image_ksym_add(image, size, ksym);
 	return im;
 out_free_image:
-	arch_free_bpf_trampoline(im->image, PAGE_SIZE);
+	arch_free_bpf_trampoline(im->image, im->size);
 out_uncharge:
-	bpf_jit_uncharge_modmem(PAGE_SIZE);
+	bpf_jit_uncharge_modmem(size);
 out_free_im:
 	kfree(im);
 out:
@@ -395,7 +396,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
 	struct bpf_tramp_links *tlinks;
 	u32 orig_flags = tr->flags;
 	bool ip_arg = false;
-	int err, total;
+	int err, total, size;
 	tlinks = bpf_trampoline_get_progs(tr, &total, &ip_arg);
 	if (IS_ERR(tlinks))
@@ -408,12 +409,6 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
 		goto out;
 	}
-	im = bpf_tramp_image_alloc(tr->key);
-	if (IS_ERR(im)) {
-		err = PTR_ERR(im);
-		goto out;
-	}
 	/* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
 	tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
@@ -437,13 +432,31 @@ again:
 		tr->flags |= BPF_TRAMP_F_ORIG_STACK;
 #endif
-	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
+	size = arch_bpf_trampoline_size(&tr->func.model, tr->flags,
+					tlinks, tr->func.addr);
+	if (size < 0) {
+		err = size;
+		goto out;
+	}
+	if (size > PAGE_SIZE) {
+		err = -E2BIG;
+		goto out;
+	}
+	im = bpf_tramp_image_alloc(tr->key, size);
+	if (IS_ERR(im)) {
+		err = PTR_ERR(im);
+		goto out;
+	}
+	err = arch_prepare_bpf_trampoline(im, im->image, im->image + size,
 					  &tr->func.model, tr->flags, tlinks,
 					  tr->func.addr);
 	if (err < 0)
 		goto out_free;
-	arch_protect_bpf_trampoline(im->image, PAGE_SIZE);
+	arch_protect_bpf_trampoline(im->image, im->size);
 	WARN_ON(tr->cur_image && total == 0);
 	if (tr->cur_image)
@@ -463,8 +476,8 @@ again:
 		tr->fops->func = NULL;
 		tr->fops->trampoline = 0;
-		/* reset im->image memory attr for arch_prepare_bpf_trampoline */
-		arch_unprotect_bpf_trampoline(im->image, PAGE_SIZE);
+		/* free im memory and reallocate later */
+		bpf_tramp_image_free(im);
 		goto again;
 	}
 #endif
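
For completeness, the two trampoline.c helpers touched above read as follows after
this commit (copied from the hunks, with comments added here). Note that only the
ksym range uses the real size; the perf_event_ksymbol() call still reports
PAGE_SIZE in this commit:

void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	/* was: ksym->start + PAGE_SIZE */
	ksym->end = ksym->start + size;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

/* Teardown mirrors allocation: free and uncharge im->size, not PAGE_SIZE. */
static void bpf_tramp_image_free(struct bpf_tramp_image *im)
{
	bpf_image_ksym_del(&im->ksym);
	arch_free_bpf_trampoline(im->image, im->size);
	bpf_jit_uncharge_modmem(im->size);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}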