bpf: Convert bpf_preload.ko to use light skeleton.

The main change is a move of the single line
  #include "iterators.lskel.h"
from iterators/iterators.c to bpf_preload_kern.c.
This means that the generated light skeleton can be used from user space
or from a user mode driver like iterators.c, as well as from a kernel
module or the kernel itself. Using the light skeleton directly from the
kernel module simplifies the code, since the UMD is no longer necessary:
libbpf.a requires user space, which is why the UMD existed, while CO-RE
in the kernel and the generated "loader bpf program" used by the light
skeleton are capable of performing the complex loading operations
traditionally provided by libbpf. In addition, the UMD approach launched
a UMD process every time bpffs had to be mounted. With the light
skeleton in the kernel, the bpf_preload kernel module loads the bpf
iterators once and pins them multiple times into different bpffs mounts.
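
To make the last point concrete, the consumer side of a light skeleton
looks roughly like the following condensed sketch. It mirrors the
load_skel() path added to bpf_preload_kern.c below; the iterators_bpf__*
calls and the links.dump_bpf_map_fd member come from the generated
iterators.lskel.h, while example_load() and the abbreviated error
handling are only for illustration:

  #include <linux/module.h>
  #include <linux/err.h>
  #include <linux/bpf.h>
  #include "iterators/iterators.lskel.h"

  static struct iterators_bpf *skel;
  static struct bpf_link *maps_link;

  static int example_load(void)
  {
          int err;

          skel = iterators_bpf__open();       /* allocate skeleton state */
          if (!skel)
                  return -ENOMEM;
          err = iterators_bpf__load(skel);    /* loader bpf prog performs the loading */
          if (err)
                  goto out;
          err = iterators_bpf__attach(skel);  /* create the iterator links */
          if (err)
                  goto out;
          /* the skeleton exposes link FDs; take kernel references on them */
          maps_link = bpf_link_get_from_fd(skel->links.dump_bpf_map_fd);
          if (IS_ERR(maps_link)) {
                  err = PTR_ERR(maps_link);
                  goto out;
          }
          return 0;
  out:
          iterators_bpf__destroy(skel);
          return err;
  }

populate_bpffs() then pins the resulting links into each freshly mounted
bpffs instance, which is why loading happens once while pinning can
happen many times.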

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yhs@fb.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220209232001.27490-6-alexei.starovoitov@gmail.com

Author:    Alexei Starovoitov, 2022-02-09 15:20:01 -08:00
Committer: Daniel Borkmann
Commit:    cb80ddc671 (parent d7beb3d6ab)
9 changed files with 77 additions and 254 deletions

kernel/bpf/inode.c

@@ -710,11 +710,10 @@ static DEFINE_MUTEX(bpf_preload_lock);
static int populate_bpffs(struct dentry *parent)
{
struct bpf_preload_info objs[BPF_PRELOAD_LINKS] = {};
struct bpf_link *links[BPF_PRELOAD_LINKS] = {};
int err = 0, i;
/* grab the mutex to make sure the kernel interactions with bpf_preload
* UMD are serialized
* are serialized
*/
mutex_lock(&bpf_preload_lock);
@@ -722,40 +721,22 @@ static int populate_bpffs(struct dentry *parent)
if (!bpf_preload_mod_get())
goto out;
if (!bpf_preload_ops->info.tgid) {
/* preload() will start UMD that will load BPF iterator programs */
err = bpf_preload_ops->preload(objs);
if (err)
err = bpf_preload_ops->preload(objs);
if (err)
goto out_put;
for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
bpf_link_inc(objs[i].link);
err = bpf_iter_link_pin_kernel(parent,
objs[i].link_name, objs[i].link);
if (err) {
bpf_link_put(objs[i].link);
goto out_put;
for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
links[i] = bpf_link_by_id(objs[i].link_id);
if (IS_ERR(links[i])) {
err = PTR_ERR(links[i]);
goto out_put;
}
}
for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
err = bpf_iter_link_pin_kernel(parent,
objs[i].link_name, links[i]);
if (err)
goto out_put;
/* do not unlink successfully pinned links even
* if later link fails to pin
*/
links[i] = NULL;
}
/* finish() will tell UMD process to exit */
err = bpf_preload_ops->finish();
if (err)
goto out_put;
}
out_put:
bpf_preload_mod_put();
out:
mutex_unlock(&bpf_preload_lock);
for (i = 0; i < BPF_PRELOAD_LINKS && err; i++)
if (!IS_ERR_OR_NULL(links[i]))
bpf_link_put(links[i]);
return err;
}

kernel/bpf/preload/Kconfig

@@ -18,10 +18,9 @@ menuconfig BPF_PRELOAD
if BPF_PRELOAD
config BPF_PRELOAD_UMD
tristate "bpf_preload kernel module with user mode driver"
depends on CC_CAN_LINK
depends on m || CC_CAN_LINK_STATIC
tristate "bpf_preload kernel module"
default m
help
This builds bpf_preload kernel module with embedded user mode driver.
This builds bpf_preload kernel module with embedded BPF programs for
introspection in bpffs.
endif

kernel/bpf/preload/Makefile

@@ -3,16 +3,6 @@
LIBBPF_SRCS = $(srctree)/tools/lib/bpf/
LIBBPF_INCLUDE = $(LIBBPF_SRCS)/..
userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi \
-I $(LIBBPF_INCLUDE) -Wno-unused-result
userprogs := bpf_preload_umd
bpf_preload_umd-objs := iterators/iterators.o
$(obj)/bpf_preload_umd:
$(obj)/bpf_preload_umd_blob.o: $(obj)/bpf_preload_umd
obj-$(CONFIG_BPF_PRELOAD_UMD) += bpf_preload.o
bpf_preload-objs += bpf_preload_kern.o bpf_preload_umd_blob.o
CFLAGS_bpf_preload_kern.o += -I $(LIBBPF_INCLUDE)
bpf_preload-objs += bpf_preload_kern.o

kernel/bpf/preload/bpf_preload.h

@@ -2,13 +2,13 @@
#ifndef _BPF_PRELOAD_H
#define _BPF_PRELOAD_H
#include <linux/usermode_driver.h>
#include "iterators/bpf_preload_common.h"
struct bpf_preload_info {
char link_name[16];
struct bpf_link *link;
};
struct bpf_preload_ops {
struct umd_info info;
int (*preload)(struct bpf_preload_info *);
int (*finish)(void);
struct module *owner;
};
extern struct bpf_preload_ops *bpf_preload_ops;

kernel/bpf/preload/bpf_preload_kern.c

@@ -2,101 +2,80 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include "bpf_preload.h"
#include "iterators/iterators.lskel.h"
extern char bpf_preload_umd_start;
extern char bpf_preload_umd_end;
static struct bpf_link *maps_link, *progs_link;
static struct iterators_bpf *skel;
static int preload(struct bpf_preload_info *obj);
static int finish(void);
static struct bpf_preload_ops umd_ops = {
.info.driver_name = "bpf_preload",
.preload = preload,
.finish = finish,
.owner = THIS_MODULE,
};
static void free_links_and_skel(void)
{
if (!IS_ERR_OR_NULL(maps_link))
bpf_link_put(maps_link);
if (!IS_ERR_OR_NULL(progs_link))
bpf_link_put(progs_link);
iterators_bpf__destroy(skel);
}
static int preload(struct bpf_preload_info *obj)
{
int magic = BPF_PRELOAD_START;
loff_t pos = 0;
int i, err;
ssize_t n;
err = fork_usermode_driver(&umd_ops.info);
if (err)
return err;
/* send the start magic to let UMD proceed with loading BPF progs */
n = kernel_write(umd_ops.info.pipe_to_umh,
&magic, sizeof(magic), &pos);
if (n != sizeof(magic))
return -EPIPE;
/* receive bpf_link IDs and names from UMD */
pos = 0;
for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
n = kernel_read(umd_ops.info.pipe_from_umh,
&obj[i], sizeof(*obj), &pos);
if (n != sizeof(*obj))
return -EPIPE;
}
strlcpy(obj[0].link_name, "maps.debug", sizeof(obj[0].link_name));
obj[0].link = maps_link;
strlcpy(obj[1].link_name, "progs.debug", sizeof(obj[1].link_name));
obj[1].link = progs_link;
return 0;
}
static int finish(void)
{
int magic = BPF_PRELOAD_END;
struct pid *tgid;
loff_t pos = 0;
ssize_t n;
static struct bpf_preload_ops ops = {
.preload = preload,
.owner = THIS_MODULE,
};
/* send the last magic to UMD. It will do a normal exit. */
n = kernel_write(umd_ops.info.pipe_to_umh,
&magic, sizeof(magic), &pos);
if (n != sizeof(magic))
return -EPIPE;
tgid = umd_ops.info.tgid;
if (tgid) {
wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
umd_cleanup_helper(&umd_ops.info);
}
return 0;
}
static int __init load_umd(void)
static int load_skel(void)
{
int err;
err = umd_load_blob(&umd_ops.info, &bpf_preload_umd_start,
&bpf_preload_umd_end - &bpf_preload_umd_start);
skel = iterators_bpf__open();
if (!skel)
return -ENOMEM;
err = iterators_bpf__load(skel);
if (err)
return err;
bpf_preload_ops = &umd_ops;
goto out;
err = iterators_bpf__attach(skel);
if (err)
goto out;
maps_link = bpf_link_get_from_fd(skel->links.dump_bpf_map_fd);
if (IS_ERR(maps_link)) {
err = PTR_ERR(maps_link);
goto out;
}
progs_link = bpf_link_get_from_fd(skel->links.dump_bpf_prog_fd);
if (IS_ERR(progs_link)) {
err = PTR_ERR(progs_link);
goto out;
}
return 0;
out:
free_links_and_skel();
return err;
}
static void __exit fini_umd(void)
static int __init load(void)
{
struct pid *tgid;
int err;
bpf_preload_ops = NULL;
/* kill UMD in case it's still there due to earlier error */
tgid = umd_ops.info.tgid;
if (tgid) {
kill_pid(tgid, SIGKILL, 1);
wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
umd_cleanup_helper(&umd_ops.info);
}
umd_unload_blob(&umd_ops.info);
err = load_skel();
if (err)
return err;
bpf_preload_ops = &ops;
return err;
}
late_initcall(load_umd);
module_exit(fini_umd);
static void __exit fini(void)
{
bpf_preload_ops = NULL;
free_links_and_skel();
}
late_initcall(load);
module_exit(fini);
MODULE_LICENSE("GPL");

kernel/bpf/preload/bpf_preload_umd_blob.S (deleted)

@@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
.section .init.rodata, "a"
.global bpf_preload_umd_start
bpf_preload_umd_start:
.incbin "kernel/bpf/preload/bpf_preload_umd"
.global bpf_preload_umd_end
bpf_preload_umd_end:

kernel/bpf/preload/iterators/bpf_preload_common.h (deleted)

@@ -1,13 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_PRELOAD_COMMON_H
#define _BPF_PRELOAD_COMMON_H
#define BPF_PRELOAD_START 0x5555
#define BPF_PRELOAD_END 0xAAAA
struct bpf_preload_info {
char link_name[16];
int link_id;
};
#endif

kernel/bpf/preload/iterators/iterators.c (deleted)

@@ -1,108 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/resource.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <sys/mount.h>
#include "iterators.lskel.h"
#include "bpf_preload_common.h"
int to_kernel = -1;
int from_kernel = 0;
static int __bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
union bpf_attr attr;
int err;
memset(&attr, 0, sizeof(attr));
attr.info.bpf_fd = bpf_fd;
attr.info.info_len = *info_len;
attr.info.info = (long) info;
err = skel_sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
if (!err)
*info_len = attr.info.info_len;
return err;
}
static int send_link_to_kernel(int link_fd, const char *link_name)
{
struct bpf_preload_info obj = {};
struct bpf_link_info info = {};
__u32 info_len = sizeof(info);
int err;
err = __bpf_obj_get_info_by_fd(link_fd, &info, &info_len);
if (err)
return err;
obj.link_id = info.id;
if (strlen(link_name) >= sizeof(obj.link_name))
return -E2BIG;
strcpy(obj.link_name, link_name);
if (write(to_kernel, &obj, sizeof(obj)) != sizeof(obj))
return -EPIPE;
return 0;
}
int main(int argc, char **argv)
{
struct iterators_bpf *skel;
int err, magic;
int debug_fd;
debug_fd = open("/dev/console", O_WRONLY | O_NOCTTY | O_CLOEXEC);
if (debug_fd < 0)
return 1;
to_kernel = dup(1);
close(1);
dup(debug_fd);
/* now stdin and stderr point to /dev/console */
read(from_kernel, &magic, sizeof(magic));
if (magic != BPF_PRELOAD_START) {
printf("bad start magic %d\n", magic);
return 1;
}
/* libbpf opens BPF object and loads it into the kernel */
skel = iterators_bpf__open_and_load();
if (!skel) {
/* iterators.skel.h is little endian.
* libbpf doesn't support automatic little->big conversion
* of BPF bytecode yet.
* The program load will fail in such case.
*/
printf("Failed load could be due to wrong endianness\n");
return 1;
}
err = iterators_bpf__attach(skel);
if (err)
goto cleanup;
/* send two bpf_link IDs with names to the kernel */
err = send_link_to_kernel(skel->links.dump_bpf_map_fd, "maps.debug");
if (err)
goto cleanup;
err = send_link_to_kernel(skel->links.dump_bpf_prog_fd, "progs.debug");
if (err)
goto cleanup;
/* The kernel will proceed with pinning the links in bpffs.
* UMD will wait on read from pipe.
*/
read(from_kernel, &magic, sizeof(magic));
if (magic != BPF_PRELOAD_END) {
printf("bad final magic %d\n", magic);
err = -EINVAL;
}
cleanup:
iterators_bpf__destroy(skel);
return err != 0;
}

kernel/bpf/syscall.c

@@ -2490,6 +2490,7 @@ void bpf_link_put(struct bpf_link *link)
bpf_link_free(link);
}
}
EXPORT_SYMBOL(bpf_link_put);
static int bpf_link_release(struct inode *inode, struct file *filp)
{
@@ -2632,6 +2633,7 @@ struct bpf_link *bpf_link_get_from_fd(u32 ufd)
return link;
}
EXPORT_SYMBOL(bpf_link_get_from_fd);
struct bpf_tracing_link {
struct bpf_link link;