mirror of
https://github.com/torvalds/linux.git
synced 2025-12-07 20:06:24 +00:00
selftests/bpf: BPF task work scheduling tests
Introducing selftests that check the BPF task work scheduling mechanism. Validate that the verifier does not accept incorrect calls to the bpf_task_work_schedule kfunc. Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com> Acked-by: Eduard Zingerman <eddyz87@gmail.com> Link: https://lore.kernel.org/r/20250923112404.668720-9-mykyta.yatsenko5@gmail.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
committed by
Alexei Starovoitov
parent
38aa7003e3
commit
39fd74dfd5
150
tools/testing/selftests/bpf/prog_tests/test_task_work.c
Normal file
150
tools/testing/selftests/bpf/prog_tests/test_task_work.c
Normal file
@@ -0,0 +1,150 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
|
||||
#include <test_progs.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include "task_work.skel.h"
|
||||
#include "task_work_fail.skel.h"
|
||||
#include <linux/bpf.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <sys/syscall.h>
|
||||
#include <time.h>
|
||||
|
||||
/*
 * Open a sampling perf event for @pid on any CPU.
 *
 * The event fires every 100000 occurrences of (@type, @config), which is
 * what periodically triggers the attached BPF program in the child.
 * Returns the perf event fd, or -1 with errno set on failure.
 */
static int perf_event_open(__u32 type, __u64 config, int pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;
	attr.size = sizeof(attr);
	attr.sample_period = 100000;

	return syscall(__NR_perf_event_open, &attr, pid, /* cpu */ -1,
		       /* group_fd */ -1, /* flags */ 0);
}
|
||||
|
||||
/*
 * Map value layout; must match `struct elem` in progs/task_work.c.
 * The BPF task work callback fills @data, @tw holds the kernel-side
 * bpf_task_work scheduling state embedded in the map value.
 */
struct elem {
	char data[128];
	struct bpf_task_work tw;
};
|
||||
|
||||
static int verify_map(struct bpf_map *map, const char *expected_data)
|
||||
{
|
||||
int err;
|
||||
struct elem value;
|
||||
int processed_values = 0;
|
||||
int k, sz;
|
||||
|
||||
sz = bpf_map__max_entries(map);
|
||||
for (k = 0; k < sz; ++k) {
|
||||
err = bpf_map__lookup_elem(map, &k, sizeof(int), &value, sizeof(struct elem), 0);
|
||||
if (err)
|
||||
continue;
|
||||
if (!ASSERT_EQ(strcmp(expected_data, value.data), 0, "map data")) {
|
||||
fprintf(stderr, "expected '%s', found '%s' in %s map", expected_data,
|
||||
value.data, bpf_map__name(map));
|
||||
return 2;
|
||||
}
|
||||
processed_values++;
|
||||
}
|
||||
|
||||
return processed_values == 0;
|
||||
}
|
||||
|
||||
static void task_work_run(const char *prog_name, const char *map_name)
|
||||
{
|
||||
struct task_work *skel;
|
||||
struct bpf_program *prog;
|
||||
struct bpf_map *map;
|
||||
struct bpf_link *link;
|
||||
int err, pe_fd = 0, pid, status, pipefd[2];
|
||||
char user_string[] = "hello world";
|
||||
|
||||
if (!ASSERT_NEQ(pipe(pipefd), -1, "pipe"))
|
||||
return;
|
||||
|
||||
pid = fork();
|
||||
if (pid == 0) {
|
||||
__u64 num = 1;
|
||||
int i;
|
||||
char buf;
|
||||
|
||||
close(pipefd[1]);
|
||||
read(pipefd[0], &buf, sizeof(buf));
|
||||
close(pipefd[0]);
|
||||
|
||||
for (i = 0; i < 10000; ++i)
|
||||
num *= time(0) % 7;
|
||||
(void)num;
|
||||
exit(0);
|
||||
}
|
||||
ASSERT_GT(pid, 0, "fork() failed");
|
||||
|
||||
skel = task_work__open();
|
||||
if (!ASSERT_OK_PTR(skel, "task_work__open"))
|
||||
return;
|
||||
|
||||
bpf_object__for_each_program(prog, skel->obj) {
|
||||
bpf_program__set_autoload(prog, false);
|
||||
}
|
||||
|
||||
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
|
||||
if (!ASSERT_OK_PTR(prog, "prog_name"))
|
||||
goto cleanup;
|
||||
bpf_program__set_autoload(prog, true);
|
||||
skel->bss->user_ptr = (char *)user_string;
|
||||
|
||||
err = task_work__load(skel);
|
||||
if (!ASSERT_OK(err, "skel_load"))
|
||||
goto cleanup;
|
||||
|
||||
pe_fd = perf_event_open(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES, pid);
|
||||
if (pe_fd == -1 && (errno == ENOENT || errno == EOPNOTSUPP)) {
|
||||
printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
|
||||
test__skip();
|
||||
goto cleanup;
|
||||
}
|
||||
if (!ASSERT_NEQ(pe_fd, -1, "pe_fd")) {
|
||||
fprintf(stderr, "perf_event_open errno: %d, pid: %d\n", errno, pid);
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
link = bpf_program__attach_perf_event(prog, pe_fd);
|
||||
if (!ASSERT_OK_PTR(link, "attach_perf_event"))
|
||||
goto cleanup;
|
||||
|
||||
close(pipefd[0]);
|
||||
write(pipefd[1], user_string, 1);
|
||||
close(pipefd[1]);
|
||||
/* Wait to collect some samples */
|
||||
waitpid(pid, &status, 0);
|
||||
pid = 0;
|
||||
map = bpf_object__find_map_by_name(skel->obj, map_name);
|
||||
if (!ASSERT_OK_PTR(map, "find map_name"))
|
||||
goto cleanup;
|
||||
if (!ASSERT_OK(verify_map(map, user_string), "verify map"))
|
||||
goto cleanup;
|
||||
cleanup:
|
||||
if (pe_fd >= 0)
|
||||
close(pe_fd);
|
||||
task_work__destroy(skel);
|
||||
if (pid) {
|
||||
close(pipefd[0]);
|
||||
write(pipefd[1], user_string, 1);
|
||||
close(pipefd[1]);
|
||||
waitpid(pid, &status, 0);
|
||||
}
|
||||
}
|
||||
|
||||
void test_task_work(void)
|
||||
{
|
||||
if (test__start_subtest("test_task_work_hash_map"))
|
||||
task_work_run("oncpu_hash_map", "hmap");
|
||||
|
||||
if (test__start_subtest("test_task_work_array_map"))
|
||||
task_work_run("oncpu_array_map", "arrmap");
|
||||
|
||||
if (test__start_subtest("test_task_work_lru_map"))
|
||||
task_work_run("oncpu_lru_map", "lrumap");
|
||||
|
||||
RUN_TESTS(task_work_fail);
|
||||
}
|
||||
107
tools/testing/selftests/bpf/progs/task_work.c
Normal file
107
tools/testing/selftests/bpf/progs/task_work.c
Normal file
@@ -0,0 +1,107 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
|
||||
|
||||
#include <vmlinux.h>
|
||||
#include <string.h>
|
||||
#include <stdbool.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
#include "bpf_misc.h"
|
||||
#include "errno.h"
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
||||
|
||||
const void *user_ptr = NULL;
|
||||
|
||||
/*
 * Map value: a buffer written by the task work callback plus the
 * embedded bpf_task_work state the kernel uses for scheduling.
 * Must match `struct elem` in prog_tests/test_task_work.c.
 */
struct elem {
	char data[128];
	struct bpf_task_work tw;
};
|
||||
|
||||
/* Hash map (non-preallocated) exercised by oncpu_hash_map. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} hmap SEC(".maps");
|
||||
|
||||
/* Array map exercised by oncpu_array_map. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} arrmap SEC(".maps");
|
||||
|
||||
/* LRU hash map exercised by oncpu_lru_map. */
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} lrumap SEC(".maps");
|
||||
|
||||
/*
 * Task work callback invoked by the kernel once the scheduled work runs
 * for the target task. Copies the string at user_ptr (set by the
 * userspace test) into the map value's data buffer, which the test then
 * verifies. NOTE(review): reading user memory here presumes the callback
 * runs in a context where bpf_copy_from_user_str() is permitted.
 */
static int process_work(struct bpf_map *map, void *key, void *value)
{
	struct elem *work = value;

	bpf_copy_from_user_str(work->data, sizeof(work->data), (const void *)user_ptr, 0);
	return 0;
}
|
||||
|
||||
int key = 0;
|
||||
|
||||
SEC("perf_event")
int oncpu_hash_map(struct pt_regs *args)
{
	struct elem empty_work = { .data = { 0 } };
	struct elem *work;
	struct task_struct *task;
	int err;

	task = bpf_get_current_task_btf();
	/* Insert a fresh value; BPF_NOEXIST makes repeated perf samples
	 * bail out once the entry exists, so work is scheduled only once.
	 */
	err = bpf_map_update_elem(&hmap, &key, &empty_work, BPF_NOEXIST);
	if (err)
		return 0;
	work = bpf_map_lookup_elem(&hmap, &key);
	if (!work)
		return 0;

	/* Schedule process_work() to run when the task resumes to userspace. */
	bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work, NULL);
	return 0;
}
|
||||
|
||||
SEC("perf_event")
int oncpu_array_map(struct pt_regs *args)
{
	struct elem *work;
	struct task_struct *task;

	task = bpf_get_current_task_btf();
	/* Array map entries always exist; no insertion step is needed. */
	work = bpf_map_lookup_elem(&arrmap, &key);
	if (!work)
		return 0;
	/* Signal-based variant of task work scheduling. */
	bpf_task_work_schedule_signal(task, &work->tw, &arrmap, process_work, NULL);
	return 0;
}
|
||||
|
||||
SEC("perf_event")
int oncpu_lru_map(struct pt_regs *args)
{
	struct elem empty_work = { .data = { 0 } };
	struct elem *work;
	struct task_struct *task;
	int err;

	task = bpf_get_current_task_btf();
	/* If the entry is already present, work was scheduled earlier. */
	work = bpf_map_lookup_elem(&lrumap, &key);
	if (work)
		return 0;
	err = bpf_map_update_elem(&lrumap, &key, &empty_work, BPF_NOEXIST);
	if (err)
		return 0;
	/* Re-lookup after insert; skip if the callback already filled data
	 * (non-zero first byte) in a racing invocation.
	 */
	work = bpf_map_lookup_elem(&lrumap, &key);
	if (!work || work->data[0])
		return 0;
	bpf_task_work_schedule_resume(task, &work->tw, &lrumap, process_work, NULL);
	return 0;
}
|
||||
96
tools/testing/selftests/bpf/progs/task_work_fail.c
Normal file
96
tools/testing/selftests/bpf/progs/task_work_fail.c
Normal file
@@ -0,0 +1,96 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
|
||||
|
||||
#include <vmlinux.h>
|
||||
#include <string.h>
|
||||
#include <stdbool.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
#include "bpf_misc.h"
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
||||
|
||||
const void *user_ptr = NULL;
|
||||
|
||||
/*
 * Map value with an embedded bpf_task_work; mirrors the layout used by
 * the positive tests in progs/task_work.c.
 */
struct elem {
	char data[128];
	struct bpf_task_work tw;
};
|
||||
|
||||
/* Hash map used as the (sometimes mismatched) map argument below. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} hmap SEC(".maps");
|
||||
|
||||
/* Array map whose values are looked up in the failure cases below. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} arrmap SEC(".maps");
|
||||
|
||||
/*
 * Task work callback passed to the scheduling kfuncs. These programs are
 * expected to be rejected by the verifier before it could ever run; it
 * only needs to be well-formed.
 */
static int process_work(struct bpf_map *map, void *key, void *value)
{
	struct elem *work = value;

	bpf_copy_from_user_str(work->data, sizeof(work->data), (const void *)user_ptr, 0);
	return 0;
}
|
||||
|
||||
int key = 0;
|
||||
|
||||
/*
 * Negative test: the bpf_task_work value comes from arrmap but hmap is
 * passed as the map argument; the verifier must reject the mismatch.
 */
SEC("perf_event")
__failure __msg("doesn't match map pointer in R3")
int mismatch_map(struct pt_regs *args)
{
	struct elem *work;
	struct task_struct *task;

	task = bpf_get_current_task_btf();
	work = bpf_map_lookup_elem(&arrmap, &key);
	if (!work)
		return 0;
	bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work, NULL);
	return 0;
}
|
||||
|
||||
/*
 * Negative test: the bpf_task_work lives on the BPF stack instead of in
 * a map value; the verifier must reject it.
 */
SEC("perf_event")
__failure __msg("arg#1 doesn't point to a map value")
int no_map_task_work(struct pt_regs *args)
{
	struct task_struct *task;
	struct bpf_task_work tw;

	task = bpf_get_current_task_btf();
	bpf_task_work_schedule_resume(task, &tw, &hmap, process_work, NULL);
	return 0;
}
|
||||
|
||||
/*
 * Negative test: NULL bpf_task_work pointer must be rejected as a NULL
 * trusted argument.
 */
SEC("perf_event")
__failure __msg("Possibly NULL pointer passed to trusted arg1")
int task_work_null(struct pt_regs *args)
{
	struct task_struct *task;

	task = bpf_get_current_task_btf();
	bpf_task_work_schedule_resume(task, NULL, &hmap, process_work, NULL);
	return 0;
}
|
||||
|
||||
/*
 * Negative test: NULL map pointer must be rejected as a NULL trusted
 * argument.
 */
SEC("perf_event")
__failure __msg("Possibly NULL pointer passed to trusted arg2")
int map_null(struct pt_regs *args)
{
	struct elem *work;
	struct task_struct *task;

	task = bpf_get_current_task_btf();
	work = bpf_map_lookup_elem(&arrmap, &key);
	if (!work)
		return 0;
	bpf_task_work_schedule_resume(task, &work->tw, NULL, process_work, NULL);
	return 0;
}
|
||||
Reference in New Issue
Block a user