selftests/bpf: Verify skb metadata in BPF instead of userspace

Move metadata verification into the BPF TC programs. Previously,
userspace read metadata from a map and verified it once at test end.

Now TC programs compare metadata directly using __builtin_memcmp() and
set a test_pass flag. This enables verification at multiple points during
test execution rather than a single final check.

Signed-off-by: Jakub Sitnicki <jakub@cloudflare.com>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://patch.msgid.link/20251105-skb-meta-rx-path-v4-10-5ceb08a9b37b@cloudflare.com
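
Every TC program in the diff below now follows the same shape: bounds-check
the metadata area, memcmp it against the expected bytes, and record the
verdict in a global flag that userspace reads through the skeleton. An
illustrative, condensed sketch (meta_check is a hypothetical name; META_SIZE,
meta_want and test_pass are defined in the BPF object below):

	bool test_pass;

	SEC("tc")
	int meta_check(struct __sk_buff *ctx)
	{
		__u8 *meta_have = (void *)(unsigned long)ctx->data_meta;
		__u8 *data = (void *)(unsigned long)ctx->data;

		/* Verifier-mandated bounds check before touching metadata. */
		if (meta_have + META_SIZE > data)
			goto out;

		if (__builtin_memcmp(meta_want, meta_have, META_SIZE) == 0)
			test_pass = true; /* checked in BPF, not userspace */
	out:
		return TC_ACT_SHOT;
	}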

--- a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
@@ -171,33 +171,6 @@ static int write_test_packet(int tap_fd)
 	return 0;
 }
 
-static void assert_test_result(const struct bpf_map *result_map)
-{
-	int err;
-	__u32 map_key = 0;
-	__u8 map_value[TEST_PAYLOAD_LEN];
-
-	err = bpf_map__lookup_elem(result_map, &map_key, sizeof(map_key),
-				   &map_value, TEST_PAYLOAD_LEN, BPF_ANY);
-	if (!ASSERT_OK(err, "lookup test_result"))
-		return;
-
-	ASSERT_MEMEQ(&map_value, &test_payload, TEST_PAYLOAD_LEN,
-		     "test_result map contains test payload");
-}
-
-static bool clear_test_result(struct bpf_map *result_map)
-{
-	const __u8 v[sizeof(test_payload)] = {};
-	const __u32 k = 0;
-	int err;
-
-	err = bpf_map__update_elem(result_map, &k, sizeof(k), v, sizeof(v), BPF_ANY);
-	ASSERT_OK(err, "update test_result");
-
-	return err == 0;
-}
-
 void test_xdp_context_veth(void)
 {
 	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
@@ -270,11 +243,13 @@ void test_xdp_context_veth(void)
 	if (!ASSERT_GE(tx_ifindex, 0, "if_nametoindex tx"))
 		goto close;
 
+	skel->bss->test_pass = false;
+
 	ret = send_test_packet(tx_ifindex);
 	if (!ASSERT_OK(ret, "send_test_packet"))
 		goto close;
 
-	assert_test_result(skel->maps.test_result);
+	ASSERT_TRUE(skel->bss->test_pass, "test_pass");
 
 close:
 	close_netns(nstoken);
@@ -286,7 +261,7 @@ close:
 static void test_tuntap(struct bpf_program *xdp_prog,
 			struct bpf_program *tc_prio_1_prog,
 			struct bpf_program *tc_prio_2_prog,
-			struct bpf_map *result_map)
+			bool *test_pass)
 {
 	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
 	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
@@ -295,8 +270,7 @@ static void test_tuntap(struct bpf_program *xdp_prog,
 	int tap_ifindex;
 	int ret;
 
-	if (!clear_test_result(result_map))
-		return;
+	*test_pass = false;
 
 	ns = netns_new(TAP_NETNS, true);
 	if (!ASSERT_OK_PTR(ns, "create and open ns"))
@@ -340,7 +314,7 @@ static void test_tuntap(struct bpf_program *xdp_prog,
 	if (!ASSERT_OK(ret, "write_test_packet"))
 		goto close;
 
-	assert_test_result(result_map);
+	ASSERT_TRUE(*test_pass, "test_pass");
 
 close:
 	if (tap_fd >= 0)
@@ -431,37 +405,37 @@ void test_xdp_context_tuntap(void)
 		test_tuntap(skel->progs.ing_xdp,
 			    skel->progs.ing_cls,
 			    NULL, /* tc prio 2 */
-			    skel->maps.test_result);
+			    &skel->bss->test_pass);
 	if (test__start_subtest("dynptr_read"))
 		test_tuntap(skel->progs.ing_xdp,
 			    skel->progs.ing_cls_dynptr_read,
 			    NULL, /* tc prio 2 */
-			    skel->maps.test_result);
+			    &skel->bss->test_pass);
 	if (test__start_subtest("dynptr_slice"))
 		test_tuntap(skel->progs.ing_xdp,
 			    skel->progs.ing_cls_dynptr_slice,
 			    NULL, /* tc prio 2 */
-			    skel->maps.test_result);
+			    &skel->bss->test_pass);
 	if (test__start_subtest("dynptr_write"))
 		test_tuntap(skel->progs.ing_xdp_zalloc_meta,
 			    skel->progs.ing_cls_dynptr_write,
 			    skel->progs.ing_cls_dynptr_read,
-			    skel->maps.test_result);
+			    &skel->bss->test_pass);
 	if (test__start_subtest("dynptr_slice_rdwr"))
 		test_tuntap(skel->progs.ing_xdp_zalloc_meta,
 			    skel->progs.ing_cls_dynptr_slice_rdwr,
 			    skel->progs.ing_cls_dynptr_slice,
-			    skel->maps.test_result);
+			    &skel->bss->test_pass);
 	if (test__start_subtest("dynptr_offset"))
 		test_tuntap(skel->progs.ing_xdp_zalloc_meta,
 			    skel->progs.ing_cls_dynptr_offset_wr,
 			    skel->progs.ing_cls_dynptr_offset_rd,
-			    skel->maps.test_result);
+			    &skel->bss->test_pass);
 	if (test__start_subtest("dynptr_offset_oob"))
 		test_tuntap(skel->progs.ing_xdp,
 			    skel->progs.ing_cls_dynptr_offset_oob,
 			    skel->progs.ing_cls,
-			    skel->maps.test_result);
+			    &skel->bss->test_pass);
 	if (test__start_subtest("clone_data_meta_empty_on_data_write"))
 		test_tuntap_mirred(skel->progs.ing_xdp,
 				   skel->progs.clone_data_meta_empty_on_data_write,
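
Worth noting: libbpf skeletons memory-map global data, so userspace reads the
flag directly via skel->bss->test_pass with no bpf_map__lookup_elem() round
trip. A minimal sketch of the resulting flow, assuming the helpers above
(run_once is a hypothetical wrapper, not part of the patch):

	static int run_once(struct test_xdp_meta *skel, int tx_ifindex)
	{
		skel->bss->test_pass = false;	/* reset before each packet */

		if (send_test_packet(tx_ifindex))
			return -1;

		/* The TC program has run by now; the flag is its verdict. */
		return skel->bss->test_pass ? 0 : -1;
	}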

--- a/tools/testing/selftests/bpf/progs/test_xdp_meta.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
@@ -11,37 +11,36 @@
 #define ctx_ptr(ctx, mem) (void *)(unsigned long)ctx->mem
 
-/* Demonstrates how metadata can be passed from an XDP program to a TC program
- * using bpf_xdp_adjust_meta.
- * For the sake of testing the metadata support in drivers, the XDP program uses
- * a fixed-size payload after the Ethernet header as metadata. The TC program
- * copies the metadata it receives into a map so it can be checked from
- * userspace.
+/* Demonstrate passing metadata from XDP to TC using bpf_xdp_adjust_meta.
+ *
+ * The XDP program extracts a fixed-size payload following the Ethernet header
+ * and stores it as packet metadata to test the driver's metadata support. The
+ * TC program then verifies if the passed metadata is correct.
  */
 
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__uint(value_size, META_SIZE);
-} test_result SEC(".maps");
+bool test_pass;
+
+static const __u8 meta_want[META_SIZE] = {
+	0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+	0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+	0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
+	0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
+};
 
 SEC("tc")
 int ing_cls(struct __sk_buff *ctx)
 {
-	__u8 *data, *data_meta;
-	__u32 key = 0;
+	__u8 *meta_have = ctx_ptr(ctx, data_meta);
+	__u8 *data = ctx_ptr(ctx, data);
 
-	data_meta = ctx_ptr(ctx, data_meta);
-	data = ctx_ptr(ctx, data);
-
-	if (data_meta + META_SIZE > data)
-		return TC_ACT_SHOT;
+	if (meta_have + META_SIZE > data)
+		goto out;
 
-	bpf_map_update_elem(&test_result, &key, data_meta, BPF_ANY);
+	if (__builtin_memcmp(meta_want, meta_have, META_SIZE))
+		goto out;
 
+	test_pass = true;
+out:
 	return TC_ACT_SHOT;
 }
@@ -49,17 +48,17 @@ int ing_cls(struct __sk_buff *ctx)
 SEC("tc")
 int ing_cls_dynptr_read(struct __sk_buff *ctx)
 {
+	__u8 meta_have[META_SIZE];
 	struct bpf_dynptr meta;
-	const __u32 zero = 0;
-	__u8 *dst;
-
-	dst = bpf_map_lookup_elem(&test_result, &zero);
-	if (!dst)
-		return TC_ACT_SHOT;
 
 	bpf_dynptr_from_skb_meta(ctx, 0, &meta);
-	bpf_dynptr_read(dst, META_SIZE, &meta, 0, 0);
+	bpf_dynptr_read(meta_have, META_SIZE, &meta, 0, 0);
 
+	if (__builtin_memcmp(meta_want, meta_have, META_SIZE))
+		goto out;
+
+	test_pass = true;
+out:
 	return TC_ACT_SHOT;
 }
@@ -86,20 +85,18 @@ SEC("tc")
 int ing_cls_dynptr_slice(struct __sk_buff *ctx)
 {
 	struct bpf_dynptr meta;
-	const __u32 zero = 0;
-	__u8 *dst, *src;
-
-	dst = bpf_map_lookup_elem(&test_result, &zero);
-	if (!dst)
-		return TC_ACT_SHOT;
+	__u8 *meta_have;
 
 	bpf_dynptr_from_skb_meta(ctx, 0, &meta);
-	src = bpf_dynptr_slice(&meta, 0, NULL, META_SIZE);
-	if (!src)
-		return TC_ACT_SHOT;
+	meta_have = bpf_dynptr_slice(&meta, 0, NULL, META_SIZE);
+	if (!meta_have)
+		goto out;
 
-	__builtin_memcpy(dst, src, META_SIZE);
+	if (__builtin_memcmp(meta_want, meta_have, META_SIZE))
+		goto out;
 
+	test_pass = true;
+out:
 	return TC_ACT_SHOT;
 }
@@ -129,14 +126,12 @@ int ing_cls_dynptr_slice_rdwr(struct __sk_buff *ctx)
 SEC("tc")
 int ing_cls_dynptr_offset_rd(struct __sk_buff *ctx)
 {
-	struct bpf_dynptr meta;
 	const __u32 chunk_len = META_SIZE / 4;
-	const __u32 zero = 0;
+	__u8 meta_have[META_SIZE];
+	struct bpf_dynptr meta;
 	__u8 *dst, *src;
 
-	dst = bpf_map_lookup_elem(&test_result, &zero);
-	if (!dst)
-		return TC_ACT_SHOT;
+	dst = meta_have;
 
 	/* 1. Regular read */
 	bpf_dynptr_from_skb_meta(ctx, 0, &meta);
@@ -155,9 +150,14 @@ int ing_cls_dynptr_offset_rd(struct __sk_buff *ctx)
 	/* 4. Read from a slice starting at an offset */
 	src = bpf_dynptr_slice(&meta, 2 * chunk_len, NULL, chunk_len);
 	if (!src)
-		return TC_ACT_SHOT;
+		goto out;
 	__builtin_memcpy(dst, src, chunk_len);
 
+	if (__builtin_memcmp(meta_want, meta_have, META_SIZE))
+		goto out;
+
+	test_pass = true;
+out:
 	return TC_ACT_SHOT;
 }
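
The dynptr-based programs reduce to the same pattern; a condensed sketch of
the read variant (meta_check_dynptr is a hypothetical name; the kfunc and
helper calls are the ones used in the hunks above):

	SEC("tc")
	int meta_check_dynptr(struct __sk_buff *ctx)
	{
		__u8 meta_have[META_SIZE];
		struct bpf_dynptr meta;

		/* Wrap the skb metadata area in a dynptr, then copy it out. */
		bpf_dynptr_from_skb_meta(ctx, 0, &meta);
		if (bpf_dynptr_read(meta_have, META_SIZE, &meta, 0, 0))
			goto out;	/* metadata shorter than META_SIZE */

		if (__builtin_memcmp(meta_want, meta_have, META_SIZE) == 0)
			test_pass = true;
	out:
		return TC_ACT_SHOT;
	}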