From 8271bec9fc1cfe522b1a18cacbefd6712a3d41c2 Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Mon, 14 Jul 2025 11:09:05 -0700 Subject: [PATCH 01/13] bpf: tcp: Make mem flags configurable through bpf_iter_tcp_realloc_batch Prepare for the next patch which needs to be able to choose either GFP_USER or GFP_NOWAIT for calls to bpf_iter_tcp_realloc_batch. Signed-off-by: Jordan Rife Signed-off-by: Martin KaFai Lau Reviewed-by: Kuniyuki Iwashima Acked-by: Stanislav Fomichev --- net/ipv4/tcp_ipv4.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 429fb34b075e..a0a68b564d9d 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -3048,12 +3048,12 @@ static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter) } static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter, - unsigned int new_batch_sz) + unsigned int new_batch_sz, gfp_t flags) { struct sock **new_batch; new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz, - GFP_USER | __GFP_NOWARN); + flags | __GFP_NOWARN); if (!new_batch) return -ENOMEM; @@ -3165,7 +3165,8 @@ again: return sk; } - if (!resized && !bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2)) { + if (!resized && !bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2, + GFP_USER)) { resized = true; goto again; } @@ -3596,7 +3597,7 @@ static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux) if (err) return err; - err = bpf_iter_tcp_realloc_batch(iter, INIT_BATCH_SZ); + err = bpf_iter_tcp_realloc_batch(iter, INIT_BATCH_SZ, GFP_USER); if (err) { bpf_iter_fini_seq_net(priv_data); return err; From cdec67a489d4fdae3e83e04fca0419136a83c4c2 Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Mon, 14 Jul 2025 11:09:06 -0700 Subject: [PATCH 02/13] bpf: tcp: Make sure iter->batch always contains a full bucket snapshot Require that iter->batch always contains a full bucket snapshot. This invariant is important to avoid skipping or repeating sockets during iteration when combined with the next few patches. Before, there were two cases where a call to bpf_iter_tcp_batch may only capture part of a bucket: 1. When bpf_iter_tcp_realloc_batch() returns -ENOMEM. 2. When more sockets are added to the bucket while calling bpf_iter_tcp_realloc_batch(), making the updated batch size insufficient. In cases where the batch size only covers part of a bucket, it is possible to forget which sockets were already visited, especially if we have to process a bucket in more than two batches. This forces us to choose between repeating or skipping sockets, so don't allow this: 1. Stop iteration and propagate -ENOMEM up to userspace if reallocation fails instead of continuing with a partial batch. 2. Try bpf_iter_tcp_realloc_batch() with GFP_USER just as before, but if we still aren't able to capture the full bucket, call bpf_iter_tcp_realloc_batch() again while holding the bucket lock to guarantee the bucket does not change. On the second attempt use GFP_NOWAIT since we hold onto the spin lock. I did some manual testing to exercise the code paths where GFP_NOWAIT is used and where ERR_PTR(err) is returned. I used the realloc test cases included later in this series to trigger a scenario where a realloc happens inside bpf_iter_tcp_batch and made a small code tweak to force the first realloc attempt to allocate a too-small batch, thus requiring another attempt with GFP_NOWAIT. 
Some printks showed both reallocs with the tests passing: Jun 27 00:00:53 crow kernel: again GFP_USER Jun 27 00:00:53 crow kernel: again GFP_NOWAIT Jun 27 00:00:53 crow kernel: again GFP_USER Jun 27 00:00:53 crow kernel: again GFP_NOWAIT With this setup, I also forced each of the bpf_iter_tcp_realloc_batch calls to return -ENOMEM to ensure that iteration ends and that the read() in userspace fails. Signed-off-by: Jordan Rife Signed-off-by: Martin KaFai Lau Reviewed-by: Kuniyuki Iwashima Acked-by: Stanislav Fomichev --- net/ipv4/tcp_ipv4.c | 111 +++++++++++++++++++++++++++++++------------- 1 file changed, 78 insertions(+), 33 deletions(-) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index a0a68b564d9d..291b24508c2f 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -3057,7 +3057,7 @@ static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter, if (!new_batch) return -ENOMEM; - bpf_iter_tcp_put_batch(iter); + memcpy(new_batch, iter->batch, sizeof(*iter->batch) * iter->end_sk); kvfree(iter->batch); iter->batch = new_batch; iter->max_sk = new_batch_sz; @@ -3066,69 +3066,95 @@ static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter, } static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq, - struct sock *start_sk) + struct sock **start_sk) { - struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; struct bpf_tcp_iter_state *iter = seq->private; - struct tcp_iter_state *st = &iter->state; struct hlist_nulls_node *node; unsigned int expected = 1; struct sock *sk; - sock_hold(start_sk); - iter->batch[iter->end_sk++] = start_sk; + sock_hold(*start_sk); + iter->batch[iter->end_sk++] = *start_sk; - sk = sk_nulls_next(start_sk); + sk = sk_nulls_next(*start_sk); + *start_sk = NULL; sk_nulls_for_each_from(sk, node) { if (seq_sk_match(seq, sk)) { if (iter->end_sk < iter->max_sk) { sock_hold(sk); iter->batch[iter->end_sk++] = sk; + } else if (!*start_sk) { + /* Remember where we left off. */ + *start_sk = sk; } expected++; } } - spin_unlock(&hinfo->lhash2[st->bucket].lock); return expected; } static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq, - struct sock *start_sk) + struct sock **start_sk) { - struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; struct bpf_tcp_iter_state *iter = seq->private; - struct tcp_iter_state *st = &iter->state; struct hlist_nulls_node *node; unsigned int expected = 1; struct sock *sk; - sock_hold(start_sk); - iter->batch[iter->end_sk++] = start_sk; + sock_hold(*start_sk); + iter->batch[iter->end_sk++] = *start_sk; - sk = sk_nulls_next(start_sk); + sk = sk_nulls_next(*start_sk); + *start_sk = NULL; sk_nulls_for_each_from(sk, node) { if (seq_sk_match(seq, sk)) { if (iter->end_sk < iter->max_sk) { sock_hold(sk); iter->batch[iter->end_sk++] = sk; + } else if (!*start_sk) { + /* Remember where we left off. 
*/ + *start_sk = sk; } expected++; } } - spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket)); return expected; } +static unsigned int bpf_iter_fill_batch(struct seq_file *seq, + struct sock **start_sk) +{ + struct bpf_tcp_iter_state *iter = seq->private; + struct tcp_iter_state *st = &iter->state; + + if (st->state == TCP_SEQ_STATE_LISTENING) + return bpf_iter_tcp_listening_batch(seq, start_sk); + else + return bpf_iter_tcp_established_batch(seq, start_sk); +} + +static void bpf_iter_tcp_unlock_bucket(struct seq_file *seq) +{ + struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; + struct bpf_tcp_iter_state *iter = seq->private; + struct tcp_iter_state *st = &iter->state; + + if (st->state == TCP_SEQ_STATE_LISTENING) + spin_unlock(&hinfo->lhash2[st->bucket].lock); + else + spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket)); +} + static struct sock *bpf_iter_tcp_batch(struct seq_file *seq) { struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; struct bpf_tcp_iter_state *iter = seq->private; struct tcp_iter_state *st = &iter->state; unsigned int expected; - bool resized = false; struct sock *sk; + int err; /* The st->bucket is done. Directly advance to the next * bucket instead of having the tcp_seek_last_pos() to skip @@ -3145,33 +3171,52 @@ static struct sock *bpf_iter_tcp_batch(struct seq_file *seq) } } -again: - /* Get a new batch */ iter->cur_sk = 0; iter->end_sk = 0; - iter->st_bucket_done = false; + iter->st_bucket_done = true; sk = tcp_seek_last_pos(seq); if (!sk) return NULL; /* Done */ - if (st->state == TCP_SEQ_STATE_LISTENING) - expected = bpf_iter_tcp_listening_batch(seq, sk); - else - expected = bpf_iter_tcp_established_batch(seq, sk); + expected = bpf_iter_fill_batch(seq, &sk); + if (likely(iter->end_sk == expected)) + goto done; - if (iter->end_sk == expected) { - iter->st_bucket_done = true; - return sk; + /* Batch size was too small. */ + bpf_iter_tcp_unlock_bucket(seq); + bpf_iter_tcp_put_batch(iter); + err = bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2, + GFP_USER); + if (err) + return ERR_PTR(err); + + iter->cur_sk = 0; + iter->end_sk = 0; + + sk = tcp_seek_last_pos(seq); + if (!sk) + return NULL; /* Done */ + + expected = bpf_iter_fill_batch(seq, &sk); + if (likely(iter->end_sk == expected)) + goto done; + + /* Batch size was still too small. Hold onto the lock while we try + * again with a larger batch to make sure the current bucket's size + * does not change in the meantime. + */ + err = bpf_iter_tcp_realloc_batch(iter, expected, GFP_NOWAIT); + if (err) { + bpf_iter_tcp_unlock_bucket(seq); + return ERR_PTR(err); } - if (!resized && !bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2, - GFP_USER)) { - resized = true; - goto again; - } - - return sk; + expected = bpf_iter_fill_batch(seq, &sk); + WARN_ON_ONCE(iter->end_sk != expected); +done: + bpf_iter_tcp_unlock_bucket(seq); + return iter->batch[0]; } static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos) From e25ab9b874a4bd8c6e3e5ce66cbe8a1dd4096e2e Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Mon, 14 Jul 2025 11:09:07 -0700 Subject: [PATCH 03/13] bpf: tcp: Get rid of st_bucket_done Get rid of the st_bucket_done field to simplify TCP iterator state and logic. Before, st_bucket_done could be false if bpf_iter_tcp_batch returned a partial batch; however, with the last patch ("bpf: tcp: Make sure iter->batch always contains a full bucket snapshot"), st_bucket_done == true is equivalent to iter->cur_sk == iter->end_sk. 
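In effect, the flag is replaced by checking the batch cursor directly.
A condensed view of the resulting advance logic (not a verbatim hunk):

        /* The previous batch was fully consumed, so the bucket is
         * done; advance to the next bucket rather than rescanning
         * this one.
         */
        if (iter->end_sk && iter->cur_sk == iter->end_sk)
                st->bucket++;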
Signed-off-by: Jordan Rife Signed-off-by: Martin KaFai Lau Reviewed-by: Kuniyuki Iwashima Acked-by: Stanislav Fomichev --- net/ipv4/tcp_ipv4.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 291b24508c2f..1c88b537109f 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -3020,7 +3020,6 @@ struct bpf_tcp_iter_state { unsigned int end_sk; unsigned int max_sk; struct sock **batch; - bool st_bucket_done; }; struct bpf_iter__tcp { @@ -3043,8 +3042,10 @@ static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta, static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter) { - while (iter->cur_sk < iter->end_sk) - sock_gen_put(iter->batch[iter->cur_sk++]); + unsigned int cur_sk = iter->cur_sk; + + while (cur_sk < iter->end_sk) + sock_gen_put(iter->batch[cur_sk++]); } static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter, @@ -3161,7 +3162,7 @@ static struct sock *bpf_iter_tcp_batch(struct seq_file *seq) * one by one in the current bucket and eventually find out * it has to advance to the next bucket. */ - if (iter->st_bucket_done) { + if (iter->end_sk && iter->cur_sk == iter->end_sk) { st->offset = 0; st->bucket++; if (st->state == TCP_SEQ_STATE_LISTENING && @@ -3173,7 +3174,6 @@ static struct sock *bpf_iter_tcp_batch(struct seq_file *seq) iter->cur_sk = 0; iter->end_sk = 0; - iter->st_bucket_done = true; sk = tcp_seek_last_pos(seq); if (!sk) @@ -3321,10 +3321,8 @@ static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v) (void)tcp_prog_seq_show(prog, &meta, v, 0); } - if (iter->cur_sk < iter->end_sk) { + if (iter->cur_sk < iter->end_sk) bpf_iter_tcp_put_batch(iter); - iter->st_bucket_done = false; - } } static const struct seq_operations bpf_iter_tcp_seq_ops = { From efeb820951ebf3778830256496ff72d00d135310 Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Mon, 14 Jul 2025 11:09:08 -0700 Subject: [PATCH 04/13] bpf: tcp: Use bpf_tcp_iter_batch_item for bpf_tcp_iter_state batch items Prepare for the next patch that tracks cookies between iterations by converting struct sock **batch to union bpf_tcp_iter_batch_item *batch inside struct bpf_tcp_iter_state. 
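The union starts with a single member; the next patch adds a cookie
member so that the same slot can hold either a held socket pointer
(while the batch owns a reference) or the socket's cookie (after the
reference is dropped). Sketch of where this is headed:

        union bpf_tcp_iter_batch_item {
                struct sock *sk;        /* valid while a reference is held */
                __u64 cookie;           /* stored once the reference is put */
        };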
Signed-off-by: Jordan Rife Signed-off-by: Martin KaFai Lau Reviewed-by: Kuniyuki Iwashima Acked-by: Stanislav Fomichev --- net/ipv4/tcp_ipv4.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 1c88b537109f..28062e292d8b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -3014,12 +3014,16 @@ out: } #ifdef CONFIG_BPF_SYSCALL +union bpf_tcp_iter_batch_item { + struct sock *sk; +}; + struct bpf_tcp_iter_state { struct tcp_iter_state state; unsigned int cur_sk; unsigned int end_sk; unsigned int max_sk; - struct sock **batch; + union bpf_tcp_iter_batch_item *batch; }; struct bpf_iter__tcp { @@ -3045,13 +3049,13 @@ static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter) unsigned int cur_sk = iter->cur_sk; while (cur_sk < iter->end_sk) - sock_gen_put(iter->batch[cur_sk++]); + sock_gen_put(iter->batch[cur_sk++].sk); } static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter, unsigned int new_batch_sz, gfp_t flags) { - struct sock **new_batch; + union bpf_tcp_iter_batch_item *new_batch; new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz, flags | __GFP_NOWARN); @@ -3075,7 +3079,7 @@ static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq, struct sock *sk; sock_hold(*start_sk); - iter->batch[iter->end_sk++] = *start_sk; + iter->batch[iter->end_sk++].sk = *start_sk; sk = sk_nulls_next(*start_sk); *start_sk = NULL; @@ -3083,7 +3087,7 @@ static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq, if (seq_sk_match(seq, sk)) { if (iter->end_sk < iter->max_sk) { sock_hold(sk); - iter->batch[iter->end_sk++] = sk; + iter->batch[iter->end_sk++].sk = sk; } else if (!*start_sk) { /* Remember where we left off. */ *start_sk = sk; @@ -3104,7 +3108,7 @@ static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq, struct sock *sk; sock_hold(*start_sk); - iter->batch[iter->end_sk++] = *start_sk; + iter->batch[iter->end_sk++].sk = *start_sk; sk = sk_nulls_next(*start_sk); *start_sk = NULL; @@ -3112,7 +3116,7 @@ static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq, if (seq_sk_match(seq, sk)) { if (iter->end_sk < iter->max_sk) { sock_hold(sk); - iter->batch[iter->end_sk++] = sk; + iter->batch[iter->end_sk++].sk = sk; } else if (!*start_sk) { /* Remember where we left off. */ *start_sk = sk; @@ -3216,7 +3220,7 @@ static struct sock *bpf_iter_tcp_batch(struct seq_file *seq) WARN_ON_ONCE(iter->end_sk != expected); done: bpf_iter_tcp_unlock_bucket(seq); - return iter->batch[0]; + return iter->batch[0].sk; } static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos) @@ -3251,11 +3255,11 @@ static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) * st->bucket. See tcp_seek_last_pos(). */ st->offset++; - sock_gen_put(iter->batch[iter->cur_sk++]); + sock_gen_put(iter->batch[iter->cur_sk++].sk); } if (iter->cur_sk < iter->end_sk) - sk = iter->batch[iter->cur_sk]; + sk = iter->batch[iter->cur_sk].sk; else sk = bpf_iter_tcp_batch(seq); From f5080f612a1c587bf636bb23d2a2f4de276d60e4 Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Mon, 14 Jul 2025 11:09:09 -0700 Subject: [PATCH 05/13] bpf: tcp: Avoid socket skips and repeats during iteration Replace the offset-based approach for tracking progress through a bucket in the TCP table with one based on socket cookies. 
Remember the cookies of unprocessed sockets from the last batch and use this list to pick up where we left off or, in the case that the next socket disappears between reads, find the first socket after that point that still exists in the bucket and resume from there. This approach guarantees that all sockets that existed when iteration began and continue to exist throughout will be visited exactly once. Sockets that are added to the table during iteration may or may not be seen, but if they are they will be seen exactly once. Signed-off-by: Jordan Rife Signed-off-by: Martin KaFai Lau Acked-by: Stanislav Fomichev --- net/ipv4/tcp_ipv4.c | 147 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 115 insertions(+), 32 deletions(-) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 28062e292d8b..aef35555792e 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -58,6 +58,7 @@ #include #include #include +#include #include #include @@ -3016,6 +3017,7 @@ out: #ifdef CONFIG_BPF_SYSCALL union bpf_tcp_iter_batch_item { struct sock *sk; + __u64 cookie; }; struct bpf_tcp_iter_state { @@ -3046,10 +3048,19 @@ static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta, static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter) { + union bpf_tcp_iter_batch_item *item; unsigned int cur_sk = iter->cur_sk; + __u64 cookie; - while (cur_sk < iter->end_sk) - sock_gen_put(iter->batch[cur_sk++].sk); + /* Remember the cookies of the sockets we haven't seen yet, so we can + * pick up where we left off next time around. + */ + while (cur_sk < iter->end_sk) { + item = &iter->batch[cur_sk++]; + cookie = sock_gen_cookie(item->sk); + sock_gen_put(item->sk); + item->cookie = cookie; + } } static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter, @@ -3070,6 +3081,106 @@ static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter, return 0; } +static struct sock *bpf_iter_tcp_resume_bucket(struct sock *first_sk, + union bpf_tcp_iter_batch_item *cookies, + int n_cookies) +{ + struct hlist_nulls_node *node; + struct sock *sk; + int i; + + for (i = 0; i < n_cookies; i++) { + sk = first_sk; + sk_nulls_for_each_from(sk, node) + if (cookies[i].cookie == atomic64_read(&sk->sk_cookie)) + return sk; + } + + return NULL; +} + +static struct sock *bpf_iter_tcp_resume_listening(struct seq_file *seq) +{ + struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; + struct bpf_tcp_iter_state *iter = seq->private; + struct tcp_iter_state *st = &iter->state; + unsigned int find_cookie = iter->cur_sk; + unsigned int end_cookie = iter->end_sk; + int resume_bucket = st->bucket; + struct sock *sk; + + if (end_cookie && find_cookie == end_cookie) + ++st->bucket; + + sk = listening_get_first(seq); + iter->cur_sk = 0; + iter->end_sk = 0; + + if (sk && st->bucket == resume_bucket && end_cookie) { + sk = bpf_iter_tcp_resume_bucket(sk, &iter->batch[find_cookie], + end_cookie - find_cookie); + if (!sk) { + spin_unlock(&hinfo->lhash2[st->bucket].lock); + ++st->bucket; + sk = listening_get_first(seq); + } + } + + return sk; +} + +static struct sock *bpf_iter_tcp_resume_established(struct seq_file *seq) +{ + struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; + struct bpf_tcp_iter_state *iter = seq->private; + struct tcp_iter_state *st = &iter->state; + unsigned int find_cookie = iter->cur_sk; + unsigned int end_cookie = iter->end_sk; + int resume_bucket = st->bucket; + struct sock *sk; + + if (end_cookie && find_cookie == 
end_cookie) + ++st->bucket; + + sk = established_get_first(seq); + iter->cur_sk = 0; + iter->end_sk = 0; + + if (sk && st->bucket == resume_bucket && end_cookie) { + sk = bpf_iter_tcp_resume_bucket(sk, &iter->batch[find_cookie], + end_cookie - find_cookie); + if (!sk) { + spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket)); + ++st->bucket; + sk = established_get_first(seq); + } + } + + return sk; +} + +static struct sock *bpf_iter_tcp_resume(struct seq_file *seq) +{ + struct bpf_tcp_iter_state *iter = seq->private; + struct tcp_iter_state *st = &iter->state; + struct sock *sk = NULL; + + switch (st->state) { + case TCP_SEQ_STATE_LISTENING: + sk = bpf_iter_tcp_resume_listening(seq); + if (sk) + break; + st->bucket = 0; + st->state = TCP_SEQ_STATE_ESTABLISHED; + fallthrough; + case TCP_SEQ_STATE_ESTABLISHED: + sk = bpf_iter_tcp_resume_established(seq); + break; + } + + return sk; +} + static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq, struct sock **start_sk) { @@ -3154,32 +3265,12 @@ static void bpf_iter_tcp_unlock_bucket(struct seq_file *seq) static struct sock *bpf_iter_tcp_batch(struct seq_file *seq) { - struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; struct bpf_tcp_iter_state *iter = seq->private; - struct tcp_iter_state *st = &iter->state; unsigned int expected; struct sock *sk; int err; - /* The st->bucket is done. Directly advance to the next - * bucket instead of having the tcp_seek_last_pos() to skip - * one by one in the current bucket and eventually find out - * it has to advance to the next bucket. - */ - if (iter->end_sk && iter->cur_sk == iter->end_sk) { - st->offset = 0; - st->bucket++; - if (st->state == TCP_SEQ_STATE_LISTENING && - st->bucket > hinfo->lhash2_mask) { - st->state = TCP_SEQ_STATE_ESTABLISHED; - st->bucket = 0; - } - } - - iter->cur_sk = 0; - iter->end_sk = 0; - - sk = tcp_seek_last_pos(seq); + sk = bpf_iter_tcp_resume(seq); if (!sk) return NULL; /* Done */ @@ -3195,10 +3286,7 @@ static struct sock *bpf_iter_tcp_batch(struct seq_file *seq) if (err) return ERR_PTR(err); - iter->cur_sk = 0; - iter->end_sk = 0; - - sk = tcp_seek_last_pos(seq); + sk = bpf_iter_tcp_resume(seq); if (!sk) return NULL; /* Done */ @@ -3250,11 +3338,6 @@ static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) * meta.seq_num is used instead. */ st->num++; - /* Move st->offset to the next sk in the bucket such that - * the future start() will resume at st->offset in - * st->bucket. See tcp_seek_last_pos(). - */ - st->offset++; sock_gen_put(iter->batch[iter->cur_sk++].sk); } From da1d987d3b39a91e53be888c29610f57fb67bbe0 Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Mon, 14 Jul 2025 11:09:10 -0700 Subject: [PATCH 06/13] selftests/bpf: Add tests for bucket resume logic in listening sockets Replicate the set of test cases used for UDP socket iterators to test similar scenarios for TCP listening sockets. 
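For context, the tests drive the iterator from user space by creating
an iterator fd from the attached link and read()ing the socket cookies
that the BPF program emits with bpf_seq_write. A minimal sketch of that
loop, with error handling trimmed and record_seen() standing in for the
tests' actual bookkeeping:

        int iter_fd = bpf_iter_create(bpf_link__fd(link));
        __u64 cookie;

        while (read(iter_fd, &cookie, sizeof(cookie)) > 0)
                record_seen(cookie);
        close(iter_fd);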
Signed-off-by: Jordan Rife Signed-off-by: Martin KaFai Lau Acked-by: Stanislav Fomichev --- .../bpf/prog_tests/sock_iter_batch.c | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c index a4517bee34d5..2adacd91fdf8 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c @@ -358,6 +358,53 @@ static struct test_case resume_tests[] = { .family = AF_INET6, .test = force_realloc, }, + { + .description = "tcp: resume after removing a seen socket (listening)", + .init_socks = nr_soreuse, + .max_socks = nr_soreuse, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_seen, + }, + { + .description = "tcp: resume after removing one unseen socket (listening)", + .init_socks = nr_soreuse, + .max_socks = nr_soreuse, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_unseen, + }, + { + .description = "tcp: resume after removing all unseen sockets (listening)", + .init_socks = nr_soreuse, + .max_socks = nr_soreuse, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_all, + }, + { + .description = "tcp: resume after adding a few sockets (listening)", + .init_socks = nr_soreuse, + .max_socks = nr_soreuse, + .sock_type = SOCK_STREAM, + /* Use AF_INET so that new sockets are added to the head of the + * bucket's list. + */ + .family = AF_INET, + .test = add_some, + }, + { + .description = "tcp: force a realloc to occur (listening)", + .init_socks = init_batch_size, + .max_socks = init_batch_size * 2, + .sock_type = SOCK_STREAM, + /* Use AF_INET6 so that new sockets are added to the tail of the + * bucket's list, needing to be added to the next batch to force + * a realloc. + */ + .family = AF_INET6, + .test = force_realloc, + }, }; static void do_resume_test(struct test_case *tc) From 346066c3278f3baa61b1abc8a03721ed2684efe7 Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Mon, 14 Jul 2025 11:09:11 -0700 Subject: [PATCH 07/13] selftests/bpf: Allow for iteration over multiple ports Prepare to test TCP socket iteration over both listening and established sockets by allowing the BPF iterator programs to skip the port check. 
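Leaving both ports zeroed now acts as a wildcard: neither port
comparison matches, so the program falls through to the new branch and
counts the socket under index 0. On the test side this is simply:

        skel->rodata->ports[0] = 0;
        skel->rodata->ports[1] = 0;     /* 0/0: don't filter on port */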
Signed-off-by: Jordan Rife Signed-off-by: Martin KaFai Lau Acked-by: Stanislav Fomichev --- tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c | 7 ++----- tools/testing/selftests/bpf/progs/sock_iter_batch.c | 4 ++++ 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c index 2adacd91fdf8..0d0f1b4debff 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c @@ -416,7 +416,6 @@ static void do_resume_test(struct test_case *tc) int err, iter_fd = -1; const char *addr; int *fds = NULL; - int local_port; counts = calloc(tc->max_socks, sizeof(*counts)); if (!ASSERT_OK_PTR(counts, "counts")) @@ -431,10 +430,8 @@ static void do_resume_test(struct test_case *tc) tc->init_socks); if (!ASSERT_OK_PTR(fds, "start_reuseport_server")) goto done; - local_port = get_socket_local_port(*fds); - if (!ASSERT_GE(local_port, 0, "get_socket_local_port")) - goto done; - skel->rodata->ports[0] = ntohs(local_port); + skel->rodata->ports[0] = 0; + skel->rodata->ports[1] = 0; skel->rodata->sf = tc->family; err = sock_iter_batch__load(skel); diff --git a/tools/testing/selftests/bpf/progs/sock_iter_batch.c b/tools/testing/selftests/bpf/progs/sock_iter_batch.c index 8f483337e103..40dce6a38c30 100644 --- a/tools/testing/selftests/bpf/progs/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/progs/sock_iter_batch.c @@ -52,6 +52,8 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx) idx = 0; else if (sk->sk_num == ports[1]) idx = 1; + else if (!ports[0] && !ports[1]) + idx = 0; else return 0; @@ -92,6 +94,8 @@ int iter_udp_soreuse(struct bpf_iter__udp *ctx) idx = 0; else if (sk->sk_num == ports[1]) idx = 1; + else if (!ports[0] && !ports[1]) + idx = 0; else return 0; From f00468124a08a7ecd6f2ed932c57d86a1fc249db Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Mon, 14 Jul 2025 11:09:12 -0700 Subject: [PATCH 08/13] selftests/bpf: Allow for iteration over multiple states Add parentheses around loopback address check to fix up logic and make the socket state filter configurable for the TCP socket iterators. Iterators can skip the socket state check by setting ss to 0. 
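The parentheses matter because the conditional operator binds more
loosely than ||, so the old check parsed as one big condition that
merely selected which loopback test to run; a family or state mismatch
ended up evaluating the IPv6 branch instead of rejecting the socket.
Schematically:

        /* before (broken): a || b || c ? d : e  ==  (a || b || c) ? d : e */
        /* after  (fixed):  a || b || (c ? d : e) */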
Signed-off-by: Jordan Rife Signed-off-by: Martin KaFai Lau Acked-by: Stanislav Fomichev --- .../selftests/bpf/prog_tests/sock_iter_batch.c | 3 +++ tools/testing/selftests/bpf/progs/sock_iter_batch.c | 11 ++++++----- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c index 0d0f1b4debff..4e15a0c2f237 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c @@ -433,6 +433,7 @@ static void do_resume_test(struct test_case *tc) skel->rodata->ports[0] = 0; skel->rodata->ports[1] = 0; skel->rodata->sf = tc->family; + skel->rodata->ss = 0; err = sock_iter_batch__load(skel); if (!ASSERT_OK(err, "sock_iter_batch__load")) @@ -498,6 +499,8 @@ static void do_test(int sock_type, bool onebyone) skel->rodata->ports[i] = ntohs(local_port); } skel->rodata->sf = AF_INET6; + if (sock_type == SOCK_STREAM) + skel->rodata->ss = TCP_LISTEN; err = sock_iter_batch__load(skel); if (!ASSERT_OK(err, "sock_iter_batch__load")) diff --git a/tools/testing/selftests/bpf/progs/sock_iter_batch.c b/tools/testing/selftests/bpf/progs/sock_iter_batch.c index 40dce6a38c30..a36361e4a5de 100644 --- a/tools/testing/selftests/bpf/progs/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/progs/sock_iter_batch.c @@ -23,6 +23,7 @@ static bool ipv4_addr_loopback(__be32 a) } volatile const unsigned int sf; +volatile const unsigned int ss; volatile const __u16 ports[2]; unsigned int bucket[2]; @@ -42,10 +43,10 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx) sock_cookie = bpf_get_socket_cookie(sk); sk = bpf_core_cast(sk, struct sock); if (sk->sk_family != sf || - sk->sk_state != TCP_LISTEN || - sk->sk_family == AF_INET6 ? + (ss && sk->sk_state != ss) || + (sk->sk_family == AF_INET6 ? !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr) : - !ipv4_addr_loopback(sk->sk_rcv_saddr)) + !ipv4_addr_loopback(sk->sk_rcv_saddr))) return 0; if (sk->sk_num == ports[0]) @@ -85,9 +86,9 @@ int iter_udp_soreuse(struct bpf_iter__udp *ctx) sock_cookie = bpf_get_socket_cookie(sk); sk = bpf_core_cast(sk, struct sock); if (sk->sk_family != sf || - sk->sk_family == AF_INET6 ? + (sk->sk_family == AF_INET6 ? !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr) : - !ipv4_addr_loopback(sk->sk_rcv_saddr)) + !ipv4_addr_loopback(sk->sk_rcv_saddr))) return 0; if (sk->sk_num == ports[0]) From 08327292e7093de9b68ef02fe975738799ceedf4 Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Mon, 14 Jul 2025 11:09:13 -0700 Subject: [PATCH 09/13] selftests/bpf: Make ehash buckets configurable in socket iterator tests Prepare for bucket resume tests for established TCP sockets by making the number of ehash buckets configurable. Subsequent patches force all established sockets into the same bucket by setting ehash_buckets to one. 
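This works because net.ipv4.tcp_child_ehash_entries, when set in the
parent namespace, sizes the ehash table of any child namespace created
afterwards, so a value of one funnels every established socket into a
single bucket. The setup sequence in do_resume_test() is roughly:

        SYS(done, "sysctl -wq net.ipv4.tcp_child_ehash_entries=%d",
            tc->ehash_buckets);
        SYS(done, "ip netns add %s", TEST_CHILD_NS);
        SYS(done, "ip -net %s link set dev lo up", TEST_CHILD_NS);
        nstoken = open_netns(TEST_CHILD_NS);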
Signed-off-by: Jordan Rife Signed-off-by: Martin KaFai Lau Acked-by: Stanislav Fomichev --- .../bpf/prog_tests/sock_iter_batch.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c index 4e15a0c2f237..60b45685ef72 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c @@ -6,6 +6,7 @@ #include "sock_iter_batch.skel.h" #define TEST_NS "sock_iter_batch_netns" +#define TEST_CHILD_NS "sock_iter_batch_child_netns" static const int init_batch_size = 16; static const int nr_soreuse = 4; @@ -304,6 +305,7 @@ struct test_case { int *socks, int socks_len, struct sock_count *counts, int counts_len, struct bpf_link *link, int iter_fd); const char *description; + int ehash_buckets; int init_socks; int max_socks; int sock_type; @@ -410,13 +412,25 @@ static struct test_case resume_tests[] = { static void do_resume_test(struct test_case *tc) { struct sock_iter_batch *skel = NULL; + struct sock_count *counts = NULL; static const __u16 port = 10001; + struct nstoken *nstoken = NULL; struct bpf_link *link = NULL; - struct sock_count *counts; int err, iter_fd = -1; const char *addr; int *fds = NULL; + if (tc->ehash_buckets) { + SYS_NOFAIL("ip netns del " TEST_CHILD_NS); + SYS(done, "sysctl -wq net.ipv4.tcp_child_ehash_entries=%d", + tc->ehash_buckets); + SYS(done, "ip netns add %s", TEST_CHILD_NS); + SYS(done, "ip -net %s link set dev lo up", TEST_CHILD_NS); + nstoken = open_netns(TEST_CHILD_NS); + if (!ASSERT_OK_PTR(nstoken, "open_child_netns")) + goto done; + } + counts = calloc(tc->max_socks, sizeof(*counts)); if (!ASSERT_OK_PTR(counts, "counts")) goto done; @@ -453,6 +467,9 @@ static void do_resume_test(struct test_case *tc) tc->test(tc->family, tc->sock_type, addr, port, fds, tc->init_socks, counts, tc->max_socks, link, iter_fd); done: + close_netns(nstoken); + SYS_NOFAIL("ip netns del " TEST_CHILD_NS); + SYS_NOFAIL("sysctl -w net.ipv4.tcp_child_ehash_entries=0"); free(counts); free_fds(fds, tc->init_socks); if (iter_fd >= 0) From 07ebabbbfe9b4c95e8f1f144f21c07fe830fa501 Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Mon, 14 Jul 2025 11:09:14 -0700 Subject: [PATCH 10/13] selftests/bpf: Create established sockets in socket iterator tests Prepare for bucket resume tests for established TCP sockets by creating established sockets. Collect socket fds from connect() and accept() sides and pass them to test cases. 
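Each connection contributes two fds, the client socket from connect()
and the server-side socket from accept(), which is why callers size the
established array as connections * 2. The pairing in connect_to_server()
boils down to:

        established_socks[i++] = connect_to_addr_str(family, sock_type,
                                                     addr, port, NULL);
        established_socks[i++] = accept_from_one(server_poll_fds,
                                                 server_fds_len);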
Signed-off-by: Jordan Rife Signed-off-by: Martin KaFai Lau Acked-by: Stanislav Fomichev --- .../bpf/prog_tests/sock_iter_batch.c | 89 ++++++++++++++++++- 1 file changed, 85 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c index 60b45685ef72..0ccc90d1d1ef 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2024 Meta +#include #include #include "network_helpers.h" #include "sock_iter_batch.skel.h" @@ -153,8 +154,71 @@ static void check_n_were_seen_once(int *fds, int fds_len, int n, ASSERT_EQ(seen_once, n, "seen_once"); } +static int accept_from_one(struct pollfd *server_poll_fds, + int server_poll_fds_len) +{ + static const int poll_timeout_ms = 5000; /* 5s */ + int ret; + int i; + + ret = poll(server_poll_fds, server_poll_fds_len, poll_timeout_ms); + if (!ASSERT_EQ(ret, 1, "poll")) + return -1; + + for (i = 0; i < server_poll_fds_len; i++) + if (server_poll_fds[i].revents & POLLIN) + return accept(server_poll_fds[i].fd, NULL, NULL); + + return -1; +} + +static int *connect_to_server(int family, int sock_type, const char *addr, + __u16 port, int nr_connects, int *server_fds, + int server_fds_len) +{ + struct pollfd *server_poll_fds = NULL; + int *established_socks = NULL; + int i; + + server_poll_fds = calloc(server_fds_len, sizeof(*server_poll_fds)); + if (!ASSERT_OK_PTR(server_poll_fds, "server_poll_fds")) + return NULL; + + for (i = 0; i < server_fds_len; i++) { + server_poll_fds[i].fd = server_fds[i]; + server_poll_fds[i].events = POLLIN; + } + + i = 0; + + established_socks = malloc(sizeof(*established_socks) * nr_connects*2); + if (!ASSERT_OK_PTR(established_socks, "established_socks")) + goto error; + + while (nr_connects--) { + established_socks[i] = connect_to_addr_str(family, sock_type, + addr, port, NULL); + if (!ASSERT_OK_FD(established_socks[i], "connect_to_addr_str")) + goto error; + i++; + established_socks[i] = accept_from_one(server_poll_fds, + server_fds_len); + if (!ASSERT_OK_FD(established_socks[i], "accept_from_one")) + goto error; + i++; + } + + free(server_poll_fds); + return established_socks; +error: + free_fds(established_socks, i); + free(server_poll_fds); + return NULL; +} + static void remove_seen(int family, int sock_type, const char *addr, __u16 port, - int *socks, int socks_len, struct sock_count *counts, + int *socks, int socks_len, int *established_socks, + int established_socks_len, struct sock_count *counts, int counts_len, struct bpf_link *link, int iter_fd) { int close_idx; @@ -185,6 +249,7 @@ static void remove_seen(int family, int sock_type, const char *addr, __u16 port, static void remove_unseen(int family, int sock_type, const char *addr, __u16 port, int *socks, int socks_len, + int *established_socks, int established_socks_len, struct sock_count *counts, int counts_len, struct bpf_link *link, int iter_fd) { @@ -217,6 +282,7 @@ static void remove_unseen(int family, int sock_type, const char *addr, static void remove_all(int family, int sock_type, const char *addr, __u16 port, int *socks, int socks_len, + int *established_socks, int established_socks_len, struct sock_count *counts, int counts_len, struct bpf_link *link, int iter_fd) { @@ -244,7 +310,8 @@ static void remove_all(int family, int sock_type, const char *addr, } static void add_some(int family, int sock_type, const char *addr, __u16 
port, - int *socks, int socks_len, struct sock_count *counts, + int *socks, int socks_len, int *established_socks, + int established_socks_len, struct sock_count *counts, int counts_len, struct bpf_link *link, int iter_fd) { int *new_socks = NULL; @@ -274,6 +341,7 @@ done: static void force_realloc(int family, int sock_type, const char *addr, __u16 port, int *socks, int socks_len, + int *established_socks, int established_socks_len, struct sock_count *counts, int counts_len, struct bpf_link *link, int iter_fd) { @@ -302,10 +370,12 @@ done: struct test_case { void (*test)(int family, int sock_type, const char *addr, __u16 port, - int *socks, int socks_len, struct sock_count *counts, + int *socks, int socks_len, int *established_socks, + int established_socks_len, struct sock_count *counts, int counts_len, struct bpf_link *link, int iter_fd); const char *description; int ehash_buckets; + int connections; int init_socks; int max_socks; int sock_type; @@ -416,6 +486,7 @@ static void do_resume_test(struct test_case *tc) static const __u16 port = 10001; struct nstoken *nstoken = NULL; struct bpf_link *link = NULL; + int *established_fds = NULL; int err, iter_fd = -1; const char *addr; int *fds = NULL; @@ -444,6 +515,14 @@ static void do_resume_test(struct test_case *tc) tc->init_socks); if (!ASSERT_OK_PTR(fds, "start_reuseport_server")) goto done; + if (tc->connections) { + established_fds = connect_to_server(tc->family, tc->sock_type, + addr, port, + tc->connections, fds, + tc->init_socks); + if (!ASSERT_OK_PTR(established_fds, "connect_to_server")) + goto done; + } skel->rodata->ports[0] = 0; skel->rodata->ports[1] = 0; skel->rodata->sf = tc->family; @@ -465,13 +544,15 @@ static void do_resume_test(struct test_case *tc) goto done; tc->test(tc->family, tc->sock_type, addr, port, fds, tc->init_socks, - counts, tc->max_socks, link, iter_fd); + established_fds, tc->connections*2, counts, tc->max_socks, + link, iter_fd); done: close_netns(nstoken); SYS_NOFAIL("ip netns del " TEST_CHILD_NS); SYS_NOFAIL("sysctl -w net.ipv4.tcp_child_ehash_entries=0"); free(counts); free_fds(fds, tc->init_socks); + free_fds(established_fds, tc->connections*2); if (iter_fd >= 0) close(iter_fd); bpf_link__destroy(link); From 8fc0c5a82d04e1582b666e3c407340e66493608f Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Mon, 14 Jul 2025 11:09:15 -0700 Subject: [PATCH 11/13] selftests/bpf: Create iter_tcp_destroy test program Prepare for bucket resume tests for established TCP sockets by creating a program to immediately destroy and remove sockets from the TCP ehash table, since close() is not deterministic. 
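close() is not deterministic here because a closed TCP socket can
linger in the ehash table (e.g. in FIN_WAIT or TIME_WAIT) instead of
disappearing immediately, while bpf_sock_destroy() removes it right
away. A later patch in the series triggers this program from user
space along these lines:

        skel->rodata->destroy_cookie = socket_cookie(fd);
        link = bpf_program__attach_iter(skel->progs.iter_tcp_destroy, NULL);
        iter_fd = bpf_iter_create(bpf_link__fd(link));
        read(iter_fd, &out, sizeof(out));       /* destroys the match */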
Signed-off-by: Jordan Rife Signed-off-by: Martin KaFai Lau Acked-by: Stanislav Fomichev --- .../selftests/bpf/progs/sock_iter_batch.c | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/sock_iter_batch.c b/tools/testing/selftests/bpf/progs/sock_iter_batch.c index a36361e4a5de..77966ded5467 100644 --- a/tools/testing/selftests/bpf/progs/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/progs/sock_iter_batch.c @@ -70,6 +70,27 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx) return 0; } +volatile const __u64 destroy_cookie; + +SEC("iter/tcp") +int iter_tcp_destroy(struct bpf_iter__tcp *ctx) +{ + struct sock_common *sk_common = (struct sock_common *)ctx->sk_common; + __u64 sock_cookie; + + if (!sk_common) + return 0; + + sock_cookie = bpf_get_socket_cookie(sk_common); + if (sock_cookie != destroy_cookie) + return 0; + + bpf_sock_destroy(sk_common); + bpf_seq_write(ctx->meta->seq, &sock_cookie, sizeof(sock_cookie)); + + return 0; +} + #define udp_sk(ptr) container_of(ptr, struct udp_sock, inet.sk) SEC("iter/udp") From f126f0ce7c830d538ca047f3a3bdc64669812d9c Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Mon, 14 Jul 2025 11:09:16 -0700 Subject: [PATCH 12/13] selftests/bpf: Add tests for bucket resume logic in established sockets Replicate the set of test cases used for UDP socket iterators to test similar scenarios for TCP established sockets. Signed-off-by: Jordan Rife Signed-off-by: Martin KaFai Lau Acked-by: Stanislav Fomichev --- .../bpf/prog_tests/sock_iter_batch.c | 293 ++++++++++++++++++ 1 file changed, 293 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c index 0ccc90d1d1ef..27781df8f2fb 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c @@ -120,6 +120,45 @@ done: return nth_sock_idx; } +static void destroy(int fd) +{ + struct sock_iter_batch *skel = NULL; + __u64 cookie = socket_cookie(fd); + struct bpf_link *link = NULL; + int iter_fd = -1; + int nread; + __u64 out; + + skel = sock_iter_batch__open(); + if (!ASSERT_OK_PTR(skel, "sock_iter_batch__open")) + goto done; + + skel->rodata->destroy_cookie = cookie; + + if (!ASSERT_OK(sock_iter_batch__load(skel), "sock_iter_batch__load")) + goto done; + + link = bpf_program__attach_iter(skel->progs.iter_tcp_destroy, NULL); + if (!ASSERT_OK_PTR(link, "bpf_program__attach_iter")) + goto done; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_OK_FD(iter_fd, "bpf_iter_create")) + goto done; + + /* Delete matching socket. */ + nread = read(iter_fd, &out, sizeof(out)); + ASSERT_GE(nread, 0, "nread"); + if (nread) + ASSERT_EQ(out, cookie, "cookie matches"); +done: + if (iter_fd >= 0) + close(iter_fd); + bpf_link__destroy(link); + sock_iter_batch__destroy(skel); + close(fd); +} + static int get_seen_count(int fd, struct sock_count counts[], int n) { __u64 cookie = socket_cookie(fd); @@ -247,6 +286,43 @@ static void remove_seen(int family, int sock_type, const char *addr, __u16 port, counts_len); } +static void remove_seen_established(int family, int sock_type, const char *addr, + __u16 port, int *listen_socks, + int listen_socks_len, int *established_socks, + int established_socks_len, + struct sock_count *counts, int counts_len, + struct bpf_link *link, int iter_fd) +{ + int close_idx; + + /* Iterate through all listening sockets. 
*/ + read_n(iter_fd, listen_socks_len, counts, counts_len); + + /* Make sure we saw all listening sockets exactly once. */ + check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len, + counts, counts_len); + + /* Leave one established socket. */ + read_n(iter_fd, established_socks_len - 1, counts, counts_len); + + /* Close a socket we've already seen to remove it from the bucket. */ + close_idx = get_nth_socket(established_socks, established_socks_len, + link, listen_socks_len + 1); + if (!ASSERT_GE(close_idx, 0, "close_idx")) + return; + destroy(established_socks[close_idx]); + established_socks[close_idx] = -1; + + /* Iterate through the rest of the sockets. */ + read_n(iter_fd, -1, counts, counts_len); + + /* Make sure the last socket wasn't skipped and that there were no + * repeats. + */ + check_n_were_seen_once(established_socks, established_socks_len, + established_socks_len - 1, counts, counts_len); +} + static void remove_unseen(int family, int sock_type, const char *addr, __u16 port, int *socks, int socks_len, int *established_socks, int established_socks_len, @@ -280,6 +356,51 @@ static void remove_unseen(int family, int sock_type, const char *addr, counts_len); } +static void remove_unseen_established(int family, int sock_type, + const char *addr, __u16 port, + int *listen_socks, int listen_socks_len, + int *established_socks, + int established_socks_len, + struct sock_count *counts, int counts_len, + struct bpf_link *link, int iter_fd) +{ + int close_idx; + + /* Iterate through all listening sockets. */ + read_n(iter_fd, listen_socks_len, counts, counts_len); + + /* Make sure we saw all listening sockets exactly once. */ + check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len, + counts, counts_len); + + /* Iterate through the first established socket. */ + read_n(iter_fd, 1, counts, counts_len); + + /* Make sure we saw one established socks. */ + check_n_were_seen_once(established_socks, established_socks_len, 1, + counts, counts_len); + + /* Close what would be the next socket in the bucket to exercise the + * condition where we need to skip past the first cookie we remembered. + */ + close_idx = get_nth_socket(established_socks, established_socks_len, + link, listen_socks_len + 1); + if (!ASSERT_GE(close_idx, 0, "close_idx")) + return; + + destroy(established_socks[close_idx]); + established_socks[close_idx] = -1; + + /* Iterate through the rest of the sockets. */ + read_n(iter_fd, -1, counts, counts_len); + + /* Make sure the remaining sockets were seen exactly once and that we + * didn't repeat the socket that was already seen. + */ + check_n_were_seen_once(established_socks, established_socks_len, + established_socks_len - 1, counts, counts_len); +} + static void remove_all(int family, int sock_type, const char *addr, __u16 port, int *socks, int socks_len, int *established_socks, int established_socks_len, @@ -309,6 +430,54 @@ static void remove_all(int family, int sock_type, const char *addr, ASSERT_EQ(read_n(iter_fd, -1, counts, counts_len), 0, "read_n"); } +static void remove_all_established(int family, int sock_type, const char *addr, + __u16 port, int *listen_socks, + int listen_socks_len, int *established_socks, + int established_socks_len, + struct sock_count *counts, int counts_len, + struct bpf_link *link, int iter_fd) +{ + int *close_idx = NULL; + int i; + + /* Iterate through all listening sockets. */ + read_n(iter_fd, listen_socks_len, counts, counts_len); + + /* Make sure we saw all listening sockets exactly once. 
*/ + check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len, + counts, counts_len); + + /* Iterate through the first established socket. */ + read_n(iter_fd, 1, counts, counts_len); + + /* Make sure we saw one established socks. */ + check_n_were_seen_once(established_socks, established_socks_len, 1, + counts, counts_len); + + /* Close all remaining sockets to exhaust the list of saved cookies and + * exit without putting any sockets into the batch on the next read. + */ + close_idx = malloc(sizeof(int) * (established_socks_len - 1)); + if (!ASSERT_OK_PTR(close_idx, "close_idx malloc")) + return; + for (i = 0; i < established_socks_len - 1; i++) { + close_idx[i] = get_nth_socket(established_socks, + established_socks_len, link, + listen_socks_len + i); + if (!ASSERT_GE(close_idx[i], 0, "close_idx")) + return; + } + + for (i = 0; i < established_socks_len - 1; i++) { + destroy(established_socks[close_idx[i]]); + established_socks[close_idx[i]] = -1; + } + + /* Make sure there are no more sockets returned */ + ASSERT_EQ(read_n(iter_fd, -1, counts, counts_len), 0, "read_n"); + free(close_idx); +} + static void add_some(int family, int sock_type, const char *addr, __u16 port, int *socks, int socks_len, int *established_socks, int established_socks_len, struct sock_count *counts, @@ -339,6 +508,49 @@ done: free_fds(new_socks, socks_len); } +static void add_some_established(int family, int sock_type, const char *addr, + __u16 port, int *listen_socks, + int listen_socks_len, int *established_socks, + int established_socks_len, + struct sock_count *counts, + int counts_len, struct bpf_link *link, + int iter_fd) +{ + int *new_socks = NULL; + + /* Iterate through all listening sockets. */ + read_n(iter_fd, listen_socks_len, counts, counts_len); + + /* Make sure we saw all listening sockets exactly once. */ + check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len, + counts, counts_len); + + /* Iterate through the first established_socks_len - 1 sockets. */ + read_n(iter_fd, established_socks_len - 1, counts, counts_len); + + /* Make sure we saw established_socks_len - 1 sockets exactly once. */ + check_n_were_seen_once(established_socks, established_socks_len, + established_socks_len - 1, counts, counts_len); + + /* Double the number of established sockets in the bucket. */ + new_socks = connect_to_server(family, sock_type, addr, port, + established_socks_len / 2, listen_socks, + listen_socks_len); + if (!ASSERT_OK_PTR(new_socks, "connect_to_server")) + goto done; + + /* Iterate through the rest of the sockets. */ + read_n(iter_fd, -1, counts, counts_len); + + /* Make sure each of the original sockets was seen exactly once. */ + check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len, + counts, counts_len); + check_n_were_seen_once(established_socks, established_socks_len, + established_socks_len, counts, counts_len); +done: + free_fds(new_socks, established_socks_len); +} + static void force_realloc(int family, int sock_type, const char *addr, __u16 port, int *socks, int socks_len, int *established_socks, int established_socks_len, @@ -368,6 +580,24 @@ done: free_fds(new_socks, socks_len); } +static void force_realloc_established(int family, int sock_type, + const char *addr, __u16 port, + int *listen_socks, int listen_socks_len, + int *established_socks, + int established_socks_len, + struct sock_count *counts, int counts_len, + struct bpf_link *link, int iter_fd) +{ + /* Iterate through all sockets to trigger a realloc. 
*/ + read_n(iter_fd, -1, counts, counts_len); + + /* Make sure each socket was seen exactly once. */ + check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len, + counts, counts_len); + check_n_were_seen_once(established_socks, established_socks_len, + established_socks_len, counts, counts_len); +} + struct test_case { void (*test)(int family, int sock_type, const char *addr, __u16 port, int *socks, int socks_len, int *established_socks, @@ -477,6 +707,69 @@ static struct test_case resume_tests[] = { .family = AF_INET6, .test = force_realloc, }, + { + .description = "tcp: resume after removing a seen socket (established)", + /* Force all established sockets into one bucket */ + .ehash_buckets = 1, + .connections = nr_soreuse, + .init_socks = nr_soreuse, + /* Room for connect()ed and accept()ed sockets */ + .max_socks = nr_soreuse * 3, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_seen_established, + }, + { + .description = "tcp: resume after removing one unseen socket (established)", + /* Force all established sockets into one bucket */ + .ehash_buckets = 1, + .connections = nr_soreuse, + .init_socks = nr_soreuse, + /* Room for connect()ed and accept()ed sockets */ + .max_socks = nr_soreuse * 3, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_unseen_established, + }, + { + .description = "tcp: resume after removing all unseen sockets (established)", + /* Force all established sockets into one bucket */ + .ehash_buckets = 1, + .connections = nr_soreuse, + .init_socks = nr_soreuse, + /* Room for connect()ed and accept()ed sockets */ + .max_socks = nr_soreuse * 3, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = remove_all_established, + }, + { + .description = "tcp: resume after adding a few sockets (established)", + /* Force all established sockets into one bucket */ + .ehash_buckets = 1, + .connections = nr_soreuse, + .init_socks = nr_soreuse, + /* Room for connect()ed and accept()ed sockets */ + .max_socks = nr_soreuse * 3, + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = add_some_established, + }, + { + .description = "tcp: force a realloc to occur (established)", + /* Force all established sockets into one bucket */ + .ehash_buckets = 1, + /* Bucket size will need to double when going from listening to + * established sockets. + */ + .connections = init_batch_size, + .init_socks = nr_soreuse, + /* Room for connect()ed and accept()ed sockets */ + .max_socks = nr_soreuse + (init_batch_size * 2), + .sock_type = SOCK_STREAM, + .family = AF_INET6, + .test = force_realloc_established, + }, }; static void do_resume_test(struct test_case *tc) From ef57dc6f52e4949527f82a456cb9a637a55209ea Mon Sep 17 00:00:00 2001 From: Song Yoong Siang Date: Wed, 16 Jul 2025 23:48:46 +0800 Subject: [PATCH 13/13] doc: xdp: Clarify driver implementation for XDP Rx metadata Clarify that drivers must remove device-reserved metadata from the data_meta area before passing frames to XDP programs. Additionally, expand the explanation of how userspace and BPF programs should coordinate the use of METADATA_SIZE, and add a detailed diagram to illustrate pointer adjustments and metadata layout. Also describe the requirements and constraints enforced by bpf_xdp_adjust_meta(). 
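As a reference point, a minimal XDP program that reserves and fills a
custom metadata area under these constraints might look like the sketch
below (illustrative only: the 8-byte struct and the stored values are
placeholders, and the usual <linux/bpf.h> and <bpf/bpf_helpers.h>
includes are omitted):

        struct rx_meta {
                __u32 hash;
                __u32 flags;
        };      /* size is a multiple of 4 and well under 252 bytes */

        SEC("xdp")
        int xdp_with_meta(struct xdp_md *ctx)
        {
                struct rx_meta *meta;

                /* Grow the metadata area in front of the packet data. */
                if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
                        return XDP_PASS;        /* constraints not met */

                meta = (void *)(long)ctx->data_meta;
                /* Bounds check required by the verifier. */
                if ((void *)(meta + 1) > (void *)(long)ctx->data)
                        return XDP_PASS;

                meta->hash = 0;
                meta->flags = 0;
                return XDP_PASS;
        }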
Signed-off-by: Song Yoong Siang Signed-off-by: Martin KaFai Lau Acked-by: Stanislav Fomichev Link: https://lore.kernel.org/r/20250716154846.3513575-1-yoong.siang.song@intel.com --- Documentation/networking/xdp-rx-metadata.rst | 33 ++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/Documentation/networking/xdp-rx-metadata.rst b/Documentation/networking/xdp-rx-metadata.rst index a6e0ece18be5..ce96f4c99505 100644 --- a/Documentation/networking/xdp-rx-metadata.rst +++ b/Documentation/networking/xdp-rx-metadata.rst @@ -120,6 +120,39 @@ It is possible to query which kfunc the particular netdev implements via netlink. See ``xdp-rx-metadata-features`` attribute set in ``Documentation/netlink/specs/netdev.yaml``. +Driver Implementation +===================== + +Certain devices may prepend metadata to received packets. However, as of now, +``AF_XDP`` lacks the ability to communicate the size of the ``data_meta`` area +to the consumer. Therefore, it is the responsibility of the driver to copy any +device-reserved metadata out from the metadata area and ensure that +``xdp_buff->data_meta`` is pointing to ``xdp_buff->data`` before presenting the +frame to the XDP program. This is necessary so that, after the XDP program +adjusts the metadata area, the consumer can reliably retrieve the metadata +address using ``METADATA_SIZE`` offset. + +The following diagram shows how custom metadata is positioned relative to the +packet data and how pointers are adjusted for metadata access:: + + |<-- bpf_xdp_adjust_meta(xdp_buff, -METADATA_SIZE) --| + new xdp_buff->data_meta old xdp_buff->data_meta + | | + | xdp_buff->data + | | + +----------+----------------------------------------------------+------+ + | headroom | custom metadata | data | + +----------+----------------------------------------------------+------+ + | | + | xdp_desc->addr + |<------ xsk_umem__get_data() - METADATA_SIZE -------| + +``bpf_xdp_adjust_meta`` ensures that ``METADATA_SIZE`` is aligned to 4 bytes, +does not exceed 252 bytes, and leaves sufficient space for building the +xdp_frame. If these conditions are not met, it returns a negative error. In this +case, the BPF program should not proceed to populate data into the ``data_meta`` +area. + Example =======