mirror of https://github.com/torvalds/linux.git
net: optimize enqueue_to_backlog() for the fast path
Add likely() and unlikely() clauses for the common cases:
 - Device is running.
 - Queue is not full.
 - Queue is less than half capacity.

Add max_backlog parameter to skb_flow_limit() to avoid a second
READ_ONCE(net_hotdata.max_backlog).

skb_flow_limit() does not need the backlog_lock protection, and can be
called before we acquire the lock, for even better resistance to attacks.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20251024090517.3289181-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
committed by Jakub Kicinski
parent 34164142b5
commit a086e9860c
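
For context on the annotations this patch adds: in kernel code, likely() and unlikely() are branch-prediction hints built on the compiler's __builtin_expect(). The sketch below shows the conventional definitions and how the patched half-capacity test uses them; it is an illustration only (the real definitions live in include/linux/compiler.h and carry extra instrumentation variants), and below_half_capacity() is a made-up helper, not a kernel function.

/* Illustrative sketch: conventional forms of the kernel's
 * branch-prediction hints (not copied from include/linux/compiler.h).
 */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Hypothetical helper mirroring the patched check in skb_flow_limit().
 * The hint only influences code layout and static branch prediction;
 * the semantics of the test are unchanged.
 */
static inline bool below_half_capacity(unsigned int qlen, int max_backlog)
{
	if (likely(qlen < (max_backlog >> 1)))
		return true;
	return false;
}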
@@ -5249,14 +5249,15 @@ void kick_defer_list_purge(unsigned int cpu)
 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
 #endif
 
-static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
+static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen,
+			   int max_backlog)
 {
 #ifdef CONFIG_NET_FLOW_LIMIT
-	struct sd_flow_limit *fl;
-	struct softnet_data *sd;
 	unsigned int old_flow, new_flow;
+	const struct softnet_data *sd;
+	struct sd_flow_limit *fl;
 
-	if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))
+	if (likely(qlen < (max_backlog >> 1)))
 		return false;
 
 	sd = this_cpu_ptr(&softnet_data);
@@ -5301,19 +5302,19 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	u32 tail;
 
 	reason = SKB_DROP_REASON_DEV_READY;
-	if (!netif_running(skb->dev))
+	if (unlikely(!netif_running(skb->dev)))
 		goto bad_dev;
 
-	reason = SKB_DROP_REASON_CPU_BACKLOG;
 	sd = &per_cpu(softnet_data, cpu);
 
 	qlen = skb_queue_len_lockless(&sd->input_pkt_queue);
 	max_backlog = READ_ONCE(net_hotdata.max_backlog);
-	if (unlikely(qlen > max_backlog))
+	if (unlikely(qlen > max_backlog) ||
+	    skb_flow_limit(skb, qlen, max_backlog))
 		goto cpu_backlog_drop;
 	backlog_lock_irq_save(sd, &flags);
 	qlen = skb_queue_len(&sd->input_pkt_queue);
-	if (qlen <= max_backlog && !skb_flow_limit(skb, qlen)) {
+	if (likely(qlen <= max_backlog)) {
 		if (!qlen) {
 			/* Schedule NAPI for backlog device. We can use
 			 * non atomic operation as we own the queue lock.
@@ -5334,6 +5335,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	backlog_unlock_irq_restore(sd, &flags);
 
 cpu_backlog_drop:
+	reason = SKB_DROP_REASON_CPU_BACKLOG;
 	numa_drop_add(&sd->drop_counters, 1);
 bad_dev:
 	dev_core_stats_rx_dropped_inc(skb->dev);
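
Taken together, the hunks above (in net/core/dev.c) move both backlog checks ahead of the per-CPU backlog lock: the queue length is sampled locklessly, net_hotdata.max_backlog is read once, and skb_flow_limit() now runs before backlog_lock_irq_save(), so a flooded CPU can drop packets without ever taking the lock. Below is a condensed sketch of the resulting control flow; enqueue_sketch() is a simplified stand-in for illustration, not the full enqueue_to_backlog().

/* Simplified sketch of the reordered fast path; drop-reason accounting,
 * per-CPU drop counters and NAPI scheduling from the real
 * enqueue_to_backlog() are omitted.
 */
static int enqueue_sketch(struct sk_buff *skb, struct softnet_data *sd)
{
	unsigned long flags;
	unsigned int qlen;
	int max_backlog;

	if (unlikely(!netif_running(skb->dev)))	/* rare: device is down */
		goto drop;

	/* Lockless pre-checks: read the limit once and reject floods
	 * before paying for the backlog lock.
	 */
	qlen = skb_queue_len_lockless(&sd->input_pkt_queue);
	max_backlog = READ_ONCE(net_hotdata.max_backlog);
	if (unlikely(qlen > max_backlog) ||
	    skb_flow_limit(skb, qlen, max_backlog))
		goto drop;

	backlog_lock_irq_save(sd, &flags);
	qlen = skb_queue_len(&sd->input_pkt_queue);	/* re-check under the lock */
	if (likely(qlen <= max_backlog)) {
		__skb_queue_tail(&sd->input_pkt_queue, skb);
		backlog_unlock_irq_restore(sd, &flags);
		return NET_RX_SUCCESS;
	}
	backlog_unlock_irq_restore(sd, &flags);
drop:
	/* The real function records a drop reason before freeing the skb. */
	kfree_skb(skb);
	return NET_RX_DROP;
}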