net: shrink napi_skb_cache_{put,get}() and napi_skb_cache_get_bulk()

The following loop in napi_skb_cache_put() is unrolled by the compiler
even when CONFIG_KASAN is not enabled:

for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
	kasan_mempool_unpoison_object(nc->skb_cache[i],
				kmem_cache_size(net_hotdata.skbuff_cache));

We have 32 copies of this 12-byte (7-byte mov + 5-byte call) sequence,
for a total of 384 bytes:

	48 8b 3d 00 00 00 00 	mov    net_hotdata.skbuff_cache,%rdi
	e8 00 00 00 00       	call   kmem_cache_size

This is because kmem_cache_size() is neither inline nor const,
while kasan_mempool_unpoison_object() is an inline function.

Cache the kmem_cache_size() result in a variable, so that
the compiler can remove the dead code (and the variable itself)
when CONFIG_KASAN is unset.

After this patch, napi_skb_cache_put() is inlined into its callers,
and we avoid one kmem_cache_size() call in napi_skb_cache_get()
and napi_skb_cache_get_bulk().
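
For illustration, a minimal user-space sketch of the pattern follows.
object_size(), do_unpoison(), unpoison() and WANT_POISONING are
hypothetical stand-ins for kmem_cache_size(), the KASAN internals,
kasan_mempool_unpoison_object() and CONFIG_KASAN; this is not kernel
code:

	/* Hypothetical stand-ins, not kernel code. */
	#include <stddef.h>

	size_t object_size(void);	/* out of line and not const, like kmem_cache_size() */

	static inline void unpoison(void *obj, size_t size)
	{
	#ifdef WANT_POISONING		/* plays the role of CONFIG_KASAN */
		extern void do_unpoison(void *obj, size_t size);

		do_unpoison(obj, size);
	#else
		(void)obj;		/* no-op: both arguments are dead */
		(void)size;
	#endif
	}

	static size_t cached_size;	/* like skbuff_cache_size, set once at init */

	void cache_init(void)
	{
		cached_size = object_size();	/* one call, instead of one per iteration */
	}

	void flush(void **cache, unsigned int n)
	{
		/* With WANT_POISONING unset, unpoison() expands to nothing,
		 * the loads of cached_size are dead, and the whole loop is
		 * removed; calling object_size() in the loop body instead
		 * would emit a mov+call pair per unrolled iteration, since
		 * the compiler must assume the opaque call has side effects.
		 */
		for (unsigned int i = 0; i < n; i++)
			unpoison(cache[i], cached_size);
	}

Because cached_size is a plain load rather than an opaque call, the
optimizer can prove it dead once unpoison() is empty and drop both the
loads and the variable.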

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://patch.msgid.link/20251016182911.1132792-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Author:     Eric Dumazet
AuthorDate: 2025-10-16 18:29:11 +0000
Committer:  Jakub Kicinski
Parent:     6ae022f8ac
Commit:     a5cd3a60aa


@@ -274,6 +274,11 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 }
 EXPORT_SYMBOL(__netdev_alloc_frag_align);
 
+/* Cache kmem_cache_size(net_hotdata.skbuff_cache) to help the compiler
+ * remove dead code (and skbuff_cache_size) when CONFIG_KASAN is unset.
+ */
+static u32 skbuff_cache_size __read_mostly;
+
 static struct sk_buff *napi_skb_cache_get(void)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
@@ -293,7 +298,7 @@ static struct sk_buff *napi_skb_cache_get(void)
 
 	skb = nc->skb_cache[--nc->skb_count];
 	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
-	kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));
+	kasan_mempool_unpoison_object(skb, skbuff_cache_size);
 
 	return skb;
 }
@@ -345,11 +350,9 @@ u32 napi_skb_cache_get_bulk(void **skbs, u32 n)
 
 get:
 	for (u32 base = nc->skb_count - n, i = 0; i < n; i++) {
-		u32 cache_size = kmem_cache_size(net_hotdata.skbuff_cache);
-
 		skbs[i] = nc->skb_cache[base + i];
 
-		kasan_mempool_unpoison_object(skbs[i], cache_size);
+		kasan_mempool_unpoison_object(skbs[i], skbuff_cache_size);
 		memset(skbs[i], 0, offsetof(struct sk_buff, tail));
 	}
 
@@ -1437,7 +1440,7 @@ static void napi_skb_cache_put(struct sk_buff *skb)
 	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
 		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
 			kasan_mempool_unpoison_object(nc->skb_cache[i],
-						      kmem_cache_size(net_hotdata.skbuff_cache));
+						      skbuff_cache_size);
 		kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF,
 				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
@@ -5125,6 +5128,8 @@ void __init skb_init(void)
 					      offsetof(struct sk_buff, cb),
 					      sizeof_field(struct sk_buff, cb),
 					      NULL);
+	skbuff_cache_size = kmem_cache_size(net_hotdata.skbuff_cache);
+
 	net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
 						sizeof(struct sk_buff_fclones),
 						0,