RDMA/core: Reduce cond_resched() frequency in __ib_umem_release
The current implementation calls cond_resched() for every SG entry
in __ib_umem_release(), which adds needless overhead.

This patch introduces RESCHED_LOOP_CNT_THRESHOLD (0x1000) to limit
how often cond_resched() is called. The function now yields the CPU
only once every 4096 iterations, and skips the yield at the very
first iteration, which avoids any scheduling overhead in the common
case of many small umems.
Fixes: d056bc45b6 ("RDMA/core: Prevent soft lockup during large user memory region cleanup")
Signed-off-by: Li RongQing <lirongqing@baidu.com>
Link: https://patch.msgid.link/20251126025147.2627-1-lirongqing@baidu.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
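
As an aside (not part of the patch), the throttling pattern is easy to
see in a standalone sketch. Here cond_resched() is stubbed as a counter
so the firing frequency can be observed, and the entry count is made up;
this is illustrative userspace code, not kernel code:

/*
 * Hypothetical userspace sketch of the modulo-throttled yield used by
 * this patch. cond_resched() is stubbed as a counter; it is NOT the
 * kernel function.
 */
#include <stdio.h>

#define RESCHED_LOOP_CNT_THRESHOLD 0x1000	/* 4096, as in the patch */

static unsigned long resched_calls;

static void cond_resched(void)
{
	resched_calls++;	/* the real one may yield the CPU here */
}

int main(void)
{
	unsigned long nr_entries = 10000;	/* made-up SG entry count */
	unsigned long i;

	for (i = 0; i < nr_entries; i++) {
		/* per-entry work would go here, e.g. unpinning pages */
		if (i && !(i % RESCHED_LOOP_CNT_THRESHOLD))
			cond_resched();
	}

	/* prints 2: yields at i = 4096 and i = 8192, never at i = 0 */
	printf("cond_resched() called %lu times for %lu entries\n",
	       resched_calls, nr_entries);
	return 0;
}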
commit f37e286879
parent 01dad9ca37
committed by Leon Romanovsky
drivers/infiniband/core/umem.c

@@ -45,6 +45,8 @@
 
 #include "uverbs.h"
 
+#define RESCHED_LOOP_CNT_THRESHOLD 0x1000
+
 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
 {
 	bool make_dirty = umem->writable && dirty;
@@ -58,7 +60,9 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
 	for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i) {
 		unpin_user_page_range_dirty_lock(sg_page(sg),
 				DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);
-		cond_resched();
+
+		if (i && !(i % RESCHED_LOOP_CNT_THRESHOLD))
+			cond_resched();
 	}
 
 	sg_free_append_table(&umem->sgt_append);
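
A note on the design choice: the "i &&" guard means a umem with fewer
than 4096 SG entries never reschedules inside the loop at all, while the
periodic check every 0x1000 entries preserves the soft-lockup protection
that d056bc45b6 added for very large memory regions. For example, a
table of 10,000 SG entries now triggers cond_resched() twice (at
i = 4096 and i = 8192) instead of 10,000 times.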