mirror of
https://github.com/torvalds/linux.git
synced 2025-12-07 20:06:24 +00:00
bpf: Cleanup unused func args in rqspinlock implementation
Clean up unused function arguments in the check_deadlock* functions.
Fixes: 31158ad02d ("rqspinlock: Add deadlock detection and recovery")
Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@gmail.com>
Reviewed-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20251001172702.122838-1-sidchintamaneni@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
committed by
Alexei Starovoitov
parent
0db4941d9d
commit
56b4d16239
@@ -89,15 +89,14 @@ struct rqspinlock_timeout {
|
||||
DEFINE_PER_CPU_ALIGNED(struct rqspinlock_held, rqspinlock_held_locks);
|
||||
EXPORT_SYMBOL_GPL(rqspinlock_held_locks);
|
||||
|
||||
static bool is_lock_released(rqspinlock_t *lock, u32 mask, struct rqspinlock_timeout *ts)
|
||||
static bool is_lock_released(rqspinlock_t *lock, u32 mask)
|
||||
{
|
||||
if (!(atomic_read_acquire(&lock->val) & (mask)))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
static noinline int check_deadlock_AA(rqspinlock_t *lock, u32 mask,
|
||||
struct rqspinlock_timeout *ts)
|
||||
static noinline int check_deadlock_AA(rqspinlock_t *lock)
|
||||
{
|
||||
struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
|
||||
int cnt = min(RES_NR_HELD, rqh->cnt);
|
||||
@@ -118,8 +117,7 @@ static noinline int check_deadlock_AA(rqspinlock_t *lock, u32 mask,
|
||||
* more locks, which reduce to ABBA). This is not exhaustive, and we rely on
|
||||
* timeouts as the final line of defense.
|
||||
*/
|
||||
static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
|
||||
struct rqspinlock_timeout *ts)
|
||||
static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask)
|
||||
{
|
||||
struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
|
||||
int rqh_cnt = min(RES_NR_HELD, rqh->cnt);
|
||||
@@ -142,7 +140,7 @@ static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
|
||||
* Let's ensure to break out of this loop if the lock is available for
|
||||
* us to potentially acquire.
|
||||
*/
|
||||
if (is_lock_released(lock, mask, ts))
|
||||
if (is_lock_released(lock, mask))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
@@ -198,15 +196,14 @@ static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static noinline int check_deadlock(rqspinlock_t *lock, u32 mask,
|
||||
struct rqspinlock_timeout *ts)
|
||||
static noinline int check_deadlock(rqspinlock_t *lock, u32 mask)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = check_deadlock_AA(lock, mask, ts);
|
||||
ret = check_deadlock_AA(lock);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = check_deadlock_ABBA(lock, mask, ts);
|
||||
ret = check_deadlock_ABBA(lock, mask);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@@ -234,7 +231,7 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
|
||||
*/
|
||||
if (prev + NSEC_PER_MSEC < time) {
|
||||
ts->cur = time;
|
||||
return check_deadlock(lock, mask, ts);
|
||||
return check_deadlock(lock, mask);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
Reference in New Issue
Block a user