sched/fair: Skip sched_balance_running cmpxchg when balance is not due
The NUMA sched domain sets the SD_SERIALIZE flag by default, allowing
only one NUMA load-balancing operation to run system-wide at a time.
Currently, each sched group leader directly under the NUMA domain
attempts to acquire the global sched_balance_running flag via cmpxchg()
before checking whether load balancing is due or whether it is the
designated load balancer for that NUMA domain. On systems with a large
number of cores, this causes significant cache contention on the shared
sched_balance_running flag.
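
For illustration, the pre-patch ordering in sched_balance_domains()
looked roughly as follows (condensed from the hunk removed below,
comments added): the global cmpxchg() is attempted before we even know
whether the balance interval has expired.

	need_serialize = sd->flags & SD_SERIALIZE;
	if (need_serialize) {
		/* Every leader CPU hits this shared cache line, due or not. */
		if (atomic_cmpxchg_acquire(&sched_balance_running, 0, 1))
			goto out;
	}

	if (time_after_eq(jiffies, sd->last_balance + interval)) {
		/* ...only here do we learn whether balancing was even due. */
	}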
Reduce these unnecessary cmpxchg() operations by first checking, via
should_we_balance(), that this CPU is the designated load balancer for
the NUMA domain, and that the balance interval has expired, before
trying to acquire sched_balance_running to load balance the NUMA
domain.
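
The serialized acquisition now happens in sched_balance_rq(), after
should_we_balance() has confirmed that this CPU is the designated
balancer, and after sched_balance_domains() has already verified that
the interval expired. A condensed sketch (taken from the diff below,
comments added):

	if (!should_we_balance(&env)) {
		*continue_balancing = 0;
		goto out_balanced;
	}

	if (!need_unlock && (sd->flags & SD_SERIALIZE) && idle != CPU_NEWLY_IDLE) {
		int zero = 0;
		/* Touch the shared flag only when a serialized balance is due. */
		if (!atomic_try_cmpxchg_acquire(&sched_balance_running, &zero, 1))
			goto out_balanced;

		need_unlock = true;
	}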
On a 2-socket Granite Rapids system with sub-NUMA clustering enabled,
running an OLTP workload, 7.8% of total CPU cycles were spent in
sched_balance_domains(), contending on sched_balance_running, before
this change:
         :  104   static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
         :  105   {
         :  106           return arch_cmpxchg(&v->counter, old, new);
    0.00 :  ffffffff81326e6c: xor    %eax,%eax
    0.00 :  ffffffff81326e6e: mov    $0x1,%ecx
    0.00 :  ffffffff81326e73: lock cmpxchg %ecx,0x2394195(%rip)  # ffffffff836bb010 <sched_balance_running>
         :  110   sched_balance_domains():
         :  12234         if (atomic_cmpxchg_acquire(&sched_balance_running, 0, 1))
   99.39 :  ffffffff81326e7b: test   %eax,%eax
    0.00 :  ffffffff81326e7d: jne    ffffffff81326e99 <sched_balance_domains+0x209>
         :  12238         if (time_after_eq(jiffies, sd->last_balance + interval)) {
    0.00 :  ffffffff81326e7f: mov    0x14e2b3a(%rip),%rax  # ffffffff828099c0 <jiffies_64>
    0.00 :  ffffffff81326e86: sub    0x48(%r14),%rax
    0.00 :  ffffffff81326e8a: cmp    %rdx,%rax
After applying this fix, sched_balance_domains() is gone from the
profile and there is a 5% throughput improvement.
[peterz: made it so that redo retains the 'lock' and split out the
CPU_NEWLY_IDLE change to a separate patch]
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Chen Yu <yu.c.chen@intel.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Reviewed-by: K Prateek Nayak <kprateek.nayak@amd.com>
Reviewed-by: Srikar Dronamraju <srikar@linux.ibm.com>
Tested-by: Mohini Narkhede <mohini.narkhede@intel.com>
Tested-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Link: https://patch.msgid.link/6fed119b723c71552943bfe5798c93851b30a361.1762800251.git.tim.c.chen@linux.intel.com
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11680,6 +11680,21 @@ static void update_lb_imbalance_stat(struct lb_env *env, struct sched_domain *sd
 	}
 }
 
+/*
+ * This flag serializes load-balancing passes over large domains
+ * (above the NODE topology level) - only one load-balancing instance
+ * may run at a time, to reduce overhead on very large systems with
+ * lots of CPUs and large NUMA distances.
+ *
+ * - Note that load-balancing passes triggered while another one
+ *   is executing are skipped and not re-tried.
+ *
+ * - Also note that this does not serialize rebalance_domains()
+ *   execution, as non-SD_SERIALIZE domains will still be
+ *   load-balanced in parallel.
+ */
+static atomic_t sched_balance_running = ATOMIC_INIT(0);
+
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
@@ -11705,6 +11720,7 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq,
 		.fbq_type	= all,
 		.tasks		= LIST_HEAD_INIT(env.tasks),
 	};
+	bool need_unlock = false;
 
 	cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
 
@@ -11716,6 +11732,14 @@ redo:
 		goto out_balanced;
 	}
 
+	if (!need_unlock && (sd->flags & SD_SERIALIZE) && idle != CPU_NEWLY_IDLE) {
+		int zero = 0;
+		if (!atomic_try_cmpxchg_acquire(&sched_balance_running, &zero, 1))
+			goto out_balanced;
+
+		need_unlock = true;
+	}
+
 	group = sched_balance_find_src_group(&env);
 	if (!group) {
 		schedstat_inc(sd->lb_nobusyg[idle]);
@@ -11956,6 +11980,9 @@ out_one_pinned:
 	    sd->balance_interval < sd->max_interval)
 		sd->balance_interval *= 2;
 out:
+	if (need_unlock)
+		atomic_set_release(&sched_balance_running, 0);
+
 	return ld_moved;
 }
 
@@ -12080,21 +12107,6 @@ out_unlock:
 	return 0;
 }
 
-/*
- * This flag serializes load-balancing passes over large domains
- * (above the NODE topology level) - only one load-balancing instance
- * may run at a time, to reduce overhead on very large systems with
- * lots of CPUs and large NUMA distances.
- *
- * - Note that load-balancing passes triggered while another one
- *   is executing are skipped and not re-tried.
- *
- * - Also note that this does not serialize rebalance_domains()
- *   execution, as non-SD_SERIALIZE domains will still be
- *   load-balanced in parallel.
- */
-static atomic_t sched_balance_running = ATOMIC_INIT(0);
-
 /*
  * Scale the max sched_balance_rq interval with the number of CPUs in the system.
  * This trades load-balance latency on larger machines for less cross talk.
@@ -12150,7 +12162,7 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
 	/* Earliest time when we have to do rebalance again */
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
-	int need_serialize, need_decay = 0;
+	int need_decay = 0;
 	u64 max_cost = 0;
 
 	rcu_read_lock();
@@ -12174,13 +12186,6 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
 		}
 
 		interval = get_sd_balance_interval(sd, busy);
-
-		need_serialize = sd->flags & SD_SERIALIZE;
-		if (need_serialize) {
-			if (atomic_cmpxchg_acquire(&sched_balance_running, 0, 1))
-				goto out;
-		}
-
 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
 			if (sched_balance_rq(cpu, rq, sd, idle, &continue_balancing)) {
 				/*
@@ -12194,9 +12199,6 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
 			sd->last_balance = jiffies;
 			interval = get_sd_balance_interval(sd, busy);
 		}
-		if (need_serialize)
-			atomic_set_release(&sched_balance_running, 0);
-out:
 		if (time_after(next_balance, sd->last_balance + interval)) {
 			next_balance = sd->last_balance + interval;
 			update_next_balance = 1;