mirror of https://github.com/torvalds/linux.git
sched_ext: Separate out scx_kick_cpu() and add @sch to it
In preparation for multiple scheduler support, separate out scx_kick_cpu() from
scx_bpf_kick_cpu() and add the @sch parameter to it. scx_bpf_kick_cpu() now
acquires an RCU read lock, reads $scx_root, and calls scx_kick_cpu() with it if
non-NULL. The passed-in @sch parameter is not used yet.

Internal uses of scx_bpf_kick_cpu() are converted to scx_kick_cpu(). Where $sch
is available, it's used. In the pick_task_scx() path, where no associated
scheduler can be identified, $scx_root is used directly. Note that $scx_root
cannot be NULL in this case.

Reviewed-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
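For context, a minimal, hypothetical sketch of how a sched_ext BPF scheduler invokes the kfunc: the two-argument scx_bpf_kick_cpu() signature seen by BPF schedulers is unchanged by this refactor, which only splits out the internal helper. The callback name and the enqueue logic below are illustrative, and kfunc and flag names may vary by kernel version.

#include <scx/common.bpf.h>

/*
 * Hypothetical ops.enqueue callback: insert the task into the global DSQ
 * and kick an idle CPU so the task is picked up promptly.  The actual kick
 * is deferred to an irq work, so calling this from an ops callback is fine.
 */
void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
{
	s32 cpu;

	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);

	/* wake an idle CPU, if any, from @p's allowed cpumask */
	cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
	if (cpu >= 0)
		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
}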
kernel/sched/ext.c

@@ -136,7 +136,7 @@ static struct kset *scx_kset;
 #include <trace/events/sched_ext.h>
 
 static void process_ddsp_deferred_locals(struct rq *rq);
-static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
+static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags);
 static void scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind,
 		      s64 exit_code, const char *fmt, va_list args);
 
@@ -2125,10 +2125,10 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
 			 * balance(), we want to complete this scheduling cycle and then
 			 * start a new one. IOW, we want to call resched_curr() on the
 			 * next, most likely idle, task, not the current one. Use
-			 * scx_bpf_kick_cpu() for deferred kicking.
+			 * scx_kick_cpu() for deferred kicking.
 			 */
 			if (unlikely(!--nr_loops)) {
-				scx_bpf_kick_cpu(cpu_of(rq), 0);
+				scx_kick_cpu(sch, cpu_of(rq), 0);
 				break;
 			}
 		} while (dspc->nr_tasks);
@@ -2417,7 +2417,8 @@ static struct task_struct *pick_task_scx(struct rq *rq)
 		p = first_local_task(rq);
 		if (!p) {
 			if (kick_idle)
-				scx_bpf_kick_cpu(cpu_of(rq), SCX_KICK_IDLE);
+				scx_kick_cpu(rcu_dereference_sched(scx_root),
+					     cpu_of(rq), SCX_KICK_IDLE);
 			return NULL;
 		}
 
@@ -3721,7 +3722,7 @@ static void scx_clear_softlockup(void)
  *
  * - pick_next_task() suppresses zero slice warning.
  *
- * - scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM
+ * - scx_kick_cpu() is disabled to avoid irq_work malfunction during PM
  *   operations.
  *
  * - scx_prio_less() reverts to the default core_sched_at order.
@@ -5809,17 +5810,7 @@ static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
 
 __bpf_kfunc_start_defs();
 
-/**
- * scx_bpf_kick_cpu - Trigger reschedule on a CPU
- * @cpu: cpu to kick
- * @flags: %SCX_KICK_* flags
- *
- * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
- * trigger rescheduling on a busy CPU. This can be called from any online
- * scx_ops operation and the actual kicking is performed asynchronously through
- * an irq work.
- */
-__bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
+static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags)
 {
 	struct rq *this_rq;
 	unsigned long irq_flags;
@@ -5872,6 +5863,26 @@ out:
 	local_irq_restore(irq_flags);
 }
 
+/**
+ * scx_bpf_kick_cpu - Trigger reschedule on a CPU
+ * @cpu: cpu to kick
+ * @flags: %SCX_KICK_* flags
+ *
+ * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
+ * trigger rescheduling on a busy CPU. This can be called from any online
+ * scx_ops operation and the actual kicking is performed asynchronously through
+ * an irq work.
+ */
+__bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
+{
+	struct scx_sched *sch;
+
+	guard(rcu)();
+	sch = rcu_dereference(scx_root);
+	if (likely(sch))
+		scx_kick_cpu(sch, cpu, flags);
+}
+
 /**
  * scx_bpf_dsq_nr_queued - Return the number of queued tasks
  * @dsq_id: id of the DSQ