sched_ext: Minor cleanups to scx_task_iter

- Use memset() in scx_task_iter_start() instead of zeroing fields individually.

- In scx_task_iter_next(), move __scx_task_iter_maybe_relock() to after the
  batch check, which is simpler; see the sketch further below.

- Update comment to reflect that tasks are removed from scx_tasks when dead
  (commit 7900aa699c ("sched_ext: Fix cgroup exit ordering by moving
  sched_ext_free() to finish_task_switch()")).

No functional changes.
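
For illustration, a rough sketch of the relock helper being reordered, inferred
from the list_locked flag visible in this diff (the actual
__scx_task_iter_maybe_relock() in kernel/sched/ext.c may differ in detail):
because it only re-takes scx_tasks_lock when a prior scx_task_iter_unlock()
dropped it, calling it once, unconditionally, after the batch check covers both
the just-unlocked and still-locked cases.

	/* Illustrative sketch only, not the verbatim helper. */
	static void __scx_task_iter_maybe_relock(struct scx_task_iter *iter)
	{
		/* re-take scx_tasks_lock only if it was dropped earlier */
		if (!iter->list_locked) {
			spin_lock_irq(&scx_tasks_lock);
			iter->list_locked = true;
		}
	}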

Signed-off-by: Tejun Heo <tj@kernel.org>

@@ -470,16 +470,16 @@ struct scx_task_iter {
  * RCU read lock or obtaining a reference count.
  *
  * All tasks which existed when the iteration started are guaranteed to be
- * visited as long as they still exist.
+ * visited as long as they are not dead.
  */
 static void scx_task_iter_start(struct scx_task_iter *iter)
 {
+	memset(iter, 0, sizeof(*iter));
+
 	spin_lock_irq(&scx_tasks_lock);
 
 	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
 	list_add(&iter->cursor.tasks_node, &scx_tasks);
-	iter->locked_task = NULL;
-	iter->cnt = 0;
 	iter->list_locked = true;
 }
 
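
For reference, the fields cleared here belong to struct scx_task_iter. A rough
sketch of its shape as implied by these hunks (the real definition in
kernel/sched/ext.c has additional fields and the types here are guesses):

	struct scx_task_iter {
		struct sched_ext_entity	cursor;		/* cursor node threaded onto scx_tasks */
		struct task_struct	*locked_task;	/* task whose rq is currently locked, if any */
		u32			cnt;		/* next() count, drives SCX_TASK_ITER_BATCH */
		bool			list_locked;	/* whether scx_tasks_lock is currently held */
		/* ... further fields elided ... */
	};
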
@@ -545,14 +545,13 @@ static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
 	struct list_head *cursor = &iter->cursor.tasks_node;
 	struct sched_ext_entity *pos;
 
-	__scx_task_iter_maybe_relock(iter);
-
 	if (!(++iter->cnt % SCX_TASK_ITER_BATCH)) {
 		scx_task_iter_unlock(iter);
 		cond_resched();
-		__scx_task_iter_maybe_relock(iter);
 	}
 
+	__scx_task_iter_maybe_relock(iter);
+
 	list_for_each_entry(pos, cursor, tasks_node) {
 		if (&pos->tasks_node == &scx_tasks)
 			return NULL;
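
As a usage sketch (hypothetical caller; it assumes the scx_task_iter_start(),
scx_task_iter_next() and scx_task_iter_stop() helpers, of which only the first
two appear above), the iterator is typically driven as below, with the batch
check dropping and re-taking scx_tasks_lock every SCX_TASK_ITER_BATCH tasks so
a long walk does not hold the lock across the whole list:

	/* Hypothetical caller, for illustration only. */
	static void walk_scx_tasks_example(void)
	{
		struct scx_task_iter iter;
		struct task_struct *p;

		scx_task_iter_start(&iter);
		while ((p = scx_task_iter_next(&iter))) {
			/* scx_tasks_lock is held here; @p is still on scx_tasks */
		}
		scx_task_iter_stop(&iter);	/* assumed counterpart that drops the lock */
	}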