Skip to content
This repository has been archived by the owner on Jun 18, 2024. It is now read-only.

Commit

Permalink
scx: Refactor consume_dispatch_q()
Browse files Browse the repository at this point in the history
- Factor out consume_local_task() and consume_remote_task()

- Rename task_can_run_on_rq() to task_can_run_on_remote_rq() and improve UP handling.

This will be used to implement targeted task consumption while iterating
DSQs.
  • Loading branch information
htejun committed Apr 22, 2024
1 parent 6de0e16 commit a5be825
Showing 1 changed file with 54 additions and 33 deletions.
87 changes: 54 additions & 33 deletions kernel/sched/ext.c
Original file line number Diff line number Diff line change
Expand Up @@ -2120,38 +2120,13 @@ static void dispatch_to_local_dsq_unlock(struct rq *rq, struct rq_flags *rf,
}
#endif /* CONFIG_SMP */


static bool task_can_run_on_rq(struct task_struct *p, struct rq *rq)
{
return likely(test_rq_online(rq)) && !is_migration_disabled(p) &&
cpumask_test_cpu(cpu_of(rq), p->cpus_ptr);
}

static bool consume_dispatch_q(struct rq *rq, struct rq_flags *rf,
struct scx_dispatch_q *dsq)
static void consume_local_task(struct rq *rq, struct scx_dispatch_q *dsq,
struct task_struct *p)
{
struct scx_rq *scx_rq = &rq->scx;
struct task_struct *p;
struct rq *task_rq;
bool moved = false;
retry:
if (list_empty(&dsq->list))
return false;

raw_spin_lock(&dsq->lock);

nldsq_for_each_task(p, dsq) {
task_rq = task_rq(p);
if (rq == task_rq)
goto this_rq;
if (task_can_run_on_rq(p, rq))
goto remote_rq;
}

raw_spin_unlock(&dsq->lock);
return false;
lockdep_assert_held(&dsq->lock); /* released on return */

this_rq:
/* @dsq is locked and @p is on this rq */
WARN_ON_ONCE(p->scx.holding_cpu >= 0);
task_unlink_from_dsq(p, dsq);
Expand All @@ -2160,10 +2135,23 @@ static bool consume_dispatch_q(struct rq *rq, struct rq_flags *rf,
dsq_mod_nr(&scx_rq->local_dsq, 1);
p->scx.dsq = &scx_rq->local_dsq;
raw_spin_unlock(&dsq->lock);
return true;
}

remote_rq:
#ifdef CONFIG_SMP
static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq)
{
return likely(test_rq_online(rq)) && !is_migration_disabled(p) &&
cpumask_test_cpu(cpu_of(rq), p->cpus_ptr);
}

static bool consume_remote_task(struct rq *rq, struct rq_flags *rf,
struct scx_dispatch_q *dsq,
struct task_struct *p, struct rq *task_rq)
{
bool moved = false;

lockdep_assert_held(&dsq->lock); /* released on return */

/*
* @dsq is locked and @p is on a remote rq. @p is currently protected by
* @dsq->lock. We want to pull @p to @rq but may deadlock if we grab
Expand All @@ -2184,10 +2172,43 @@ static bool consume_dispatch_q(struct rq *rq, struct rq_flags *rf,
moved = move_task_to_local_dsq(rq, p, 0);

double_unlock_balance(rq, task_rq);

return moved;
}
#else /* CONFIG_SMP */
static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq) { return false; }
/* !SMP: remote consumption is never possible; always report failure. */
static bool consume_remote_task(struct rq *rq, struct rq_flags *rf,
				struct scx_dispatch_q *dsq,
				struct task_struct *p, struct rq *task_rq)
{
	return false;
}
#endif /* CONFIG_SMP */
if (likely(moved))
return true;
goto retry;

/*
 * Try to consume one task from @dsq and move it onto @rq's local DSQ.
 *
 * Returns %true if a task was consumed, %false if @dsq contained no task
 * that @rq may take.  Both consume_local_task() and consume_remote_task()
 * release @dsq->lock before returning (see their "released on return"
 * lockdep annotations), so no unlock is needed on the success paths here.
 *
 * NOTE(review): locking context of the caller (rq lock held? @rf usage)
 * is not visible in this chunk — confirm against the full file.
 */
static bool consume_dispatch_q(struct rq *rq, struct rq_flags *rf,
struct scx_dispatch_q *dsq)
{
struct task_struct *p;
retry:
/*
 * Unlocked emptiness check first so the common "nothing queued" case
 * avoids taking @dsq->lock at all.
 */
if (list_empty(&dsq->list))
return false;

raw_spin_lock(&dsq->lock);

nldsq_for_each_task(p, dsq) {
struct rq *task_rq = task_rq(p);

if (rq == task_rq) {
/* @p is already on this rq; helper drops @dsq->lock */
consume_local_task(rq, dsq, p);
return true;
}

if (task_can_run_on_remote_rq(p, rq)) {
/*
 * Helper drops @dsq->lock either way.  On failure the
 * queue may have changed while unlocked, so restart the
 * scan from the top rather than continuing this walk.
 */
if (likely(consume_remote_task(rq, rf, dsq, p, task_rq)))
return true;
goto retry;
}
}

raw_spin_unlock(&dsq->lock);
return false;
}

enum dispatch_to_local_dsq_ret {
Expand Down

0 comments on commit a5be825

Please sign in to comment.