sched/fair: Fix forced idle sibling starvation corner case
author Vineeth Pillai <viremana@linux.microsoft.com>
Tue, 17 Nov 2020 23:19:38 +0000 (18:19 -0500)
committer Peter Zijlstra <peterz@infradead.org>
Wed, 12 May 2021 09:43:29 +0000 (11:43 +0200)
If there is only one long-running local task and the sibling is
forced idle, it might not get a chance to run until a schedule
event happens on any cpu in the core.

So we check for this condition during a tick to see if a sibling
is starved and then give it a chance to schedule.

Signed-off-by: Vineeth Pillai <viremana@linux.microsoft.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210422123308.617407840@infradead.org
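
The check described above is implemented in task_tick_core() in the fair.c
diff below; the following minimal user-space sketch mirrors its logic
(toy_rq, slice_used(), the 6 ms slice and the 1 ms tick interval are
illustrative stand-ins, not kernel code):

/*
 * Illustration only: one runnable task on this CPU, the SMT sibling is
 * forced idle, and a periodic tick asks whether the task has used up
 * its share of the slice.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_NR_TASKS_DURING_FORCEIDLE	2

struct toy_rq {
	bool core_forceidle;	/* a sibling of this core is forced idle */
	unsigned int nr_running;	/* runnable tasks on this CPU         */
	uint64_t slice_ns;	/* slice granted to the current task      */
	uint64_t rtime_ns;	/* runtime consumed in the current slice  */
};

/* Same comparison as __entity_slice_used(): treat the slice as shared. */
static bool slice_used(const struct toy_rq *rq, int min_nr_tasks)
{
	return rq->rtime_ns * min_nr_tasks > rq->slice_ns;
}

/* Same condition as task_tick_core(): should the tick trigger a resched? */
static bool should_resched(const struct toy_rq *rq)
{
	return rq->core_forceidle && rq->nr_running == 1 &&
	       slice_used(rq, MIN_NR_TASKS_DURING_FORCEIDLE);
}

int main(void)
{
	struct toy_rq rq = {
		.core_forceidle = true,
		.nr_running = 1,
		.slice_ns = 6000000,	/* hypothetical 6 ms slice */
		.rtime_ns = 0,
	};

	/* Simulate a tick after every 1 ms of runtime. */
	for (int tick = 1; tick <= 6; tick++) {
		rq.rtime_ns = (uint64_t)tick * 1000000;
		printf("tick %d: rtime=%llu ms resched=%d\n", tick,
		       (unsigned long long)(rq.rtime_ns / 1000000),
		       (int)should_resched(&rq));
	}
	return 0;
}

With MIN_NR_TASKS_DURING_FORCEIDLE set to 2, the reschedule fires on the
first tick after the lone task has run for more than half of its slice
(tick 4 in this run), giving the forced idle sibling a chance to pick.
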
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/sched.h

index db763f42a4b0fe62f582eafcc7ef06b836da9971..f5e1e6f96411b80c418a6a007d2ba31edfec5c3b 100644 (file)
@@ -5459,16 +5459,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
        /* reset state */
        rq->core->core_cookie = 0UL;
+       if (rq->core->core_forceidle) {
+               need_sync = true;
+               rq->core->core_forceidle = false;
+       }
        for_each_cpu(i, smt_mask) {
                struct rq *rq_i = cpu_rq(i);
 
                rq_i->core_pick = NULL;
 
-               if (rq_i->core_forceidle) {
-                       need_sync = true;
-                       rq_i->core_forceidle = false;
-               }
-
                if (i != cpu)
                        update_rq_clock(rq_i);
        }
@@ -5588,8 +5587,10 @@ next_class:;
                if (!rq_i->core_pick)
                        continue;
 
-               if (is_task_rq_idle(rq_i->core_pick) && rq_i->nr_running)
-                       rq_i->core_forceidle = true;
+               if (is_task_rq_idle(rq_i->core_pick) && rq_i->nr_running &&
+                   !rq_i->core->core_forceidle) {
+                       rq_i->core->core_forceidle = true;
+               }
 
                if (i == cpu) {
                        rq_i->core_pick = NULL;
index 08be7a2eb05b49f483dd76ee7ef531d5856c2d07..4d1ecab41e8043bf94c95b94f5be3234c9b4c65d 100644 (file)
@@ -10767,6 +10767,44 @@ static void rq_offline_fair(struct rq *rq)
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_SCHED_CORE
+static inline bool
+__entity_slice_used(struct sched_entity *se, int min_nr_tasks)
+{
+       u64 slice = sched_slice(cfs_rq_of(se), se);
+       u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;
+
+       return (rtime * min_nr_tasks > slice);
+}
+
+#define MIN_NR_TASKS_DURING_FORCEIDLE  2
+static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
+{
+       if (!sched_core_enabled(rq))
+               return;
+
+       /*
+        * If runqueue has only one task which used up its slice and
+        * if the sibling is forced idle, then trigger schedule to
+        * give forced idle task a chance.
+        *
+        * sched_slice() considers only this active rq and it gets the
+        * whole slice. But during force idle, we have siblings acting
+        * like a single runqueue and hence we need to consider runnable
+        * tasks on this cpu and the forced idle cpu. Ideally, we should
+        * go through the forced idle rq, but that would be a perf hit.
+        * We can assume that the forced idle cpu has at least
+        * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check
+        * if we need to give up the cpu.
+        */
+       if (rq->core->core_forceidle && rq->cfs.nr_running == 1 &&
+           __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
+               resched_curr(rq);
+}
+#else
+static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
+#endif
+
 /*
  * scheduler tick hitting a task of our scheduling class.
  *
@@ -10790,6 +10828,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 
        update_misfit_status(curr, rq);
        update_overutilized_status(task_rq(curr));
+
+       task_tick_core(rq, curr);
 }
 
 /*
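
With MIN_NR_TASKS_DURING_FORCEIDLE = 2, the comparison in
__entity_slice_used() above is equivalent to asking whether curr has run
for more than half of its slice, while avoiding a division in the tick
path. A tiny stand-alone boundary check of that arithmetic (the 6 ms
slice is made up for illustration):

#include <assert.h>
#include <stdint.h>

/* Illustration only: same comparison as __entity_slice_used() above. */
static int entity_slice_used(uint64_t rtime, uint64_t slice, int min_nr_tasks)
{
	return rtime * min_nr_tasks > slice;
}

int main(void)
{
	const uint64_t slice = 6000000;	/* hypothetical 6 ms slice */

	/* Exactly half the slice consumed: no reschedule yet. */
	assert(!entity_slice_used(3000000, slice, 2));
	/* Just past half the slice: the tick would call resched_curr(). */
	assert(entity_slice_used(3000001, slice, 2));
	return 0;
}
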
index dd44a3127e9ca078e6db396e69e8f497cb9bb6d0..db555143380d3b71bfc34cb3e32d9295641048b4 100644 (file)
@@ -1083,12 +1083,12 @@ struct rq {
        unsigned int            core_enabled;
        unsigned int            core_sched_seq;
        struct rb_root          core_tree;
-       unsigned char           core_forceidle;
 
        /* shared state */
        unsigned int            core_task_seq;
        unsigned int            core_pick_seq;
        unsigned long           core_cookie;
+       unsigned char           core_forceidle;
 #endif
 };
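
The sched.h hunk moves core_forceidle out of the per-CPU fields and under
the /* shared state */ comment: with core scheduling, every sibling
runqueue reaches that shared state through its rq->core pointer, as the
core.c and fair.c hunks above do with rq->core->core_forceidle, so the
flag now exists once per core rather than once per CPU. A minimal
stand-alone sketch of that access pattern, with an illustrative toy_rq in
place of the kernel's struct rq:

/*
 * Illustration only: the rq->core access pattern behind the sched.h
 * change.  toy_rq stands in for the kernel's struct rq; a real core has
 * its leader chosen by the core-scheduling code, not hard-coded.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_rq {
	struct toy_rq *core;	/* leader rq shared by all SMT siblings */
	bool core_forceidle;	/* only the leader's copy is consulted  */
};

int main(void)
{
	struct toy_rq cpu0 = { .core = &cpu0, .core_forceidle = false };
	struct toy_rq cpu1 = { .core = &cpu0, .core_forceidle = false };

	/* cpu1 gets forced idle during pick: set the flag once, core-wide. */
	cpu1.core->core_forceidle = true;

	/* The tick on cpu0 sees it through its own rq->core pointer. */
	printf("cpu0 observes core_forceidle=%d\n",
	       (int)cpu0.core->core_forceidle);
	return 0;
}

Setting the flag through either sibling's rq->core is what lets the tick
on the busy CPU notice that its sibling was forced idle without walking
the sibling's runqueue.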