Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7729f9a45a8b..94d9a6c5ff94 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2984,6 +2984,16 @@ next:
 	pulled++;
 	rem_load_move -= p->se.load.weight;
 
+#ifdef CONFIG_PREEMPT
+	/*
+	 * NEWIDLE balancing is a source of latency, so preemptible kernels
+	 * will stop after the first task is pulled to minimize the critical
+	 * section.
+	 */
+	if (idle == CPU_NEWLY_IDLE)
+		goto out;
+#endif
+
 	/*
 	 * We only want to steal up to the prescribed amount of weighted load.
 	 */
@@ -3030,9 +3040,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 				sd, idle, all_pinned, &this_best_prio);
 		class = class->next;
 
+#ifdef CONFIG_PREEMPT
+		/*
+		 * NEWIDLE balancing is a source of latency, so preemptible
+		 * kernels will stop after the first task is pulled to minimize
+		 * the critical section.
+		 */
 		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
 			break;
-
+#endif
 	} while (class && max_load_move > total_load_moved);
 
 	return total_load_moved > 0;
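
For readers without the surrounding kernel source at hand, below is a minimal userspace sketch of the pattern both hunks introduce: an early exit from the pull loop, compiled in only when CONFIG_PREEMPT is set, taken when the balancing CPU is newly idle. Everything here (simulated_rq, pull_task_sim, move_tasks_sim, the 1024 weight constant) is a hypothetical stand-in for illustration, not the kernel's API; only the control flow mirrors the patch.

/*
 * Sketch of the CONFIG_PREEMPT-gated early exit. All names are
 * hypothetical stand-ins; only the control flow mirrors the patch.
 * Build: cc -DCONFIG_PREEMPT -o newidle newidle.c
 * (omit -DCONFIG_PREEMPT for the throughput-oriented behaviour).
 */
#include <stdio.h>

enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };

struct simulated_rq {
	int nr_running;	/* tasks currently queued on this runqueue */
};

/* Move one task from busiest to this_rq; return its load weight. */
static unsigned long pull_task_sim(struct simulated_rq *busiest,
				   struct simulated_rq *this_rq)
{
	busiest->nr_running--;
	this_rq->nr_running++;
	return 1024;	/* stand-in for a nice-0 task's load weight */
}

static unsigned long move_tasks_sim(struct simulated_rq *this_rq,
				    struct simulated_rq *busiest,
				    unsigned long max_load_move,
				    enum cpu_idle_type idle)
{
	unsigned long total_load_moved = 0;

	while (busiest->nr_running && total_load_moved < max_load_move) {
		total_load_moved += pull_task_sim(busiest, this_rq);
#ifdef CONFIG_PREEMPT
		/*
		 * Mirror of the patch: on preemptible kernels a newly
		 * idle CPU stops after the first pull to keep the
		 * critical section short.
		 */
		if (idle == CPU_NEWLY_IDLE)
			break;
#endif
	}
	return total_load_moved;
}

int main(void)
{
	struct simulated_rq this_rq = { .nr_running = 0 };
	struct simulated_rq busiest = { .nr_running = 8 };
	unsigned long moved;

	moved = move_tasks_sim(&this_rq, &busiest, 4 * 1024, CPU_NEWLY_IDLE);
	printf("moved %lu weight, this_rq now runs %d task(s)\n",
	       moved, this_rq.nr_running);
	return 0;
}

With -DCONFIG_PREEMPT the loop pulls a single task and returns; without it, the loop keeps pulling until max_load_move is reached. That is the trade-off the patch encodes: non-preemptible kernels keep the fuller balancing pass, while preemptible kernels cut NEWIDLE balancing short to bound the latency of the critical section.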