Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  74
1 file changed, 47 insertions(+), 27 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 24086e7e7593..f61837ad336d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -822,6 +822,26 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
* re-elected due to buddy favours.
*/
clear_buddies(cfs_rq, curr);
+ return;
+ }
+
+ /*
+ * Ensure that a task that missed wakeup preemption by a
+ * narrow margin doesn't have to wait for a full slice.
+ * This also mitigates buddy induced latencies under load.
+ */
+ if (!sched_feat(WAKEUP_PREEMPT))
+ return;
+
+ if (delta_exec < sysctl_sched_min_granularity)
+ return;
+
+ if (cfs_rq->nr_running > 1) {
+ struct sched_entity *se = __pick_next_entity(cfs_rq);
+ s64 delta = curr->vruntime - se->vruntime;
+
+ if (delta > ideal_runtime)
+ resched_task(rq_of(cfs_rq)->curr);
}
}
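
This hunk adds a second tick-time preemption trigger to check_preempt_tick(): once the running entity has consumed at least sysctl_sched_min_granularity of real time, it is rescheduled whenever its vruntime leads the leftmost queued entity by more than ideal_runtime. The following is a minimal userspace sketch of that decision (the WAKEUP_PREEMPT feature gate is not modeled); the struct, the constants, and should_preempt_on_tick() are illustrative stand-ins, not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct entity {
	int64_t vruntime;	/* virtual runtime */
	int64_t delta_exec;	/* real time run in the current slice */
};

static const int64_t min_granularity = 4000000;	/* ~4 ms, illustrative */
static const int64_t ideal_runtime   = 6000000;	/* ~6 ms, illustrative */

static bool should_preempt_on_tick(const struct entity *curr,
				   const struct entity *leftmost)
{
	if (curr->delta_exec > ideal_runtime)
		return true;			/* slice fully consumed */
	if (curr->delta_exec < min_granularity)
		return false;			/* ran too briefly to preempt */
	/* the new case: current is far ahead of the leftmost waiter */
	return curr->vruntime - leftmost->vruntime > ideal_runtime;
}

int main(void)
{
	struct entity curr = { .vruntime = 20000000, .delta_exec = 5000000 };
	struct entity left = { .vruntime = 10000000, .delta_exec = 0 };

	printf("preempt current: %d\n", should_preempt_on_tick(&curr, &left));
	return 0;
}
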
@@ -861,12 +881,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
struct sched_entity *se = __pick_next_entity(cfs_rq);
+ struct sched_entity *left = se;
- if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
- return cfs_rq->next;
+ if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
+ se = cfs_rq->next;
- if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
- return cfs_rq->last;
+ /*
+ * Prefer last buddy, try to return the CPU to a preempted task.
+ */
+ if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
+ se = cfs_rq->last;
+
+ clear_buddies(cfs_rq, se);
return se;
}
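
In the rewritten pick_next_entity() above, the selection order is: start from the leftmost entity, let the next buddy override it, then let the last buddy override both, and finally strip buddy status from whichever entity won. The sketch below models that ordering in plain C; buddy_ok() is a made-up stand-in for the "wakeup_preempt_entity(..., left) < 1" fairness check, and none of these names are the kernel's.

#include <stdio.h>

struct sched_ent {
	const char *name;
	long vruntime;
};

static const long wakeup_gran = 1000000;	/* illustrative granularity */

/* stand-in for "wakeup_preempt_entity(buddy, left) < 1": accept the
 * buddy only while it is not too far ahead of the leftmost entity */
static int buddy_ok(const struct sched_ent *buddy,
		    const struct sched_ent *left)
{
	return buddy->vruntime - left->vruntime < wakeup_gran;
}

static const struct sched_ent *
pick_entity(const struct sched_ent *left,
	    const struct sched_ent *next,	/* next buddy or NULL */
	    const struct sched_ent *last)	/* last buddy or NULL */
{
	const struct sched_ent *se = left;

	if (next && buddy_ok(next, left))
		se = next;
	/* last buddy wins over next buddy: prefer handing the CPU back
	 * to a preempted task, as the comment in the patch says */
	if (last && buddy_ok(last, left))
		se = last;
	/* the patch then clears buddy status for the entity it picked */
	return se;
}

int main(void)
{
	struct sched_ent left = { "leftmost",   100 };
	struct sched_ent next = { "next-buddy", 100500 };
	struct sched_ent last = { "last-buddy", 2500000 };

	printf("picked: %s\n", pick_entity(&left, &next, &last)->name);
	return 0;
}
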
@@ -1623,6 +1649,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
struct sched_entity *se = &curr->se, *pse = &p->se;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
int sync = wake_flags & WF_SYNC;
+ int scale = cfs_rq->nr_running >= sched_nr_latency;
update_curr(cfs_rq);
@@ -1637,18 +1664,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (unlikely(se == pse))
return;
- /*
- * Only set the backward buddy when the current task is still on the
- * rq. This can happen when a wakeup gets interleaved with schedule on
- * the ->pre_schedule() or idle_balance() point, either of which can
- * drop the rq lock.
- *
- * Also, during early boot the idle thread is in the fair class, for
- * obvious reasons its a bad idea to schedule back to the idle thread.
- */
- if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
- set_last_buddy(se);
- if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK))
+ if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
set_next_buddy(pse);
/*
@@ -1694,8 +1710,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
BUG_ON(!pse);
- if (wakeup_preempt_entity(se, pse) == 1)
+ if (wakeup_preempt_entity(se, pse) == 1) {
resched_task(curr);
+ /*
+ * Only set the backward buddy when the current task is still
+ * on the rq. This can happen when a wakeup gets interleaved
+ * with schedule on the ->pre_schedule() or idle_balance()
+ * point, either of which can drop the rq lock.
+ *
+ * Also, during early boot the idle thread is in the fair class,
+ * for obvious reasons it's a bad idea to schedule back to it.
+ */
+ if (unlikely(!se->on_rq || curr == rq->idle))
+ return;
+ if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
+ set_last_buddy(se);
+ }
}
static struct task_struct *pick_next_task_fair(struct rq *rq)
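
The three hunks above reshape check_preempt_wakeup(): buddies are only armed when the queue is busy enough (nr_running >= sched_nr_latency, captured in the new `scale` flag), and the last buddy is now set after the decision to preempt, and only for a task entity that is still on the runqueue and is not the idle thread. A rough userspace model of that flow, with made-up names, types, and constants, might look like this:

#include <stdbool.h>
#include <stdio.h>

struct task {
	long vruntime;
	bool on_rq;
	bool is_idle;
};

static const int nr_latency   = 8;		/* stand-in for sched_nr_latency */
static const long wakeup_gran = 1000000;	/* stand-in wakeup granularity */

static struct task *next_buddy;
static struct task *last_buddy;

/* stand-in for "wakeup_preempt_entity(se, pse) == 1" */
static bool wakeup_preempts(const struct task *curr, const struct task *woken)
{
	return curr->vruntime - woken->vruntime > wakeup_gran;
}

static bool model_check_preempt_wakeup(struct task *curr, struct task *woken,
					int nr_running)
{
	bool scale = nr_running >= nr_latency;

	/* NEXT_BUDDY: only favour the woken task when the queue is busy */
	if (scale)
		next_buddy = woken;

	if (!wakeup_preempts(curr, woken))
		return false;

	/* LAST_BUDDY is set only once we know we preempt, and never for
	 * an entity that left the rq or for the idle thread */
	if (scale && curr->on_rq && !curr->is_idle)
		last_buddy = curr;
	return true;
}

int main(void)
{
	struct task curr  = { .vruntime = 5000000, .on_rq = true };
	struct task woken = { .vruntime = 1000000, .on_rq = true };

	printf("preempt=%d next_buddy_set=%d last_buddy_set=%d\n",
	       model_check_preempt_wakeup(&curr, &woken, 12),
	       next_buddy == &woken, last_buddy == &curr);
	return 0;
}
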
@@ -1709,16 +1739,6 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
do {
se = pick_next_entity(cfs_rq);
- /*
- * If se was a buddy, clear it so that it will have to earn
- * the favour again.
- *
- * If se was not a buddy, clear the buddies because neither
- * was elegible to run, let them earn it again.
- *
- * IOW. unconditionally clear buddies.
- */
- __clear_buddies(cfs_rq, NULL);
set_next_entity(cfs_rq, se);
cfs_rq = group_cfs_rq(se);
} while (cfs_rq);
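
The last hunk drops the unconditional __clear_buddies() call from pick_next_task_fair(); together with the pick_next_entity() change earlier in this patch, only the entity that actually wins the election loses its buddy status, so a buddy that was not picked keeps its favour for a later pick. A tiny illustration of that narrower clearing follows; the types and clear_picked_buddy() are hypothetical, not the kernel's __clear_buddies().

#include <stdio.h>

struct ent { const char *name; };

struct buddies {
	struct ent *next;	/* NEXT_BUDDY candidate */
	struct ent *last;	/* LAST_BUDDY candidate */
};

/* clear buddy status only where it points at the picked entity */
static void clear_picked_buddy(struct buddies *b, const struct ent *picked)
{
	if (b->next == picked)
		b->next = NULL;
	if (b->last == picked)
		b->last = NULL;
}

int main(void)
{
	struct ent a = { "A" }, b = { "B" };
	struct buddies rq = { .next = &a, .last = &b };

	clear_picked_buddy(&rq, &a);		/* A was picked */
	printf("next=%p last=%s\n", (void *)rq.next, rq.last->name);
	return 0;
}
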