path: root/kernel/sched/fair.c
author	Peter Zijlstra <peterz@infradead.org>	2017-05-06 17:32:43 +0200
committer	Ingo Molnar <mingo@kernel.org>	2017-09-29 19:35:12 +0200
commit	88c0616ee729067ecb412bed76ef4a8734ea5100 (patch)
tree	9271e6c2ea27982118e85130b7d45df899f9e315 /kernel/sched/fair.c
parent	c7b50216818ef3dca14a52e3499750fbad2d9691 (diff)
sched/fair: Change update_load_avg() arguments
Most call sites of update_load_avg() already have cfs_rq_of(se)
available; pass it down instead of recomputing it.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
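The change is a pure plumbing refactor: callers that already hold the cfs_rq pointer hand it to update_load_avg() instead of letting the helper re-derive it through cfs_rq_of(se). The following standalone sketch illustrates the before/after shape of such a refactor; it is not kernel code, and the struct layouts, the my_q field, and the toy cfs_rq_of() lookup are simplified stand-ins invented for illustration:

/* before_after.c - standalone illustration, NOT kernel code */
#include <stdio.h>

/* Simplified stand-ins for the kernel structures. */
struct cfs_rq { int nr_running; };
struct sched_entity { struct cfs_rq *my_q; };

/* Toy stand-in for the kernel's cfs_rq_of(se) container lookup. */
static struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->my_q;
}

/* Before: the helper re-derives cfs_rq from se on every call. */
static void update_load_avg_before(struct sched_entity *se, int flags)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);	/* redundant if the caller has it */
	printf("before: cfs_rq=%p flags=%d\n", (void *)cfs_rq, flags);
}

/* After: the caller passes down the cfs_rq it already holds. */
static void update_load_avg_after(struct cfs_rq *cfs_rq,
				  struct sched_entity *se, int flags)
{
	(void)se;
	printf("after:  cfs_rq=%p flags=%d\n", (void *)cfs_rq, flags);
}

int main(void)
{
	struct cfs_rq rq = { .nr_running = 1 };
	struct sched_entity se = { .my_q = &rq };

	/* A caller like enqueue_entity(cfs_rq, se, flags) already has cfs_rq: */
	update_load_avg_before(&se, 0);		/* looks the pointer up again */
	update_load_avg_after(&rq, &se, 0);	/* reuses the caller's pointer */
	return 0;
}

Note that two call sites in the patch, update_blocked_averages() and sched_group_set_shares(), still compute cfs_rq_of(se) at the call site, because their loops walk sched entities without a cfs_rq in hand; there the lookup simply moves out of the helper rather than disappearing.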
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c | 31
1 file changed, 15 insertions(+), 16 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3b5b82345774..a6c53580fea0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3480,9 +3480,8 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
#define SKIP_AGE_LOAD 0x2
/* Update task and its cfs_rq load average */
-static inline void update_load_avg(struct sched_entity *se, int flags)
+static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
u64 now = cfs_rq_clock_task(cfs_rq);
struct rq *rq = rq_of(cfs_rq);
int cpu = cpu_of(rq);
@@ -3643,9 +3642,9 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
#define UPDATE_TG 0x0
#define SKIP_AGE_LOAD 0x0
-static inline void update_load_avg(struct sched_entity *se, int not_used1)
+static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
{
- cfs_rq_util_change(cfs_rq_of(se));
+ cfs_rq_util_change(cfs_rq);
}
static inline void
@@ -3796,7 +3795,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* its group cfs_rq
* - Add its new weight to cfs_rq->load.weight
*/
- update_load_avg(se, UPDATE_TG);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
enqueue_entity_load_avg(cfs_rq, se);
update_cfs_shares(se);
account_entity_enqueue(cfs_rq, se);
@@ -3880,7 +3879,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* - For group entity, update its weight to reflect the new share
* of its group cfs_rq.
*/
- update_load_avg(se, UPDATE_TG);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
dequeue_entity_load_avg(cfs_rq, se);
update_stats_dequeue(cfs_rq, se, flags);
@@ -3968,7 +3967,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
*/
update_stats_wait_end(cfs_rq, se);
__dequeue_entity(cfs_rq, se);
- update_load_avg(se, UPDATE_TG);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
}
update_stats_curr_start(cfs_rq, se);
@@ -4070,7 +4069,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
/* Put 'current' back into the tree. */
__enqueue_entity(cfs_rq, prev);
/* in !on_rq case, update occurred at dequeue */
- update_load_avg(prev, 0);
+ update_load_avg(cfs_rq, prev, 0);
}
cfs_rq->curr = NULL;
}
@@ -4086,7 +4085,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
/*
* Ensure that runnable average is periodically updated.
*/
- update_load_avg(curr, UPDATE_TG);
+ update_load_avg(cfs_rq, curr, UPDATE_TG);
update_cfs_shares(curr);
#ifdef CONFIG_SCHED_HRTICK
@@ -5004,7 +5003,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
- update_load_avg(se, UPDATE_TG);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
update_cfs_shares(se);
}
@@ -5063,7 +5062,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
- update_load_avg(se, UPDATE_TG);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
update_cfs_shares(se);
}
@@ -7121,7 +7120,7 @@ static void update_blocked_averages(int cpu)
/* Propagate pending load changes to the parent, if any: */
se = cfs_rq->tg->se[cpu];
if (se && !skip_blocked_update(se))
- update_load_avg(se, 0);
+ update_load_avg(cfs_rq_of(se), se, 0);
/*
* There can be a lot of idle CPU cgroups. Don't let fully
@@ -9295,7 +9294,7 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
if (cfs_rq_throttled(cfs_rq))
break;
- update_load_avg(se, UPDATE_TG);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
}
}
#else
@@ -9307,7 +9306,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
/* Catch up with the cfs_rq and remove our load when we leave */
- update_load_avg(se, 0);
+ update_load_avg(cfs_rq, se, 0);
detach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq, false);
propagate_entity_cfs_rq(se);
@@ -9326,7 +9325,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
#endif
/* Synchronize entity with its cfs_rq */
- update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
+ update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
attach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq, false);
propagate_entity_cfs_rq(se);
@@ -9610,7 +9609,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
for_each_sched_entity(se) {
- update_load_avg(se, UPDATE_TG);
+ update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
update_cfs_shares(se);
}
rq_unlock_irqrestore(rq, &rf);