commit ee56c3e8eec166f4e4a2ca842b7804d14f3a0208
tree 3bcee4a6090d681b7bbb49d1946c95798f166159
parent 00c9d5632277b21ba8802e26c27254cd9d0dfa13
parent 03c765b0e3b4cb5063276b086c76f7a612856a9a
author akpm <akpm@linux-foundation.org> 2022-06-27 10:31:44 -0700
committer akpm <akpm@linux-foundation.org> 2022-06-27 10:31:44 -0700

    Merge branch 'master' into mm-nonmm-stable
Diffstat (limited to 'kernel/sched/core.c'):

 kernel/sched/core.c | 36 +++++++++++++++++++++++++++++++++---
 1 file changed, 33 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bfa7452ca92e..da0bf6fe9ecd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4798,25 +4798,55 @@ static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
 
 static void balance_push(struct rq *rq);
 
+/*
+ * balance_push_callback is a right abuse of the callback interface and plays
+ * by significantly different rules.
+ *
+ * Where the normal balance_callback's purpose is to be ran in the same context
+ * that queued it (only later, when it's safe to drop rq->lock again),
+ * balance_push_callback is specifically targeted at __schedule().
+ *
+ * This abuse is tolerated because it places all the unlikely/odd cases behind
+ * a single test, namely: rq->balance_callback == NULL.
+ */
 struct callback_head balance_push_callback = {
 	.next = NULL,
 	.func = (void (*)(struct callback_head *))balance_push,
 };
 
-static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
+static inline struct callback_head *
+__splice_balance_callbacks(struct rq *rq, bool split)
 {
 	struct callback_head *head = rq->balance_callback;
 
+	if (likely(!head))
+		return NULL;
+
 	lockdep_assert_rq_held(rq);
-	if (head)
+	/*
+	 * Must not take balance_push_callback off the list when
+	 * splice_balance_callbacks() and balance_callbacks() are not
+	 * in the same rq->lock section.
+	 *
+	 * In that case it would be possible for __schedule() to interleave
+	 * and observe the list empty.
+	 */
+	if (split && head == &balance_push_callback)
+		head = NULL;
+	else
 		rq->balance_callback = NULL;
 
 	return head;
 }
 
+static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
+{
+	return __splice_balance_callbacks(rq, true);
+}
+
 static void __balance_callbacks(struct rq *rq)
 {
-	do_balance_callbacks(rq, splice_balance_callbacks(rq));
+	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
 }
 
 static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
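
For context, the split variant exists for callers that splice the callback list
while holding rq->lock but only run the callbacks after dropping the lock;
__sched_setscheduler() in kernel/sched/core.c is the canonical case. A
simplified sketch of that pattern (preemption handling, error paths and the
PI-chain adjustment elided; not the verbatim kernel code):

	struct callback_head *head;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);

	/* ... change p's scheduling parameters under rq->lock ... */

	/*
	 * Splice the queued callbacks, but leave balance_push_callback
	 * on the list: the callbacks only run after rq->lock is dropped,
	 * and __schedule() must not observe an empty list in between.
	 */
	head = splice_balance_callbacks(rq);	/* split == true */
	task_rq_unlock(rq, p, &rf);

	/* Re-takes rq->lock internally before running the spliced callbacks. */
	balance_callbacks(rq, head);

__balance_callbacks(), by contrast, runs in the same rq->lock section that
queued the callbacks, so it passes split == false and may take
balance_push_callback off the list safely.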