 kernel/sched/ext.h   | 39
 kernel/sched/sched.h | 65
 2 files changed, 59 insertions(+), 45 deletions(-)
diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
index 1d7837bdfaba..32d3a51f591a 100644
--- a/kernel/sched/ext.h
+++ b/kernel/sched/ext.h
@@ -8,24 +8,6 @@
*/
#ifdef CONFIG_SCHED_CLASS_EXT
-struct sched_enq_and_set_ctx {
- struct task_struct *p;
- int queue_flags;
- bool queued;
- bool running;
-};
-
-void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
- struct sched_enq_and_set_ctx *ctx);
-void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx);
-
-extern const struct sched_class ext_sched_class;
-
-DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled);
-DECLARE_STATIC_KEY_FALSE(__scx_switched_all);
-#define scx_enabled() static_branch_unlikely(&__scx_ops_enabled)
-#define scx_switched_all() static_branch_unlikely(&__scx_switched_all)
-
void scx_tick(struct rq *rq);
void init_scx_entity(struct sched_ext_entity *scx);
void scx_pre_fork(struct task_struct *p);
@@ -52,22 +34,6 @@ static inline bool task_on_scx(const struct task_struct *p)
return scx_enabled() && p->sched_class == &ext_sched_class;
}
-static inline const struct sched_class *next_active_class(const struct sched_class *class)
-{
- class++;
- if (scx_switched_all() && class == &fair_sched_class)
- class++;
- if (!scx_enabled() && class == &ext_sched_class)
- class++;
- return class;
-}
-
-#define for_active_class_range(class, _from, _to) \
- for (class = (_from); class != (_to); class = next_active_class(class))
-
-#define for_each_active_class(class) \
- for_active_class_range(class, __sched_class_highest, __sched_class_lowest)
-
#ifdef CONFIG_SCHED_CORE
bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
bool in_fi);
@@ -75,9 +41,6 @@ bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
#else /* CONFIG_SCHED_CLASS_EXT */
-#define scx_enabled() false
-#define scx_switched_all() false
-
static inline void scx_tick(struct rq *rq) {}
static inline void scx_pre_fork(struct task_struct *p) {}
static inline int scx_fork(struct task_struct *p) { return 0; }
@@ -91,8 +54,6 @@ static inline int scx_check_setscheduler(struct task_struct *p, int policy) { re
static inline bool task_on_scx(const struct task_struct *p) { return false; }
static inline void init_sched_ext_class(void) {}
-#define for_each_active_class for_each_class
-
#endif /* CONFIG_SCHED_CLASS_EXT */
#if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a12ea0281242..04184e87ba7c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2438,19 +2438,54 @@ const struct sched_class name##_sched_class \
extern struct sched_class __sched_class_highest[];
extern struct sched_class __sched_class_lowest[];
+extern const struct sched_class stop_sched_class;
+extern const struct sched_class dl_sched_class;
+extern const struct sched_class rt_sched_class;
+extern const struct sched_class fair_sched_class;
+extern const struct sched_class idle_sched_class;
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+extern const struct sched_class ext_sched_class;
+
+DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled); /* SCX BPF scheduler loaded */
+DECLARE_STATIC_KEY_FALSE(__scx_switched_all); /* all fair class tasks on SCX */
+
+#define scx_enabled() static_branch_unlikely(&__scx_ops_enabled)
+#define scx_switched_all() static_branch_unlikely(&__scx_switched_all)
+#else /* !CONFIG_SCHED_CLASS_EXT */
+#define scx_enabled() false
+#define scx_switched_all() false
+#endif /* !CONFIG_SCHED_CLASS_EXT */
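For illustration, these keys are expected to be flipped when a BPF scheduler is loaded or unloaded; the helper functions below are hypothetical and not part of this patch, only the key names and static_branch_enable()/static_branch_disable() are real:

/* Hypothetical sketch: how ext.c could flip the keys on enable/disable. */
static void scx_example_enable(bool switch_all)
{
	static_branch_enable(&__scx_ops_enabled);	/* scx_enabled() becomes true */
	if (switch_all)
		static_branch_enable(&__scx_switched_all);
}

static void scx_example_disable(void)
{
	static_branch_disable(&__scx_switched_all);
	static_branch_disable(&__scx_ops_enabled);	/* scx_enabled() becomes false */
}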
+
+/*
+ * Iterate only active classes. SCX can take over all fair tasks or be
+ * completely disabled. If the former, skip fair. If the latter, skip SCX.
+ */
+static inline const struct sched_class *next_active_class(const struct sched_class *class)
+{
+ class++;
+#ifdef CONFIG_SCHED_CLASS_EXT
+ if (scx_switched_all() && class == &fair_sched_class)
+ class++;
+ if (!scx_enabled() && class == &ext_sched_class)
+ class++;
+#endif
+ return class;
+}
+
#define for_class_range(class, _from, _to) \
for (class = (_from); class < (_to); class++)
#define for_each_class(class) \
for_class_range(class, __sched_class_highest, __sched_class_lowest)
-#define sched_class_above(_a, _b) ((_a) < (_b))
+#define for_active_class_range(class, _from, _to) \
+ for (class = (_from); class != (_to); class = next_active_class(class))
-extern const struct sched_class stop_sched_class;
-extern const struct sched_class dl_sched_class;
-extern const struct sched_class rt_sched_class;
-extern const struct sched_class fair_sched_class;
-extern const struct sched_class idle_sched_class;
+#define for_each_active_class(class) \
+ for_active_class_range(class, __sched_class_highest, __sched_class_lowest)
+
+#define sched_class_above(_a, _b) ((_a) < (_b))
static inline bool sched_stop_runnable(struct rq *rq)
{
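As a hedged illustration of how the new iterator is consumed (the helper below is hypothetical and not taken from this patch), a walk over the active classes simply never visits the entries that next_active_class() steps over:

/*
 * Illustrative sketch only: check whether @p's class is one of the
 * classes the core will still visit.  With scx_switched_all() set,
 * fair_sched_class is skipped; with no BPF scheduler loaded,
 * ext_sched_class is skipped, so the walk spends no cycles on classes
 * that cannot own runnable tasks.
 */
static inline bool example_task_class_is_active(const struct task_struct *p)
{
	const struct sched_class *class;

	for_each_active_class(class) {
		if (class == p->sched_class)
			return true;
	}

	return false;
}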
@@ -3698,6 +3733,24 @@ static inline void balance_callbacks(struct rq *rq, struct balance_callback *hea
#endif
+#ifdef CONFIG_SCHED_CLASS_EXT
+/*
+ * Used by SCX in the enable/disable paths to move tasks between sched_classes
+ * and establish invariants.
+ */
+struct sched_enq_and_set_ctx {
+ struct task_struct *p;
+ int queue_flags;
+ bool queued;
+ bool running;
+};
+
+void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
+ struct sched_enq_and_set_ctx *ctx);
+void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx);
+
+#endif /* CONFIG_SCHED_CLASS_EXT */
+
#include "ext.h"
#endif /* _KERNEL_SCHED_SCHED_H */
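A minimal sketch of the intended dequeue/re-enqueue pattern follows; the wrapper function and the particular queue flags are illustrative assumptions, not code from this patch, and the caller is assumed to hold the task's rq lock:

/*
 * Hypothetical usage sketch: take the task off its runqueue, change
 * scheduling state while it is dequeued, then restore its queued/running
 * state, as the SCX enable/disable paths are expected to do.
 */
static void example_reload_task(struct task_struct *p,
				const struct sched_class *new_class)
{
	struct sched_enq_and_set_ctx ctx;

	sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);

	p->sched_class = new_class;	/* state change done while off the rq */

	sched_enq_and_set_task(&ctx);	/* re-enqueue; set_next_task if it was running */
}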