Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h  128
1 file changed, 43 insertions(+), 85 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 023b844159c9..47972f34ea70 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -91,12 +91,6 @@ struct cpuidle_state;
#include "cpupri.h"
#include "cpudeadline.h"
-#ifdef CONFIG_SCHED_DEBUG
-# define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
-#else
-# define SCHED_WARN_ON(x) ({ (void)(x), 0; })
-#endif
-
/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED 1
#define TASK_ON_RQ_MIGRATING 2
@@ -998,7 +992,7 @@ struct root_domain {
* Also, some corner cases, like 'wrap around', are dangerous, but given
* that u64 is 'big enough', that shouldn't be a concern.
*/
- u64 visit_gen;
+ u64 visit_cookie;
#ifdef HAVE_RT_PUSH_IPI
/*
@@ -1180,10 +1174,8 @@ struct rq {
atomic_t nr_iowait;
-#ifdef CONFIG_SCHED_DEBUG
u64 last_seen_need_resched_ns;
int ticks_without_resched;
-#endif
#ifdef CONFIG_MEMBARRIER
int membarrier_state;
@@ -1571,7 +1563,7 @@ static inline void update_idle_core(struct rq *rq) { }
static inline struct task_struct *task_of(struct sched_entity *se)
{
- SCHED_WARN_ON(!entity_is_task(se));
+ WARN_ON_ONCE(!entity_is_task(se));
return container_of(se, struct task_struct, se);
}
@@ -1652,7 +1644,7 @@ static inline void assert_clock_updated(struct rq *rq)
* The only reason for not seeing a clock update since the
* last rq_pin_lock() is if we're currently skipping updates.
*/
- SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
+ WARN_ON_ONCE(rq->clock_update_flags < RQCF_ACT_SKIP);
}
static inline u64 rq_clock(struct rq *rq)
@@ -1699,7 +1691,7 @@ static inline void rq_clock_cancel_skipupdate(struct rq *rq)
static inline void rq_clock_start_loop_update(struct rq *rq)
{
lockdep_assert_rq_held(rq);
- SCHED_WARN_ON(rq->clock_update_flags & RQCF_ACT_SKIP);
+ WARN_ON_ONCE(rq->clock_update_flags & RQCF_ACT_SKIP);
rq->clock_update_flags |= RQCF_ACT_SKIP;
}
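
For reference (not part of the diff), the invariant checked above is that the rq clock is refreshed under a pinned lock before it is read. A minimal sketch, assuming a hypothetical call site (example_use_rq_clock() is illustrative only):

/* Illustrative sketch of the rq clock protocol that assert_clock_updated() enforces. */
static void example_use_rq_clock(struct rq *rq)
{
	struct rq_flags rf;
	u64 now;

	rq_lock(rq, &rf);	/* pins the lock; RQCF_UPDATED is cleared */
	update_rq_clock(rq);	/* sets RQCF_UPDATED */
	now = rq_clock(rq);	/* asserts an update (or skip) since pinning */
	/* ... use @now ... */
	rq_unlock(rq, &rf);
}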
@@ -1712,14 +1704,12 @@ static inline void rq_clock_stop_loop_update(struct rq *rq)
struct rq_flags {
unsigned long flags;
struct pin_cookie cookie;
-#ifdef CONFIG_SCHED_DEBUG
/*
* A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
* current pin context is stashed here in case it needs to be
* restored in rq_repin_lock().
*/
unsigned int clock_update_flags;
-#endif
};
extern struct balance_callback balance_push_callback;
@@ -1770,21 +1760,18 @@ static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
rf->cookie = lockdep_pin_lock(__rq_lockp(rq));
-#ifdef CONFIG_SCHED_DEBUG
rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
rf->clock_update_flags = 0;
-# ifdef CONFIG_SMP
- SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback);
-# endif
+#ifdef CONFIG_SMP
+ WARN_ON_ONCE(rq->balance_callback && rq->balance_callback != &balance_push_callback);
#endif
}
static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
-#ifdef CONFIG_SCHED_DEBUG
if (rq->clock_update_flags > RQCF_ACT_SKIP)
rf->clock_update_flags = RQCF_UPDATED;
-#endif
+
scx_rq_clock_invalidate(rq);
lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
}
@@ -1793,12 +1780,10 @@ static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
lockdep_repin_lock(__rq_lockp(rq), rf->cookie);
-#ifdef CONFIG_SCHED_DEBUG
/*
* Restore the value we stashed in @rf for this pin context.
*/
rq->clock_update_flags |= rf->clock_update_flags;
-#endif
}
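
For reference (not part of the diff), the stash/restore pair above supports temporarily dropping the rq lock without losing track of the clock state. A minimal sketch (example_drop_rq_lock() is illustrative, not a real call site):

/* Illustrative: drop and re-take the rq lock while preserving RQCF_UPDATED. */
static void example_drop_rq_lock(struct rq *rq, struct rq_flags *rf)
{
	rq_unpin_lock(rq, rf);		/* stashes RQCF_UPDATED into @rf */
	raw_spin_rq_unlock(rq);

	/* ... work that must not run under the rq lock ... */

	raw_spin_rq_lock(rq);
	rq_repin_lock(rq, rf);		/* restores the stashed clock state */
}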
extern
@@ -2072,9 +2057,7 @@ struct sched_group_capacity {
unsigned long next_update;
int imbalance; /* XXX unrelated to capacity but shared group state */
-#ifdef CONFIG_SCHED_DEBUG
int id;
-#endif
unsigned long cpumask[]; /* Balance mask */
};
@@ -2114,13 +2097,8 @@ static inline struct cpumask *group_balance_mask(struct sched_group *sg)
extern int group_balance_cpu(struct sched_group *sg);
-#ifdef CONFIG_SCHED_DEBUG
extern void update_sched_domain_debugfs(void);
extern void dirty_sched_domain_sysctl(int cpu);
-#else
-static inline void update_sched_domain_debugfs(void) { }
-static inline void dirty_sched_domain_sysctl(int cpu) { }
-#endif
extern int sched_update_scaling(void);
@@ -2200,13 +2178,8 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
}
/*
- * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
+ * Tunables:
*/
-#ifdef CONFIG_SCHED_DEBUG
-# define const_debug __read_mostly
-#else
-# define const_debug const
-#endif
#define SCHED_FEAT(name, enabled) \
__SCHED_FEAT_##name ,
@@ -2218,13 +2191,11 @@ enum {
#undef SCHED_FEAT
-#ifdef CONFIG_SCHED_DEBUG
-
/*
* To support run-time toggling of sched features, all the translation units
* (but core.c) reference the sysctl_sched_features defined in core.c.
*/
-extern const_debug unsigned int sysctl_sched_features;
+extern __read_mostly unsigned int sysctl_sched_features;
#ifdef CONFIG_JUMP_LABEL
@@ -2246,24 +2217,6 @@ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#endif /* !CONFIG_JUMP_LABEL */
-#else /* !SCHED_DEBUG: */
-
-/*
- * Each translation unit has its own copy of sysctl_sched_features to allow
- * constants propagation at compile time and compiler optimization based on
- * features default.
- */
-#define SCHED_FEAT(name, enabled) \
- (1UL << __SCHED_FEAT_##name) * enabled |
-static const_debug __maybe_unused unsigned int sysctl_sched_features =
-#include "features.h"
- 0;
-#undef SCHED_FEAT
-
-#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
-
-#endif /* !SCHED_DEBUG */
-
extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;
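
For reference (not part of the diff), each SCHED_FEAT(name, enabled) entry in features.h turns into one __SCHED_FEAT_* bit, and with run-time toggling every translation unit except core.c references the single sysctl_sched_features defined there. A simplified, self-contained model (PLACE_LAG stands in for a real entry; the jump-label fast path is omitted):

/* Simplified model of the sched_feat() machinery; not the in-tree definitions. */
#define SCHED_FEAT(name, enabled)	__SCHED_FEAT_##name,
enum {
	SCHED_FEAT(PLACE_LAG, true)	/* stands in for #include "features.h" */
	__SCHED_FEAT_NR,
};
#undef SCHED_FEAT

/* core.c owns the mask; debugfs (sched/features) flips its bits at run time. */
__read_mostly unsigned int sysctl_sched_features = 1U << __SCHED_FEAT_PLACE_LAG;

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
/* sched_feat(PLACE_LAG) is non-zero until the bit is cleared via debugfs. */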
@@ -2685,7 +2638,7 @@ static inline void idle_set_state(struct rq *rq,
static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
- SCHED_WARN_ON(!rcu_read_lock_held());
+ WARN_ON_ONCE(!rcu_read_lock_held());
return rq->idle_state;
}
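
For reference (not part of the diff), callers dereference the returned cpuidle state under RCU, which is what the warning above checks. A hedged sketch, loosely modeled on the idle-CPU selection code (example_idle_exit_latency() is not a real kernel function):

/* Illustrative: read the idle state's exit latency under rcu_read_lock(). */
static unsigned int example_idle_exit_latency(struct rq *rq)
{
	struct cpuidle_state *state;
	unsigned int latency = 0;

	rcu_read_lock();
	state = idle_get_state(rq);
	if (state)
		latency = state->exit_latency;	/* wakeup latency, in usecs */
	rcu_read_unlock();

	return latency;
}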
@@ -2843,12 +2796,11 @@ extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags);
# define SCHED_NR_MIGRATE_BREAK 32
#endif
-extern const_debug unsigned int sysctl_sched_nr_migrate;
-extern const_debug unsigned int sysctl_sched_migration_cost;
+extern __read_mostly unsigned int sysctl_sched_nr_migrate;
+extern __read_mostly unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_base_slice;
-#ifdef CONFIG_SCHED_DEBUG
extern int sysctl_resched_latency_warn_ms;
extern int sysctl_resched_latency_warn_once;
@@ -2859,7 +2811,6 @@ extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;
extern unsigned int sysctl_numa_balancing_hot_threshold;
-#endif
#ifdef CONFIG_SCHED_HRTICK
@@ -2932,7 +2883,6 @@ unsigned long arch_scale_freq_capacity(int cpu)
}
#endif
-#ifdef CONFIG_SCHED_DEBUG
/*
* In double_lock_balance()/double_rq_lock(), we use raw_spin_rq_lock() to
* acquire rq lock instead of rq_lock(). So at the end of these two functions
@@ -2947,9 +2897,6 @@ static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2)
rq2->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
#endif
}
-#else
-static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) { }
-#endif
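
For reference (not part of the diff), the helper above is invoked at the end of the two lock-takers named in the comment; a simplified sketch of double_rq_lock() (lock ordering and lockdep annotations omitted, so not the exact core.c code):

/* Simplified sketch of double_rq_lock(); ordering and lockdep details omitted. */
static void example_double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	raw_spin_rq_lock(rq1);			/* bypasses rq_lock()/rq_pin_lock() */
	if (rq1 != rq2)
		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);

	double_rq_clock_clear_update(rq1, rq2);	/* keep clock_update_flags consistent */
}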
#define DEFINE_LOCK_GUARD_2(name, type, _lock, _unlock, ...) \
__DEFINE_UNLOCK_GUARD(name, type, _unlock, type *lock2; __VA_ARGS__) \
@@ -3162,7 +3109,6 @@ extern struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
-#ifdef CONFIG_SCHED_DEBUG
extern bool sched_debug_verbose;
extern void print_cfs_stats(struct seq_file *m, int cpu);
@@ -3173,15 +3119,12 @@ extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
extern void resched_latency_warn(int cpu, u64 latency);
-# ifdef CONFIG_NUMA_BALANCING
+#ifdef CONFIG_NUMA_BALANCING
extern void show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
unsigned long tpf, unsigned long gsf, unsigned long gpf);
-# endif /* CONFIG_NUMA_BALANCING */
-#else /* !CONFIG_SCHED_DEBUG: */
-static inline void resched_latency_warn(int cpu, u64 latency) { }
-#endif /* !CONFIG_SCHED_DEBUG */
+#endif /* CONFIG_NUMA_BALANCING */
extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
@@ -3394,6 +3337,31 @@ static inline bool update_other_load_avgs(struct rq *rq) { return false; }
unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
+/*
+ * When uclamp is compiled in, the aggregation at rq level is 'turned off'
+ * by default in the fast path and only gets turned on once userspace performs
+ * an operation that requires it.
+ *
+ * Returns true if userspace has opted in to using uclamp, and rq-level
+ * aggregation is therefore active.
+ */
+static inline bool uclamp_is_used(void)
+{
+ return static_branch_likely(&sched_uclamp_used);
+}
+
+/*
+ * Enabling a static branch takes cpus_read_lock(), so check
+ * uclamp_is_used() first to avoid taking that lock on every call.
+ * This is safe because the static key is never disabled once it
+ * has been enabled.
+ */
+static inline void sched_uclamp_enable(void)
+{
+ if (!uclamp_is_used())
+ static_branch_enable(&sched_uclamp_used);
+}
+
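
For reference (not part of the diff), the split between the two helpers is: a slow-path operation opts in once, while fast paths keep testing a static branch. A hedged sketch with hypothetical call sites (neither function exists in the kernel); the clamp step loosely follows uclamp_rq_util_with():

/* Hypothetical call sites illustrating sched_uclamp_enable()/uclamp_is_used(). */
static void example_set_uclamp(struct task_struct *p)
{
	sched_uclamp_enable();		/* one-shot; the key is never disabled again */
	/* ... apply the requested clamp values to @p ... */
}

static unsigned long example_effective_util(struct rq *rq, unsigned long util)
{
	unsigned long min_util, max_util;

	if (!uclamp_is_used())		/* static branch: ~free until userspace opts in */
		return util;

	min_util = uclamp_rq_get(rq, UCLAMP_MIN);
	max_util = uclamp_rq_get(rq, UCLAMP_MAX);
	if (min_util >= max_util)	/* MAX aggregation can invert the pair */
		return min_util;

	return clamp(util, min_util, max_util);
}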
static inline unsigned long uclamp_rq_get(struct rq *rq,
enum uclamp_id clamp_id)
{
@@ -3417,7 +3385,7 @@ static inline bool uclamp_rq_is_capped(struct rq *rq)
unsigned long rq_util;
unsigned long max_util;
- if (!static_branch_likely(&sched_uclamp_used))
+ if (!uclamp_is_used())
return false;
rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq);
@@ -3426,19 +3394,6 @@ static inline bool uclamp_rq_is_capped(struct rq *rq)
return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util;
}
-/*
- * When uclamp is compiled in, the aggregation at rq level is 'turned off'
- * by default in the fast path and only gets turned on once userspace performs
- * an operation that requires it.
- *
- * Returns true if userspace opted-in to use uclamp and aggregation at rq level
- * hence is active.
- */
-static inline bool uclamp_is_used(void)
-{
- return static_branch_likely(&sched_uclamp_used);
-}
-
#define for_each_clamp_id(clamp_id) \
for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
@@ -3486,6 +3441,8 @@ static inline bool uclamp_is_used(void)
return false;
}
+static inline void sched_uclamp_enable(void) {}
+
static inline unsigned long
uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id)
{
@@ -3619,6 +3576,7 @@ extern int preempt_dynamic_mode;
extern int sched_dynamic_mode(const char *str);
extern void sched_dynamic_update(int mode);
#endif
+extern const char *preempt_modes[];
#ifdef CONFIG_SCHED_MM_CID