author     Ingo Molnar <mingo@elte.hu>    2007-10-15 17:00:02 +0200
committer  Ingo Molnar <mingo@elte.hu>    2007-10-15 17:00:02 +0200
commit     2bd8e6d422a4f44c0994f909317eba80b0fe08a1 (patch)
tree       3548dd01c056e3f7c3c04fd85cc4726b95677c15 /kernel/sched_fair.c
parent     38ad464d410dadceda1563f36bdb0be7fe4c8938 (diff)
sched: use constants if !CONFIG_SCHED_DEBUG
use constants if !CONFIG_SCHED_DEBUG.
this speeds up the code and reduces code size:
 text   data  bss    dec   hex  filename
27464   3014   16  30494  771e  sched.o.before
26929   3010   20  29959  7507  sched.o.after
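
For context (not part of the commit message): with CONFIG_SCHED_DEBUG off, each tunable becomes a "static const" object whose value is known at compile time, so the compiler can fold it into expressions instead of loading it from memory. Below is a minimal user-space sketch of the same pattern; the names are made up, and __read_mostly is stubbed out since it only exists in the kernel:

#include <stdio.h>

/* Stub: in the kernel, __read_mostly places the variable in a
 * section of rarely-written data; user space has no equivalent. */
#define __read_mostly

#ifdef MY_DEBUG
# define const_debug __read_mostly	/* real, writable variable */
#else
# define const_debug static const	/* compile-time constant */
#endif

/* Hypothetical tunable, mirroring sysctl_sched_latency: */
const_debug unsigned int my_latency = 20000000U;

int main(void)
{
	/* With MY_DEBUG unset, this expression constant-folds and
	 * the load of my_latency disappears from the object code. */
	printf("half latency: %u ns\n", my_latency / 2);
	return 0;
}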
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	28
1 file changed, 20 insertions, 8 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5c15d8ae92cb..2e84aaffe425 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -21,6 +21,15 @@
  */
 
 /*
+ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
+ */
+#ifdef CONFIG_SCHED_DEBUG
+# define const_debug __read_mostly
+#else
+# define const_debug static const
+#endif
+
+/*
  * Targeted preemption latency for CPU-bound tasks:
  * (default: 20ms, units: nanoseconds)
  *
@@ -34,7 +43,13 @@
  * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
  * Targeted preemption latency for CPU-bound tasks:
  */
-unsigned int sysctl_sched_latency __read_mostly = 20000000ULL;
+const_debug unsigned int sysctl_sched_latency = 20000000ULL;
+
+/*
+ * After fork, child runs first. (default) If set to 0 then
+ * parent will (try to) run first.
+ */
+const_debug unsigned int sysctl_sched_child_runs_first = 1;
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
@@ -58,7 +73,7 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly = 25000000UL;
+const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
 
 /*
  * SCHED_OTHER wake-up granularity.
@@ -68,13 +83,10 @@ unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly = 25000000UL;
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000UL;
+const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
 
-unsigned int sysctl_sched_stat_granularity __read_mostly;
+const_debug unsigned int sysctl_sched_stat_granularity;
 
-/*
- * Initialized in sched_init_granularity() [to 5 times the base granularity]:
- */
 unsigned int sysctl_sched_runtime_limit __read_mostly;
 
 /*
@@ -89,7 +101,7 @@ enum {
 	SCHED_FEAT_SKIP_INITIAL	= 32,
 };
 
-unsigned int sysctl_sched_features __read_mostly =
+const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_FAIR_SLEEPERS	*1 |
 		SCHED_FEAT_SLEEPER_AVG		*0 |
 		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
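
Where the text-size saving comes from: once sysctl_sched_features is static const, a feature test such as (sysctl_sched_features & SCHED_FEAT_SLEEPER_AVG) is a compile-time constant, and the compiler can delete disabled branches outright. A sketch of that effect, using hypothetical feature names rather than the scheduler's real code:

#ifdef MY_DEBUG
# define const_debug			/* plain writable variable */
#else
# define const_debug static const	/* value known at compile time */
#endif

/* Hypothetical stand-ins for the SCHED_FEAT_* bits: */
#define FEAT_FAIR_SLEEPERS	1
#define FEAT_SLEEPER_AVG	2

const_debug unsigned int my_features =
		FEAT_FAIR_SLEEPERS	*1 |
		FEAT_SLEEPER_AVG	*0;

int account_sleep(int delta)
{
	/* Constant-false when MY_DEBUG is unset: the compiler drops
	 * the whole branch, which is how the .text section shrinks. */
	if (my_features & FEAT_SLEEPER_AVG)
		return delta / 2;
	return delta;
}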