| author | Arjan van de Ven <arjan@infradead.org> | 2006-01-14 13:20:43 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-14 18:27:06 -0800 |
| commit | 858119e159384308a5dde67776691a2ebf70df0f (patch) | |
| tree | f360768f999d51edc0863917ce0bf79e88c0ec4c /kernel/sched.c | |
| parent | b0a9499c3dd50d333e2aedb7e894873c58da3785 (diff) | |
| download | lwn-858119e159384308a5dde67776691a2ebf70df0f.tar.gz, lwn-858119e159384308a5dde67776691a2ebf70df0f.zip | |
[PATCH] Unlinline a bunch of other functions
Remove the "inline" keyword from a bunch of big functions in the kernel, with the goal of shrinking the kernel image by 30kb to 40kb.
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jeff Garzik <jgarzik@pobox.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
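For context on why removing the keyword shrinks the image: the kernel's compiler headers of this era expand `inline` to a forced-inline attribute (`__attribute__((always_inline))`), so a `static inline` function gets its body duplicated at every call site, and for a big function with several callers the duplicated bodies cost far more text than the call instructions they save. The sketch below is a toy illustration of that trade-off, not code from the patch; the file name, macros, loop body, and build commands are all made up for the demonstration.

```c
/*
 * size_demo.c -- toy illustration of the inlining size trade-off.
 * The attribute shims mimic the kernel's behaviour, where "inline"
 * expands to a forced-inline attribute.
 *
 * Build both variants and compare the text sizes reported by size(1):
 *   gcc -O2 -DFORCE_INLINE -c size_demo.c && size size_demo.o
 *   gcc -O2 -c size_demo.c && size size_demo.o
 */
#ifdef FORCE_INLINE
#define MAYBE_INLINE inline __attribute__((always_inline))
#else
#define MAYBE_INLINE __attribute__((noinline))
#endif

/* Stand-in for a "big" kernel function with several callers. */
static MAYBE_INLINE unsigned int big_body(unsigned int x)
{
	unsigned int i, acc = x;

	for (i = 0; i < 256; i++)
		acc = acc * 31 + i;
	return acc;
}

/*
 * Three call sites: forced inline, the loop body is emitted three
 * times; out of line, it is emitted once plus three call sequences.
 */
unsigned int caller_a(unsigned int x) { return big_body(x + 1); }
unsigned int caller_b(unsigned int x) { return big_body(x + 2); }
unsigned int caller_c(unsigned int x) { return big_body(x + 3); }
```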
Diffstat (limited to 'kernel/sched.c')
| -rw-r--r-- | kernel/sched.c | 16 |
1 file changed, 8 insertions(+), 8 deletions(-)
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index e1dc903d5a75..788ecce1e0e4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -521,7 +521,7 @@ static inline void sched_info_dequeued(task_t *t)
  * long it was waiting to run. We also note when it began so that we
  * can keep stats on how long its timeslice is.
  */
-static inline void sched_info_arrive(task_t *t)
+static void sched_info_arrive(task_t *t)
 {
 	unsigned long now = jiffies, diff = 0;
 	struct runqueue *rq = task_rq(t);
@@ -1007,7 +1007,7 @@ void kick_process(task_t *p)
  * We want to under-estimate the load of migration sources, to
  * balance conservatively.
  */
-static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
+static unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
 	unsigned long running = rq->nr_running;
@@ -1870,7 +1870,7 @@ void sched_exec(void)
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.
  */
-static inline
+static
 void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
 	       runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
 {
@@ -1892,7 +1892,7 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
 /*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
-static inline
+static
 int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
 		     struct sched_domain *sd, enum idle_type idle,
 		     int *all_pinned)
@@ -2378,7 +2378,7 @@ out_balanced:
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
+static void idle_balance(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *sd;
 
@@ -2762,7 +2762,7 @@ static inline void wakeup_busy_runqueue(runqueue_t *rq)
 		resched_task(rq->idle);
 }
 
-static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+static void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	cpumask_t sibling_map;
@@ -2816,7 +2816,7 @@ static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
 	return p->time_slice * (100 - sd->per_cpu_gain) / 100;
 }
 
-static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+static int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	cpumask_t sibling_map;
@@ -6008,7 +6008,7 @@ next_sg:
  * Detach sched domains from a group of cpus specified in cpu_map
  * These cpus will now be attached to the NULL domain
  */
-static inline void detach_destroy_domains(const cpumask_t *cpu_map)
+static void detach_destroy_domains(const cpumask_t *cpu_map)
 {
 	int i;
 
```
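Two hunks (`pull_task()` and `can_migrate_task()`) look different from the rest only because those declarations kept `inline` on a line of its own ahead of the return type, so the patch deletes just that word and leaves the rest of the signature untouched. Note also that dropping the keyword does not forbid inlining: gcc remains free to inline a plain `static` function wherever it judges that profitable; the patch merely stops forcing duplication at every call site.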