author     Peter Zijlstra <peterz@infradead.org>   2008-11-24 17:06:57 +0100
committer  Ingo Molnar <mingo@elte.hu>             2009-01-07 18:52:44 +0100
commit     490dea45d00f01847ebebd007685d564aaf2cd98 (patch)
tree       a1f559fd497b10c21479b378ffb262d517cb627b /include/linux/sched.h
parent     ede6f5aea054d3fb67c78857f7abdee602302043 (diff)
itimers: remove the per-cpu-ish-ness
Either we bounce one cacheline per cpu per tick, yielding n^2 bounces, or we just bounce a single one.

Also, using per-cpu allocations for the thread groups complicates the per-cpu allocator in that it is currently aimed to be a fixed-size allocator, and the only possible extension to that would be vmap-based, which is seriously constrained on 32-bit archs. So making the per-cpu memory requirement depend on the number of processes is an issue.

Lastly, it didn't deal with cpu-hotplug, although admittedly that might be fixable.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
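For context on the n^2 claim: the removed scheme kept one struct task_cputime per cpu, so a reader had to walk every possible cpu and sum the copies, touching one remote cacheline per cpu on each read. Below is a rough sketch of that removed read path, reconstructed from the description above (kernel context assumed; cputime_zero and cputime_add were the cputime primitives of that era, but the exact body is approximate, not the literal pre-patch code):

	/* Sketch of the *removed* per-cpu read path (approximate). */
	void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
	{
		struct task_cputime *tot;
		int i;

		times->utime = cputime_zero;
		times->stime = cputime_zero;
		times->sum_exec_runtime = 0;

		/* One remote cacheline touched per possible cpu, per reader. */
		for_each_possible_cpu(i) {
			tot = per_cpu_ptr(tsk->signal->cputime.totals, i);
			times->utime = cputime_add(times->utime, tot->utime);
			times->stime = cputime_add(times->stime, tot->stime);
			times->sum_exec_runtime += tot->sum_exec_runtime;
		}
	}

With one reader per cpu per tick, that is n readers times n cachelines: the n^2 bounces the message refers to. The patch collapses this into a single spinlock-protected struct, as the hunks below show.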
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 29 ++++++++++++++++++-----------
1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4cae9b81a1f8..c20943eabb4c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -450,6 +450,7 @@ struct task_cputime {
cputime_t utime;
cputime_t stime;
unsigned long long sum_exec_runtime;
+ spinlock_t lock;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp stime
@@ -465,7 +466,7 @@ struct task_cputime {
* used for thread group CPU clock calculations.
*/
struct thread_group_cputime {
- struct task_cputime *totals;
+ struct task_cputime totals;
};
/*
@@ -2180,24 +2181,30 @@ static inline int spin_needbreak(spinlock_t *lock)
* Thread group CPU time accounting.
*/
-extern int thread_group_cputime_alloc(struct task_struct *);
-extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
-
-static inline void thread_group_cputime_init(struct signal_struct *sig)
+static inline
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
- sig->cputime.totals = NULL;
+ struct task_cputime *totals = &tsk->signal->cputime.totals;
+ unsigned long flags;
+
+ spin_lock_irqsave(&totals->lock, flags);
+ *times = *totals;
+ spin_unlock_irqrestore(&totals->lock, flags);
}
-static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
+static inline void thread_group_cputime_init(struct signal_struct *sig)
{
- if (curr->signal->cputime.totals)
- return 0;
- return thread_group_cputime_alloc(curr);
+ sig->cputime.totals = (struct task_cputime){
+ .utime = cputime_zero,
+ .stime = cputime_zero,
+ .sum_exec_runtime = 0,
+ };
+
+ spin_lock_init(&sig->cputime.totals.lock);
}
static inline void thread_group_cputime_free(struct signal_struct *sig)
{
- free_percpu(sig->cputime.totals);
}
/*
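The header hunks above only show the reader side; the accounting writers (the account_group_*() helpers, which at this point lived outside this header, presumably in kernel/sched_stats.h) take the same lock around each accumulation. A hedged sketch of what an update looks like under the new scheme, modeled on account_group_user_time() (the companion change is not part of this hunk and may differ in detail):

	/* Hedged sketch of the writer side under the single-struct scheme. */
	static inline void account_group_user_time(struct task_struct *tsk,
						   cputime_t cputime)
	{
		struct task_cputime *times = &tsk->signal->cputime.totals;

		/*
		 * Tick-time callers already run with interrupts disabled, so a
		 * plain spin_lock suffices here; the reader above uses
		 * spin_lock_irqsave() because it can be called with irqs on.
		 */
		spin_lock(&times->lock);
		times->utime = cputime_add(times->utime, cputime);
		spin_unlock(&times->lock);
	}

Whatever the exact call sites, the design point stands: every update now contends on one lock and one cacheline, trading the per-cpu memory footprint and O(n^2) read traffic for a single bounce per accounting event.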