author     Al Viro <viro@parcelfarce.linux.theplanet.co.uk>  2005-11-13 16:06:55 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-11-13 18:14:13 -0800
commit     a1261f54611ec4ad6a7ab7080f86747e3ac3685b (patch)
tree       8a65c419da590e3712543f69284fb5f8cd613a37
parent     7feacd53347c04aee789ba5d632eda0c3fc421c4 (diff)
[PATCH] m68k: introduce task_thread_info
New helper - task_thread_info(task). On platforms that have thread_info allocated separately (i.e. in the default case) it simply returns task->thread_info. m68k wants (and for good reasons) to embed its thread_info into task_struct, so it will (in a later patch) have a task_thread_info() of its own. For now we just add a macro for the generic case and convert existing instances of its body in the core kernel to uses of the new macro. Obviously safe - all normal architectures get the same preprocessor output they used to get.

Signed-off-by: Al Viro <viro@parcelfarce.linux.theplanet.co.uk>
Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
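For readers unfamiliar with the two layouts, the following minimal, self-contained C sketch illustrates what the macro abstracts over. It is not the kernel code: the struct members, the EMBED_THREAD_INFO switch and the demo main() are illustrative assumptions, not the real definitions. In the generic case thread_info is allocated separately and task->thread_info points to it; in the m68k-style case (the later patch) thread_info would be embedded in task_struct and task_thread_info() would resolve to its address. Either way, callers such as set_tsk_thread_flag() are written against task_thread_info() and never care which layout is in effect.

/*
 * Illustrative sketch only -- simplified stand-ins for the kernel
 * structures, not the real definitions.
 */
#include <stdio.h>
#include <stdlib.h>

struct thread_info {
	int cpu;		/* which CPU the task last ran on */
	unsigned long flags;	/* TIF_xxx flags would live here */
};

#ifndef EMBED_THREAD_INFO
/* Generic case: thread_info allocated separately, task points to it. */
struct task_struct {
	struct thread_info *thread_info;
	/* ... other fields ... */
};
#define task_thread_info(task) ((task)->thread_info)
#else
/* m68k-style case (later patch): thread_info embedded in task_struct. */
struct task_struct {
	struct thread_info info;
	/* ... other fields ... */
};
#define task_thread_info(task) (&(task)->info)
#endif

/* Caller written once against the macro; works with either layout. */
static void set_task_cpu_demo(struct task_struct *p, int cpu)
{
	task_thread_info(p)->cpu = cpu;
}

int main(void)
{
	static struct task_struct tsk;	/* static => zero-initialized */
#ifndef EMBED_THREAD_INFO
	tsk.thread_info = calloc(1, sizeof(struct thread_info));
#endif
	set_task_cpu_demo(&tsk, 3);
	printf("cpu = %d\n", task_thread_info(&tsk)->cpu);
#ifndef EMBED_THREAD_INFO
	free(tsk.thread_info);
#endif
	return 0;
}

Compiled as-is the sketch uses the generic layout; compiled with -DEMBED_THREAD_INFO it uses the embedded layout, and the caller code is unchanged in both builds.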
-rw-r--r--   include/linux/sched.h | 16
-rw-r--r--   kernel/exit.c         |  2
-rw-r--r--   kernel/fork.c         |  4
-rw-r--r--   kernel/sched.c        |  6
4 files changed, 15 insertions(+), 13 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2bbf968b23d9..f8650314ba2f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1233,32 +1233,34 @@ static inline void task_unlock(struct task_struct *p)
spin_unlock(&p->alloc_lock);
}
+#define task_thread_info(task) (task)->thread_info
+
/* set thread flags in other task's structures
* - see asm/thread_info.h for TIF_xxxx flags available
*/
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
- set_ti_thread_flag(tsk->thread_info,flag);
+ set_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
- clear_ti_thread_flag(tsk->thread_info,flag);
+ clear_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
- return test_and_set_ti_thread_flag(tsk->thread_info,flag);
+ return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
- return test_and_clear_ti_thread_flag(tsk->thread_info,flag);
+ return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
- return test_ti_thread_flag(tsk->thread_info,flag);
+ return test_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline void set_tsk_need_resched(struct task_struct *tsk)
@@ -1329,12 +1331,12 @@ extern void signal_wake_up(struct task_struct *t, int resume_stopped);
static inline unsigned int task_cpu(const struct task_struct *p)
{
- return p->thread_info->cpu;
+ return task_thread_info(p)->cpu;
}
static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
- p->thread_info->cpu = cpu;
+ task_thread_info(p)->cpu = cpu;
}
#else
diff --git a/kernel/exit.c b/kernel/exit.c
index 452a1d116178..ee515683b92d 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -859,7 +859,7 @@ fastcall NORET_TYPE void do_exit(long code)
if (group_dead && tsk->signal->leader)
disassociate_ctty(1);
- module_put(tsk->thread_info->exec_domain->module);
+ module_put(task_thread_info(tsk)->exec_domain->module);
if (tsk->binfmt)
module_put(tsk->binfmt->module);
diff --git a/kernel/fork.c b/kernel/fork.c
index 158710d22566..7ef352ce347b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -919,7 +919,7 @@ static task_t *copy_process(unsigned long clone_flags,
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
- if (!try_module_get(p->thread_info->exec_domain->module))
+ if (!try_module_get(task_thread_info(p)->exec_domain->module))
goto bad_fork_cleanup_count;
if (p->binfmt && !try_module_get(p->binfmt->module))
@@ -1180,7 +1180,7 @@ bad_fork_cleanup:
if (p->binfmt)
module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
- module_put(p->thread_info->exec_domain->module);
+ module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
put_group_info(p->group_info);
atomic_dec(&p->user->processes);
diff --git a/kernel/sched.c b/kernel/sched.c
index b6506671b2be..831f7e9d8f1c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1437,7 +1437,7 @@ void fastcall sched_fork(task_t *p, int clone_flags)
#endif
#ifdef CONFIG_PREEMPT
/* Want to start with kernel preemption disabled. */
- p->thread_info->preempt_count = 1;
+ task_thread_info(p)->preempt_count = 1;
#endif
/*
* Share the timeslice between parent and child, thus the
@@ -4410,9 +4410,9 @@ void __devinit init_idle(task_t *idle, int cpu)
/* Set the preempt count _outside_ the spinlocks! */
#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
- idle->thread_info->preempt_count = (idle->lock_depth >= 0);
+ task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
#else
- idle->thread_info->preempt_count = 0;
+ task_thread_info(idle)->preempt_count = 0;
#endif
}