author    Ralf Baechle <ralf@linux-mips.org>  2015-07-29 12:14:42 +0200
committer Ralf Baechle <ralf@linux-mips.org>  2015-09-03 12:08:01 +0200
commit    f51246efee2b6bc72e86bc1d16599fc7c455b986 (patch)
tree      f7087e74fb1f827ed70271f0d8ce03466535bb6c
parent    f1f5e414851fdb69a3200e5c15799ea4788d423e (diff)
MIPS: Get rid of finish_arch_switch().
MIPS was using finish_arch_switch() as a hook to restore and initialize CPU context for all threads, even newly created kernel and user threads. This is, however, entirely solvable within switch_to(), so get rid of finish_arch_switch(), which is in the way of scheduler cleanups.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
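For context, a minimal sketch of the ordering constraint the patch relies on. All helper names here (restore_next_cpu_context() in particular) are hypothetical stand-ins, not the kernel's real code; only the placement relative to resume() matters:

#define switch_to(prev, next, last)					\
do {									\
	/* Context for "next" must be set up HERE, before resume():	\
	 * this part runs for every thread, old or newly created. */	\
	restore_next_cpu_context(next);					\
	(last) = resume(prev, next, task_thread_info(next), __fpsave);	\
	/* Anything placed here is skipped for new threads, which	\
	 * enter at ret_from_fork()/ret_from_kernel_thread() rather	\
	 * than returning through resume(). */				\
} while (0)

This is why the patch below can delete finish_arch_switch() outright: by moving its body above the resume() call, the restore work is guaranteed to run for all threads, with no separate post-switch hook needed.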
-rw-r--r--  arch/mips/include/asm/switch_to.h  |  48
1 file changed, 23 insertions, 25 deletions
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 7163cd7fdd69..9733cd0266e4 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -83,45 +83,43 @@ do { if (cpu_has_rw_llb) { \
} \
} while (0)
+/*
+ * For newly created kernel threads switch_to() will return to
+ * ret_from_kernel_thread, newly created user threads to ret_from_fork.
+ * That is, everything following resume() will be skipped for new threads.
+ * So everything that matters to new threads should be placed before resume().
+ */
#define switch_to(prev, next, last) \
do { \
- u32 __c0_stat; \
s32 __fpsave = FP_SAVE_NONE; \
__mips_mt_fpaff_switch_to(prev); \
- if (cpu_has_dsp) \
+ if (cpu_has_dsp) { \
__save_dsp(prev); \
- if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) { \
- if (cop2_lazy_restore) \
- KSTK_STATUS(prev) &= ~ST0_CU2; \
- __c0_stat = read_c0_status(); \
- write_c0_status(__c0_stat | ST0_CU2); \
- cop2_save(prev); \
- write_c0_status(__c0_stat & ~ST0_CU2); \
+ __restore_dsp(next); \
+ } \
+ if (cop2_present) { \
+ set_c0_status(ST0_CU2); \
+ if ((KSTK_STATUS(prev) & ST0_CU2)) { \
+ if (cop2_lazy_restore) \
+ KSTK_STATUS(prev) &= ~ST0_CU2; \
+ cop2_save(prev); \
+ } \
+ if (KSTK_STATUS(next) & ST0_CU2 && \
+ !cop2_lazy_restore) { \
+ cop2_restore(next); \
+ } \
+ clear_c0_status(ST0_CU2); \
} \
__clear_software_ll_bit(); \
if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU)) \
__fpsave = FP_SAVE_SCALAR; \
if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA)) \
__fpsave = FP_SAVE_VECTOR; \
- (last) = resume(prev, next, task_thread_info(next), __fpsave); \
-} while (0)
-
-#define finish_arch_switch(prev) \
-do { \
- u32 __c0_stat; \
- if (cop2_present && !cop2_lazy_restore && \
- (KSTK_STATUS(current) & ST0_CU2)) { \
- __c0_stat = read_c0_status(); \
- write_c0_status(__c0_stat | ST0_CU2); \
- cop2_restore(current); \
- write_c0_status(__c0_stat & ~ST0_CU2); \
- } \
- if (cpu_has_dsp) \
- __restore_dsp(current); \
if (cpu_has_userlocal) \
- write_c0_userlocal(current_thread_info()->tp_value); \
+ write_c0_userlocal(task_thread_info(next)->tp_value); \
__restore_watch(); \
disable_msa(); \
+ (last) = resume(prev, next, task_thread_info(next), __fpsave); \
} while (0)
#endif /* _ASM_SWITCH_TO_H */
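A secondary simplification above is replacing the open-coded save/modify/restore of the CP0 Status register (via the __c0_stat temporary) with set_c0_status()/clear_c0_status(). Those helpers are generated by __BUILD_SET_C0(status) in arch/mips/include/asm/mipsregs.h; the following is a simplified sketch of their behaviour, not the kernel's exact generated code:

/* Simplified sketch of the CP0 Status bit helpers. */
static inline unsigned long set_c0_status(unsigned long bits)
{
	unsigned long old = read_c0_status();

	write_c0_status(old | bits);	/* enable the requested bits */
	return old;
}

static inline unsigned long clear_c0_status(unsigned long bits)
{
	unsigned long old = read_c0_status();

	write_c0_status(old & ~bits);	/* disable the requested bits */
	return old;
}

The net effect matches the removed read_c0_status()/write_c0_status() pair: ST0_CU2 is set around the coprocessor 2 save/restore and cleared afterwards, but without the __c0_stat temporary.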