From c56dadf39761a6157239cac39e3988998c994f98 Mon Sep 17 00:00:00 2001
From: Konstantin Khlebnikov
Date: Wed, 15 Jul 2015 12:52:03 +0300
Subject: sched/preempt, powerpc, kvm: Use need_resched() instead of should_resched()

Function should_resched() is equal to (!preempt_count() && need_resched()).
In a preemptible kernel, preempt_count() is non-zero at this point because
vc->lock is held, so should_resched() can never return true here.

Signed-off-by: Konstantin Khlebnikov
Signed-off-by: Peter Zijlstra (Intel)
Cc: Alexander Graf
Cc: Boris Ostrovsky
Cc: David Vrabel
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/20150715095203.12246.72922.stgit@buzz
Signed-off-by: Ingo Molnar
---
 arch/powerpc/kvm/book3s_hv.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 68d067ad4222..a9f753fb73a8 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2178,7 +2178,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		vc->runner = vcpu;
 		if (n_ceded == vc->n_runnable) {
 			kvmppc_vcore_blocked(vc);
-		} else if (should_resched()) {
+		} else if (need_resched()) {
 			vc->vcore_state = VCORE_PREEMPT;
 			/* Let something else run */
 			cond_resched_lock(&vc->lock);
--
cgit v1.2.3
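The failure mode above is easy to reproduce outside the kernel. The following self-contained C sketch models the two predicates (the model_* names, the global counters, and the userspace framing are illustrative assumptions, not kernel code): a should_resched()-style check that demands preempt_count == 0 can never fire while a spinlock such as vc->lock is held, while a need_resched()-style check still sees the pending reschedule.

#include <stdbool.h>
#include <stdio.h>

static int preempt_count;              /* models preempt_count() */
static bool tif_need_resched = true;   /* models TIF_NEED_RESCHED being set */

/* Pre-patch semantics: only true when nothing disables preemption. */
static bool model_should_resched(void)
{
        return preempt_count == 0 && tif_need_resched;
}

/* need_resched() looks only at the flag. */
static bool model_need_resched(void)
{
        return tif_need_resched;
}

int main(void)
{
        preempt_count++;        /* models spin_lock(&vc->lock) */
        printf("should_resched under lock: %d\n", model_should_resched()); /* 0 */
        printf("need_resched under lock:   %d\n", model_need_resched());   /* 1 */
        preempt_count--;        /* models spin_unlock(&vc->lock) */
        return 0;
}

Once need_resched() reports true, cond_resched_lock() performs the actual unlock/reschedule/relock sequence.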
From fe32d3cd5e8eb0f82e459763374aa80797023403 Mon Sep 17 00:00:00 2001
From: Konstantin Khlebnikov
Date: Wed, 15 Jul 2015 12:52:04 +0300
Subject: sched/preempt: Fix cond_resched_lock() and cond_resched_softirq()

These functions check should_resched() before unlocking the spinlock or
re-enabling bottom halves. At that point preempt_count is always non-zero,
so should_resched() always returns false; cond_resched_lock() rescheduled
only when spin_needbreak() was set.

This patch adds a "preempt_offset" argument to should_resched(), together
with preempt_count offset constants to pass for it:

PREEMPT_DISABLE_OFFSET  - offset after preempt_disable()
PREEMPT_LOCK_OFFSET     - offset after spin_lock()
SOFTIRQ_DISABLE_OFFSET  - offset after local_bh_disable()
SOFTIRQ_LOCK_OFFSET     - offset after spin_lock_bh()

Signed-off-by: Konstantin Khlebnikov
Signed-off-by: Peter Zijlstra (Intel)
Cc: Alexander Graf
Cc: Boris Ostrovsky
Cc: David Vrabel
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Fixes: bdb438065890 ("sched: Extract the basic add/sub preempt_count modifiers")
Link: http://lkml.kernel.org/r/20150715095204.12246.98268.stgit@buzz
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/preempt.h |  4 ++--
 include/asm-generic/preempt.h  |  5 +++--
 include/linux/preempt.h        | 19 ++++++++++++++-----
 include/linux/sched.h          |  6 ------
 kernel/sched/core.c            |  6 +++---
 5 files changed, 22 insertions(+), 18 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index dca71714f860..b12f81022a6b 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -90,9 +90,9 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
 {
-	return unlikely(!raw_cpu_read_4(__preempt_count));
+	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
 }

 #ifdef CONFIG_PREEMPT

diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index d0a7a4753db2..0bec580a4885 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -71,9 +71,10 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
 {
-	return unlikely(!preempt_count() && tif_need_resched());
+	return unlikely(preempt_count() == preempt_offset &&
+			tif_need_resched());
 }

 #ifdef CONFIG_PREEMPT

diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 84991f185173..bea8dd8ff5e0 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -84,12 +84,20 @@
  */
 #define in_nmi()	(preempt_count() & NMI_MASK)

+/*
+ * The preempt_count offset after preempt_disable();
+ */
 #if defined(CONFIG_PREEMPT_COUNT)
-# define PREEMPT_DISABLE_OFFSET 1
+# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
 #else
-# define PREEMPT_DISABLE_OFFSET 0
+# define PREEMPT_DISABLE_OFFSET	0
 #endif

+/*
+ * The preempt_count offset after spin_lock()
+ */
+#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET
+
 /*
  * The preempt_count offset needed for things like:
  *
@@ -103,7 +111,7 @@
  *
  * Work as expected.
  */
-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)

 /*
  * Are we running in atomic context? WARNING: this macro cannot
@@ -124,7 +132,8 @@
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void preempt_count_add(int val);
 extern void preempt_count_sub(int val);
-#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
+#define preempt_count_dec_and_test() \
+	({ preempt_count_sub(1); should_resched(0); })
 #else
 #define preempt_count_add(val)	__preempt_count_add(val)
 #define preempt_count_sub(val)	__preempt_count_sub(val)
@@ -184,7 +193,7 @@ do { \

 #define preempt_check_resched() \
 do { \
-	if (should_resched()) \
+	if (should_resched(0)) \
 		__preempt_schedule(); \
 } while (0)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 65a8a8651596..9c144657aace 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2891,12 +2891,6 @@ extern int _cond_resched(void);

 extern int __cond_resched_lock(spinlock_t *lock);

-#ifdef CONFIG_PREEMPT_COUNT
-#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
-#else
-#define PREEMPT_LOCK_OFFSET	0
-#endif
-
 #define cond_resched_lock(lock) ({				\
 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
 	__cond_resched_lock(lock);				\

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fa5826cc612f..f5fad2b12baf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4496,7 +4496,7 @@ SYSCALL_DEFINE0(sched_yield)

 int __sched _cond_resched(void)
 {
-	if (should_resched()) {
+	if (should_resched(0)) {
 		preempt_schedule_common();
 		return 1;
 	}
@@ -4514,7 +4514,7 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int __cond_resched_lock(spinlock_t *lock)
 {
-	int resched = should_resched();
+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
 	int ret = 0;

 	lockdep_assert_held(lock);
@@ -4536,7 +4536,7 @@ int __sched __cond_resched_softirq(void)
 {
 	BUG_ON(!in_softirq());

-	if (should_resched()) {
+	if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
 		local_bh_enable();
 		preempt_schedule_common();
 		local_bh_disable();
--
cgit v1.2.3
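The core of the fix can likewise be modelled in a few lines of plain C. In this sketch the offset constants use illustrative values rather than the kernel's real preempt_count encoding (in the real layout the softirq count lives in its own bit field), but the logic is the same: the caller tells should_resched() exactly which preempt_count value it expects to be holding, so the check can succeed without the count being zero.

#include <stdbool.h>
#include <stdio.h>

#define PREEMPT_OFFSET          1               /* illustrative values, not */
#define PREEMPT_LOCK_OFFSET     PREEMPT_OFFSET  /* the kernel's encoding    */
#define SOFTIRQ_DISABLE_OFFSET  (1 << 8)

static int preempt_count;
static bool tif_need_resched = true;

/* Post-patch semantics: resched only if the count matches the stated offset. */
static bool model_should_resched(int preempt_offset)
{
        return preempt_count == preempt_offset && tif_need_resched;
}

int main(void)
{
        preempt_count += PREEMPT_LOCK_OFFSET;            /* spin_lock() */
        printf("%d\n", model_should_resched(0));                    /* 0: old behaviour */
        printf("%d\n", model_should_resched(PREEMPT_LOCK_OFFSET));  /* 1: fixed */
        preempt_count -= PREEMPT_LOCK_OFFSET;            /* spin_unlock() */

        preempt_count += SOFTIRQ_DISABLE_OFFSET;         /* local_bh_disable() */
        printf("%d\n", model_should_resched(SOFTIRQ_DISABLE_OFFSET)); /* 1 */
        preempt_count -= SOFTIRQ_DISABLE_OFFSET;         /* local_bh_enable() */
        return 0;
}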
From 7baa7aecdd2f009ddd00a4ad0690c6918bab5b01 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 29 Jul 2015 12:41:49 +0100
Subject: sched, arm: Remove finish_arch_switch()

Fold finish_arch_switch() into switch_to().

Signed-off-by: Will Deacon
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Cc: linux@arm.linux.org.uk
[ Fixed up the SOB chain. ]
Signed-off-by: Ingo Molnar
---
 arch/arm/include/asm/switch_to.h | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index c99e259469f7..12ebfcc1d539 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -10,7 +10,9 @@
  * CPU.
  */
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
-#define finish_arch_switch(prev)	dsb(ish)
+#define __complete_pending_tlbi()	dsb(ish)
+#else
+#define __complete_pending_tlbi()
 #endif

 /*
@@ -22,6 +24,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info

 #define switch_to(prev,next,last)					\
 do {									\
+	__complete_pending_tlbi();					\
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
 } while (0)
--
cgit v1.2.3
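The #else branch that defines __complete_pending_tlbi() to nothing is what lets switch_to() invoke the helper unconditionally. A minimal freestanding sketch of this configure-away pattern, with a hypothetical CONFIG_BARRIER_ON_SWITCH switch and puts() standing in for the real dsb(ish) barrier:

#include <stdio.h>

#ifdef CONFIG_BARRIER_ON_SWITCH                 /* hypothetical stand-in */
#define __complete_pending_tlbi()  puts("dsb ish")
#else
#define __complete_pending_tlbi()               /* compiles away to nothing */
#endif

int main(void)
{
        __complete_pending_tlbi();              /* safe to call either way */
        puts("__switch_to()");
        return 0;
}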
From 6916ce3ffff80f0102d39922ddb3b8c4540f2ea2 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Wed, 29 Jul 2015 12:14:42 +0200
Subject: sched, MIPS: Get rid of finish_arch_switch()

MIPS was using finish_arch_switch() as a hook to restore and initialize
CPU context for all threads, even newly created kernel and user threads.
This is, however, entirely solvable within switch_to(), so get rid of
finish_arch_switch(), which is in the way of scheduler cleanups.

Signed-off-by: Ralf Baechle
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 arch/mips/include/asm/switch_to.h | 48 +++++++++++++++++++++++-------------------------
 1 file changed, 23 insertions(+), 25 deletions(-)

(limited to 'arch')

diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 7163cd7fdd69..9733cd0266e4 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -83,45 +83,43 @@ do {	if (cpu_has_rw_llb) {					\
 	}								\
 } while (0)

+/*
+ * For newly created kernel threads switch_to() will return to
+ * ret_from_kernel_thread, newly created user threads to ret_from_fork.
+ * That is, everything following resume() will be skipped for new threads.
+ * So everything that matters to new threads should be placed before resume().
+ */
 #define switch_to(prev, next, last)					\
 do {									\
-	u32 __c0_stat;							\
 	s32 __fpsave = FP_SAVE_NONE;					\
 	__mips_mt_fpaff_switch_to(prev);				\
-	if (cpu_has_dsp)						\
+	if (cpu_has_dsp) {						\
 		__save_dsp(prev);					\
-	if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) {		\
-		if (cop2_lazy_restore)					\
-			KSTK_STATUS(prev) &= ~ST0_CU2;			\
-		__c0_stat = read_c0_status();				\
-		write_c0_status(__c0_stat | ST0_CU2);			\
-		cop2_save(prev);					\
-		write_c0_status(__c0_stat & ~ST0_CU2);			\
+		__restore_dsp(next);					\
+	}								\
+	if (cop2_present) {						\
+		set_c0_status(ST0_CU2);					\
+		if ((KSTK_STATUS(prev) & ST0_CU2)) {			\
+			if (cop2_lazy_restore)				\
+				KSTK_STATUS(prev) &= ~ST0_CU2;		\
+			cop2_save(prev);				\
+		}							\
+		if (KSTK_STATUS(next) & ST0_CU2 &&			\
+		    !cop2_lazy_restore) {				\
+			cop2_restore(next);				\
+		}							\
+		clear_c0_status(ST0_CU2);				\
 	}								\
 	__clear_software_ll_bit();					\
 	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU))		\
 		__fpsave = FP_SAVE_SCALAR;				\
 	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA))		\
 		__fpsave = FP_SAVE_VECTOR;				\
-	(last) = resume(prev, next, task_thread_info(next), __fpsave);	\
-} while (0)
-
-#define finish_arch_switch(prev)					\
-do {									\
-	u32 __c0_stat;							\
-	if (cop2_present && !cop2_lazy_restore &&			\
-	    (KSTK_STATUS(current) & ST0_CU2)) {				\
-		__c0_stat = read_c0_status();				\
-		write_c0_status(__c0_stat | ST0_CU2);			\
-		cop2_restore(current);					\
-		write_c0_status(__c0_stat & ~ST0_CU2);			\
-	}								\
-	if (cpu_has_dsp)						\
-		__restore_dsp(current);					\
 	if (cpu_has_userlocal)						\
-		write_c0_userlocal(current_thread_info()->tp_value);	\
+		write_c0_userlocal(task_thread_info(next)->tp_value);	\
 	__restore_watch();						\
 	disable_msa();							\
+	(last) = resume(prev, next, task_thread_info(next), __fpsave);	\
 } while (0)

 #endif /* _ASM_SWITCH_TO_H */
--
cgit v1.2.3

From dfdbd59712d58e2ead89df616798968392c5423b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 29 Jul 2015 17:28:04 +0200
Subject: sched, avr32: Remove finish_arch_switch()

Fold the tracing hook into switch_to() in order to remove
finish_arch_switch().

Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Hans-Christian Egtvedt
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 arch/avr32/include/asm/switch_to.h | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/avr32/include/asm/switch_to.h b/arch/avr32/include/asm/switch_to.h
index 9a8e9d5208d4..6f00581c3d4f 100644
--- a/arch/avr32/include/asm/switch_to.h
+++ b/arch/avr32/include/asm/switch_to.h
@@ -15,11 +15,13 @@
  */
 #ifdef CONFIG_OWNERSHIP_TRACE
 #include <asm/ocd.h>
-#define finish_arch_switch(prev)			\
+#define ocd_switch(prev, next)				\
 do {							\
 	ocd_write(PID, prev->pid);			\
-	ocd_write(PID, current->pid);			\
+	ocd_write(PID, next->pid);			\
 } while(0)
+#else
+#define ocd_switch(prev, next)
 #endif

 /*
@@ -38,6 +40,7 @@ extern struct task_struct *__switch_to(struct task_struct *,
 		struct cpu_context *);
 #define switch_to(prev, next, last)					\
 do {									\
+	ocd_switch(prev, next);						\
 	last = __switch_to(prev, &prev->thread.cpu_context + 1,	\
 			   &next->thread.cpu_context);			\
 } while (0)
--
cgit v1.2.3
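The ordering rule stated in the new MIPS comment is generic: everything that matters to a newly created thread must run before the low-level resume() call, because a new thread wakes up in ret_from_kernel_thread or ret_from_fork and never executes the tail of switch_to(). A hedged skeleton of the rule (all names are illustrative, not MIPS code):

#include <stdio.h>

struct task { const char *name; };

static void save_state(struct task *prev)    { printf("save %s\n", prev->name); }
static void restore_state(struct task *next) { printf("restore %s\n", next->name); }

/* Stand-in for resume(): a brand-new thread begins life elsewhere and
 * never executes whatever follows this call in switch_to(). */
static struct task *low_level_resume(struct task *prev, struct task *next)
{
        printf("resume %s -> %s\n", prev->name, next->name);
        return prev;
}

static struct task *model_switch_to(struct task *prev, struct task *next)
{
        save_state(prev);
        restore_state(next);    /* must happen BEFORE the resume() call */
        return low_level_resume(prev, next);
        /* anything placed here would be skipped for new threads */
}

int main(void)
{
        struct task a = { "prev" }, b = { "next" };
        model_switch_to(&a, &b);
        return 0;
}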
From 08960e344800267df4980c2180e3c2c0c9b5f460 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 29 Jul 2015 17:27:17 +0200
Subject: sched, score: Remove finish_arch_switch()

An empty implementation, make it go away.

Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 arch/score/include/asm/switch_to.h | 2 --
 1 file changed, 2 deletions(-)

(limited to 'arch')

diff --git a/arch/score/include/asm/switch_to.h b/arch/score/include/asm/switch_to.h
index 031756b59ece..fda3f83308d2 100644
--- a/arch/score/include/asm/switch_to.h
+++ b/arch/score/include/asm/switch_to.h
@@ -8,6 +8,4 @@ do {							\
 	(last) = resume(prev, next, task_thread_info(next));	\
 } while (0)

-#define finish_arch_switch(prev)	do {} while (0)
-
 #endif /* _ASM_SCORE_SWITCH_TO_H */
--
cgit v1.2.3

From b31fdac2a921c8bf5ed9fe2c908625a54ce91e92 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 29 Jul 2015 17:14:48 +0200
Subject: sched, sh: Fold finish_arch_switch() into switch_to()

The code looks buggy; why would we be restoring the previous task's
DSP state after we've switched to the next task?

Fix that and put the restore in switch_to(), removing the need for
finish_arch_switch().

Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Cc: michael@amarulasolutions.com
Signed-off-by: Ingo Molnar
---
 arch/sh/include/asm/switch_to_32.h | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

(limited to 'arch')

diff --git a/arch/sh/include/asm/switch_to_32.h b/arch/sh/include/asm/switch_to_32.h
index 0c065513e7ac..7661b4ba8259 100644
--- a/arch/sh/include/asm/switch_to_32.h
+++ b/arch/sh/include/asm/switch_to_32.h
@@ -78,6 +78,8 @@ do {								\
 								\
 	if (is_dsp_enabled(prev))				\
 		__save_dsp(prev);				\
+	if (is_dsp_enabled(next))				\
+		__restore_dsp(next);				\
 								\
 	__ts1 = (u32 *)&prev->thread.sp;			\
 	__ts2 = (u32 *)&prev->thread.pc;			\
@@ -125,10 +127,4 @@ do {								\
 	last = __last;						\
 } while (0)

-#define finish_arch_switch(prev)				\
-do {								\
-	if (is_dsp_enabled(prev))				\
-		__restore_dsp(prev);				\
-} while (0)
-
 #endif /* __ASM_SH_SWITCH_TO_32_H */
--
cgit v1.2.3
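The sh bug was about which task's state gets put back on the CPU: the old finish_arch_switch(prev) ran after the switch and reloaded the outgoing task's DSP registers over the incoming task's. A toy model of the wrong and the fixed behaviour (a single illustrative register, not sh code):

#include <stdio.h>

struct task { int dsp_reg; };
static int cpu_dsp_reg;                 /* models the CPU's DSP register bank */

int main(void)
{
        struct task prev = { .dsp_reg = 1 }, next = { .dsp_reg = 2 };

        /* Old scheme: finish_arch_switch(prev) restored the OUTGOING
         * task's state after the switch, clobbering the incoming one. */
        cpu_dsp_reg = prev.dsp_reg;
        printf("old: next runs with dsp_reg=%d (wrong)\n", cpu_dsp_reg);

        /* Fixed scheme: switch_to() restores the INCOMING task's state. */
        cpu_dsp_reg = next.dsp_reg;
        printf("new: next runs with dsp_reg=%d\n", cpu_dsp_reg);
        return 0;
}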
From fe363adb922583bacfbd5a30a4c773ccff9bb393 Mon Sep 17 00:00:00 2001
From: Chris Metcalf
Date: Wed, 29 Jul 2015 13:06:29 -0400
Subject: sched, tile: Remove finish_arch_switch

Move the simulator bits into switch_to() and use
finish_arch_post_lock_switch() for the homecache migration bits.

Signed-off-by: Chris Metcalf
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 arch/tile/include/asm/switch_to.h | 10 +++-------
 arch/tile/kernel/process.c        | 17 +++++++++++++++--
 2 files changed, 18 insertions(+), 9 deletions(-)

(limited to 'arch')

diff --git a/arch/tile/include/asm/switch_to.h b/arch/tile/include/asm/switch_to.h
index b8f888cbe6b0..422842a56a3d 100644
--- a/arch/tile/include/asm/switch_to.h
+++ b/arch/tile/include/asm/switch_to.h
@@ -53,15 +53,11 @@ extern unsigned long get_switch_to_pc(void);
  * Kernel threads can check to see if they need to migrate their
  * stack whenever they return from a context switch; for user
  * threads, we defer until they are returning to user-space.
+ * We defer homecache migration until the runqueue lock is released.
  */
-#define finish_arch_switch(prev) do {                                     \
-	if (unlikely((prev)->state == TASK_DEAD))                         \
-		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |       \
-			((prev)->pid << _SIM_CONTROL_OPERATOR_BITS));     \
-	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |             \
-		(current->pid << _SIM_CONTROL_OPERATOR_BITS));            \
+#define finish_arch_post_lock_switch() do {                               \
 	if (current->mm == NULL && !kstack_hash &&                        \
-	    current_thread_info()->homecache_cpu != smp_processor_id())   \
+	    current_thread_info()->homecache_cpu != raw_smp_processor_id()) \
 		homecache_migrate_kthread();                              \
 } while (0)

diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index a45213781ad0..1087375cd57c 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -448,11 +448,24 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,

 	/*
 	 * Switch kernel SP, PC, and callee-saved registers.
+	 * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
+	 * Once we return from this function we will have changed stacks
+	 * and be running with current == next.
+	 */
+	__switch_to(prev, next, next_current_ksp0(next));
+
+	/* Notify the simulator of task switch and task exit. */
+	if (unlikely(prev->state == TASK_DEAD))
+		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |
+			     (prev->pid << _SIM_CONTROL_OPERATOR_BITS));
+	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |
+		     (next->pid << _SIM_CONTROL_OPERATOR_BITS));
+
+	/*
 	 * In the context of the new task, return the old task pointer
 	 * (i.e. the task that actually called __switch_to).
-	 * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
 	 */
-	return __switch_to(prev, next, next_current_ksp0(next));
+	return prev;
 }

 /*
--
cgit v1.2.3
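Unlike the removed finish_arch_switch(), the finish_arch_post_lock_switch() hook is invoked by the scheduler only after the runqueue lock has been dropped, which is what makes work like homecache_migrate_kthread() safe there. A much-simplified model of that ordering (the real finish_task_switch() in kernel/sched/core.c does far more):

#include <stdio.h>

/* Arch-overridable hook; the default expands to nothing, mirroring the
 * kernel's pattern (simplified model, not the scheduler's actual code). */
#ifndef finish_arch_post_lock_switch
#define finish_arch_post_lock_switch()  do { } while (0)
#endif

static void model_finish_task_switch(void)
{
        /* finish_lock_switch(): the runqueue lock is dropped here ... */
        puts("raw_spin_unlock_irq(&rq->lock)");
        /* ... so the hook below runs without rq->lock held and may do
         * work that could migrate the task, such as homecache migration. */
        finish_arch_post_lock_switch();
}

int main(void)
{
        model_finish_task_switch();
        return 0;
}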
From bef033a3c68a0cd31c81973946be768e9dd7ba42 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Tue, 4 Aug 2015 09:42:54 +0200
Subject: sched, sparc32: Update scheduler comments in copy_thread()

There's no finish_arch_switch() anymore in the latest scheduler tree.
Also update some other details.

Cc: David S. Miller
Cc: Peter Zijlstra
Cc: Mike Galbraith
Cc: Linus Torvalds
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Cc: sparclinux@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 arch/sparc/kernel/process_32.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch')

diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 50e7b626afe8..c5113c7ce2fd 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -333,11 +333,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ);

 	/*
-	 * A new process must start with interrupts closed in 2.5,
-	 * because this is how Mingo's scheduler works (see schedule_tail
-	 * and finish_arch_switch). If we do not do it, a timer interrupt hits
-	 * before we unlock, attempts to re-take the rq->lock, and then we die.
-	 * Thus, kpsr|=PSR_PIL.
+	 * A new process must start with interrupts disabled, see schedule_tail()
+	 * and finish_task_switch(). (If we do not do it and if a timer interrupt
+	 * hits before we unlock and attempts to take the rq->lock, we deadlock.)
+	 *
+	 * Thus, kpsr |= PSR_PIL.
 	 */
 	ti->ksp = (unsigned long) new_stack;
 	p->thread.kregs = childregs;
--
cgit v1.2.3

From 1eaef888158dc441dcd00c20779251cfa5e756b3 Mon Sep 17 00:00:00 2001
From: Chris Metcalf
Date: Wed, 5 Aug 2015 10:03:32 -0400
Subject: tile: Reorganize _switch_to()

Move the simulator bits into finish_arch_post_lock_switch() and
properly call __switch_to() from _switch_to().

Signed-off-by: Chris Metcalf
Cc:
Cc: Linus Torvalds
Cc: Peter Zijlstra (Intel)
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/1438783412-10990-1-git-send-email-cmetcalf@ezchip.com
[ Made it a delta to: fe363adb9225 ("sched, tile: Remove finish_arch_switch"). ]
Signed-off-by: Ingo Molnar
---
 arch/tile/include/asm/switch_to.h |  2 ++
 arch/tile/kernel/process.c        | 16 ++++------------
 2 files changed, 6 insertions(+), 12 deletions(-)

(limited to 'arch')

diff --git a/arch/tile/include/asm/switch_to.h b/arch/tile/include/asm/switch_to.h
index 422842a56a3d..34ee72705521 100644
--- a/arch/tile/include/asm/switch_to.h
+++ b/arch/tile/include/asm/switch_to.h
@@ -56,6 +56,8 @@ extern unsigned long get_switch_to_pc(void);
  * We defer homecache migration until the runqueue lock is released.
  */
 #define finish_arch_post_lock_switch() do {                               \
+	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |             \
+		(current->pid << _SIM_CONTROL_OPERATOR_BITS));            \
 	if (current->mm == NULL && !kstack_hash &&                        \
 	    current_thread_info()->homecache_cpu != raw_smp_processor_id()) \
 		homecache_migrate_kthread();                              \
 } while (0)

diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 1087375cd57c..7d5769310bef 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -446,26 +446,18 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 	hardwall_switch_tasks(prev, next);
 #endif

-	/*
-	 * Switch kernel SP, PC, and callee-saved registers.
-	 * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
-	 * Once we return from this function we will have changed stacks
-	 * and be running with current == next.
-	 */
-	__switch_to(prev, next, next_current_ksp0(next));
-
-	/* Notify the simulator of task switch and task exit. */
+	/* Notify the simulator of task exit. */
 	if (unlikely(prev->state == TASK_DEAD))
 		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |
 			     (prev->pid << _SIM_CONTROL_OPERATOR_BITS));
-	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |
-		     (next->pid << _SIM_CONTROL_OPERATOR_BITS));

 	/*
+	 * Switch kernel SP, PC, and callee-saved registers.
 	 * In the context of the new task, return the old task pointer
 	 * (i.e. the task that actually called __switch_to).
+	 * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
 	 */
-	return prev;
+	return __switch_to(prev, next, next_current_ksp0(next));
 }

 /*
--
cgit v1.2.3