author     Paul Mundt <lethal@linux-sh.org>  2009-03-18 19:06:15 +0900
committer  Paul Mundt <lethal@linux-sh.org>  2009-03-18 19:06:15 +0900
commit     a6bab7b5c18501e4dd3201ae8ac1dc6da5f07acc (patch)
tree       fc4b97e04dc02d9aa80b3c12ae49c17ca11860a6 /arch/sh
parent     b7cf6ddc13186f9272438a97aa75972d496d0b0a (diff)
sh: kexec: Drop SR.BL bit toggling.
For the time being, this creates far more problems than it solves, as evidenced by the second local_irq_disable(). Kill all of this off and rely on IRQ disabling to protect against the VBR reload.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
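For context, a minimal sketch of what the dropped helpers conceptually do: on SH, SR.BL (bit 28 of the status register) blocks interrupt and exception acceptance entirely while set, which is stricter than masking IRQs via SR.IMASK as local_irq_disable() does. The snippet below is illustrative only, not the in-tree set_bl_bit()/clear_bl_bit() implementation; the helper names, masks, and inline-asm form are assumptions based on the documented SH-4 SR layout.

/*
 * Illustrative sketch only (not the kernel's actual macros):
 * toggle SR.BL via stc/ldc. While BL=1, interrupts and exceptions
 * are blocked outright, so nothing can vector through VBR.
 */
static inline void sketch_set_bl_bit(void)
{
	unsigned long sr;

	__asm__ __volatile__("stc sr, %0" : "=r" (sr));
	sr |= (1UL << 28);		/* assumed SR.BL position on SH-4 */
	__asm__ __volatile__("ldc %0, sr" : : "r" (sr) : "memory");
}

static inline void sketch_clear_bl_bit(void)
{
	unsigned long sr;

	__asm__ __volatile__("stc sr, %0" : "=r" (sr));
	sr &= ~(1UL << 28);
	__asm__ __volatile__("ldc %0, sr" : : "r" (sr) : "memory");
}

The patch drops this stronger blocking in favour of plain IRQ disabling, which is already in effect around the VBR reload and avoids the problems that running with BL set was causing here.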
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/kernel/machine_kexec.c  7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index c44efb73ab1a..69268c0d8063 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -110,23 +110,22 @@ void machine_kexec(struct kimage *image)
 	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
 						relocate_new_kernel_size);
-        kexec_info(image);
+	kexec_info(image);
 	flush_cache_all();
-	set_bl_bit();
 #if defined(CONFIG_SH_STANDARD_BIOS)
 	asm volatile("ldc %0, vbr" :
 		     : "r" (((unsigned long) gdb_vbr_vector) - 0x100)
 		     : "memory");
 #endif
+
 	/* now call it */
 	rnk = (relocate_new_kernel_t) reboot_code_buffer;
 	(*rnk)(page_list, reboot_code_buffer, image->start);
 #ifdef CONFIG_KEXEC_JUMP
 	asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory");
-	local_irq_disable();
-	clear_bl_bit();
+
 	if (image->preserve_context)
 		restore_processor_state();