author     Thiago Jung Bauermann <bauerman@linux.ibm.com>    2019-05-22 19:01:58 -0300
committer  Michael Ellerman <mpe@ellerman.id.au>             2019-05-23 14:00:32 +1000
commit     8b909e3548706cbebc0a676067b81aadda57f47e (patch)
tree       9f3ad256ddf708bad80add6f5275d1695c432bbb /arch/powerpc/kernel/kexec_elf_64.c
parent     3202e35ec1c8fc19cea24253ff83edf702a60a02 (diff)
powerpc/kexec: Fix loading of kernel + initramfs with kexec_file_load()
Commit b6664ba42f14 ("s390, kexec_file: drop arch_kexec_mem_walk()") changed
kexec_add_buffer() to skip searching for a memory location if kexec_buf.mem is
already set, and to use the address that is there.

In powerpc code we reuse a kexec_buf variable for loading both the kernel and
the initramfs, resetting some of the fields between those uses, but not mem.
This causes kexec_add_buffer() to try to load the kernel at the same address
where the initramfs will be loaded, which is naturally rejected:

    # kexec -s -l --initrd initramfs vmlinuz
    kexec_file_load failed: Invalid argument

Setting the mem field before every call to kexec_add_buffer() fixes this
regression.

Fixes: b6664ba42f14 ("s390, kexec_file: drop arch_kexec_mem_walk()")
Cc: stable@vger.kernel.org # v5.0+
Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Reviewed-by: Dave Young <dyoung@redhat.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
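To make the regression easier to see outside the diff context, here is a small
stand-alone C sketch of the behaviour the commit message describes. It is not
kernel code: struct kexec_buf, KEXEC_BUF_MEM_UNKNOWN and kexec_add_buffer()
are reduced to mock-ups, and the next_free "allocator" and the example sizes
are invented for this illustration. The only thing modelled is the
post-b6664ba42f14 rule that the search for a memory hole is skipped when
kbuf.mem is already set.

/* Simplified, user-space illustration only; not the kernel implementation. */
#include <stdio.h>

#define KEXEC_BUF_MEM_UNKNOWN	0UL	/* "no address chosen yet" sentinel */

struct kexec_buf {
	unsigned long mem;		/* load address, or "unknown" */
	unsigned long memsz;		/* size of the segment */
};

static unsigned long next_free = 0x1000000UL;	/* pretend free-memory cursor */

/* Mock of the post-b6664ba42f14 behaviour: only search when mem is unset. */
static int kexec_add_buffer(struct kexec_buf *kbuf)
{
	if (kbuf->mem == KEXEC_BUF_MEM_UNKNOWN) {
		kbuf->mem = next_free;		/* find a hole for the caller */
		next_free += kbuf->memsz;
		return 0;
	}
	/* Caller asked for a specific address; reject it if already in use. */
	if (kbuf->mem < next_free) {
		fprintf(stderr, "kexec_add_buffer: Invalid argument\n");
		return -1;
	}
	next_free = kbuf->mem + kbuf->memsz;
	return 0;
}

int main(void)
{
	struct kexec_buf kbuf = { .mem = KEXEC_BUF_MEM_UNKNOWN, .memsz = 0x800000 };

	kexec_add_buffer(&kbuf);	/* kernel image: mem is unknown, a hole is found */
	printf("kernel at 0x%lx\n", kbuf.mem);

	kbuf.memsz = 0x400000;		/* same kexec_buf reused for the initramfs */
	kexec_add_buffer(&kbuf);	/* BUG: mem still holds the kernel's address, rejected */

	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;	/* the fix: reset mem before reusing the buffer */
	kexec_add_buffer(&kbuf);	/* initramfs gets its own hole again */
	printf("initrd at 0x%lx\n", kbuf.mem);

	return 0;
}

Resetting mem to KEXEC_BUF_MEM_UNKNOWN before each reuse of the buffer is
exactly what the one-line additions in the hunks below do.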
Diffstat (limited to 'arch/powerpc/kernel/kexec_elf_64.c')
-rw-r--r--  arch/powerpc/kernel/kexec_elf_64.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/kexec_elf_64.c b/arch/powerpc/kernel/kexec_elf_64.c
index ba4f18a43ee8..52a29fc73730 100644
--- a/arch/powerpc/kernel/kexec_elf_64.c
+++ b/arch/powerpc/kernel/kexec_elf_64.c
@@ -547,6 +547,7 @@ static int elf_exec_load(struct kimage *image, struct elfhdr *ehdr,
 		kbuf.memsz = phdr->p_memsz;
 		kbuf.buf_align = phdr->p_align;
 		kbuf.buf_min = phdr->p_paddr + base;
+		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 		ret = kexec_add_buffer(&kbuf);
 		if (ret)
 			goto out;
@@ -581,7 +582,8 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
 	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
 				  .buf_max = ppc64_rma_size };
 	struct kexec_buf pbuf = { .image = image, .buf_min = 0,
-				  .buf_max = ppc64_rma_size, .top_down = true };
+				  .buf_max = ppc64_rma_size, .top_down = true,
+				  .mem = KEXEC_BUF_MEM_UNKNOWN };
 
 	ret = build_elf_exec_info(kernel_buf, kernel_len, &ehdr, &elf_info);
 	if (ret)
@@ -606,6 +608,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
 		kbuf.bufsz = kbuf.memsz = initrd_len;
 		kbuf.buf_align = PAGE_SIZE;
 		kbuf.top_down = false;
+		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 		ret = kexec_add_buffer(&kbuf);
 		if (ret)
 			goto out;
@@ -638,6 +641,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
 	kbuf.bufsz = kbuf.memsz = fdt_size;
 	kbuf.buf_align = PAGE_SIZE;
 	kbuf.top_down = true;
+	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 	ret = kexec_add_buffer(&kbuf);
 	if (ret)
 		goto out;