powerpc/64s: Make prom_init require RELOCATABLE

When we boot from open firmware (OF) using PPC_OF_BOOT_TRAMPOLINE, aka.
prom_init, we run parts of the kernel at an address other than the link
address. That happens because OF loads the kernel above zero (OF is at
zero) and we run prom_init before copying the kernel down to zero.

Currently that works even for non-relocatable kernels, because we do
various fixups to the prom_init code to make it run where it's loaded.
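
On 64-bit, one of those fixups is the TOC relocation removed below, which slides every TOC entry by the load offset so that accesses to globals resolve to where the kernel actually sits. As a rough illustration, here is a stand-alone sketch of that idea, not the kernel's actual code; the table contents and the 0x8000000 load offset are invented values:

#include <stdio.h>

/* Slide a table of absolute (link-time) addresses by the load offset,
 * in the spirit of the __reloc_toc() helper removed in the diff below. */
static void reloc_entries(unsigned long *entries, unsigned long nr_entries,
			  long offset)
{
	for (unsigned long i = 0; i < nr_entries; i++)
		entries[i] += offset;
}

int main(void)
{
	/* Pretend these are link-time addresses of three globals. */
	unsigned long table[] = {
		0xc000000000001000UL,
		0xc000000000002000UL,
		0xc000000000003000UL,
	};
	long load_offset = 0x8000000;	/* hypothetical: where OF loaded us */

	reloc_entries(table, 3, load_offset);

	for (int i = 0; i < 3; i++)
		printf("entry %d now 0x%lx\n", i, table[i]);

	return 0;
}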

However those fixups are not sufficient if the kernel becomes large
enough. In that case prom_init()'s final call to __start() can end up
generating a plt branch:

bl      c000000002000018 <00000078.plt_branch.__start>

That results in the kernel jumping to the linked address of __start,
0xc000000000000000, when really it needs to jump to
0xc000000000000000 + the runtime offset, because the kernel is still
running at the load address.
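
To make the mismatch concrete, here is a tiny stand-alone sketch; the 0x8000000 load offset is an invented example value, and only the 0xc000000000000000 link address comes from the description above:

#include <stdio.h>

int main(void)
{
	unsigned long linked_start = 0xc000000000000000UL; /* link address of __start */
	unsigned long load_offset  = 0x8000000UL;          /* hypothetical OF load offset */

	/* The plt_branch targets the link-time address... */
	unsigned long plt_target = linked_start;

	/* ...but while the kernel is still executing at the load address it
	 * needs to branch to the link address plus the runtime offset. */
	unsigned long needed_target = linked_start + load_offset;

	printf("plt_branch target: 0x%016lx\n", plt_target);
	printf("needed target:     0x%016lx\n", needed_target);

	return 0;
}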

We could do further shenanigans to handle that; see Jordan's patch for
example:
  https://lore.kernel.org/linuxppc-dev/20210421021721.1539289-1-jniethe5@gmail.com

However it is much simpler to just require a kernel with prom_init() to
be built relocatable. The result works in all configurations without
further work, and requires less code.

This should have no effect on most people, as our defconfigs and
essentially all distro configs already have RELOCATABLE enabled.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210623130454.2542945-1-mpe@ellerman.id.au
Author: Michael Ellerman <mpe@ellerman.id.au>
Date:   2021-06-23 23:04:54 +10:00
Commit: 24d33ac5b8 (parent: c6c27e3d84)

2 changed files with 3 additions and 56 deletions

--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c

@@ -3243,54 +3243,6 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
 #endif /* CONFIG_BLK_DEV_INITRD */
 }
 
-#ifdef CONFIG_PPC64
-#ifdef CONFIG_RELOCATABLE
-static void reloc_toc(void)
-{
-}
-
-static void unreloc_toc(void)
-{
-}
-#else
-static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
-{
-	unsigned long i;
-	unsigned long *toc_entry;
-
-	/* Get the start of the TOC by using r2 directly. */
-	asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
-
-	for (i = 0; i < nr_entries; i++) {
-		*toc_entry = *toc_entry + offset;
-		toc_entry++;
-	}
-}
-
-static void reloc_toc(void)
-{
-	unsigned long offset = reloc_offset();
-	unsigned long nr_entries =
-		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
-
-	__reloc_toc(offset, nr_entries);
-
-	mb();
-}
-
-static void unreloc_toc(void)
-{
-	unsigned long offset = reloc_offset();
-	unsigned long nr_entries =
-		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
-
-	mb();
-
-	__reloc_toc(-offset, nr_entries);
-}
-#endif
-#endif
-
 #ifdef CONFIG_PPC_SVM
 /*
  * Perform the Enter Secure Mode ultracall.
@@ -3324,14 +3276,12 @@ static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
 	 * relocated it so the check will fail. Restore the original image by
 	 * relocating it back to the kernel virtual base address.
 	 */
-	if (IS_ENABLED(CONFIG_RELOCATABLE))
-		relocate(KERNELBASE);
+	relocate(KERNELBASE);
 
 	ret = enter_secure_mode(kbase, fdt);
 
 	/* Relocate the kernel again. */
-	if (IS_ENABLED(CONFIG_RELOCATABLE))
-		relocate(kbase);
+	relocate(kbase);
 
 	if (ret != U_SUCCESS) {
 		prom_printf("Returned %d from switching to secure mode.\n", ret);
@@ -3359,8 +3309,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
 #ifdef CONFIG_PPC32
 	unsigned long offset = reloc_offset();
 	reloc_got2(offset);
-#else
-	reloc_toc();
 #endif
 
 	/*
@@ -3537,8 +3485,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
 
 #ifdef CONFIG_PPC32
 	reloc_got2(-offset);
-#else
-	unreloc_toc();
 #endif
 
 	/* Move to secure memory if we're supposed to be secure guests. */
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig

@@ -51,6 +51,7 @@ config PPC_NATIVE
 config PPC_OF_BOOT_TRAMPOLINE
 	bool "Support booting from Open Firmware or yaboot"
 	depends on PPC_BOOK3S_32 || PPC64
+	select RELOCATABLE if PPC64
 	default y
 	help
 	  Support from booting from Open Firmware or yaboot using an