ARM fixes for 5.14:

- Resolve a Keystone 2 kernel mapping regression
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEuNNh8scc2k/wOAE+9OeQG+StrGQFAmEoqIsACgkQ9OeQG+St
 rGQ+Gw//Q6EA7x0ROaW369jSlxizpeW5zPBn0UyR0OTMH0xcBbVeZuhZCEnXaxSm
 DHlTN0TSfSBg1VuPYhmnAxi6E6edxIMOOci/ZTnnOj/VvFnvr9rvV2syfKiBmHA1
 rsz7XJ/krQLavnlRoBoRx7FQZABQxE9M1J7Lqr3/7IwCfw3mSsGyYVYCwpiOEMSI
 Um84NSPlyTtSIaEY050oZC3PXPoNRGIY3Ex/DyHuSx5Z/Kpv+sDA6WXS3qvO72Ar
 zpDeW2I/0Ux5P0IZnIngi6YFwQgk/T62LSAvbzmwwesDxHUhJwnSw6ujo9T4VYpc
 c+b15TCjdkjU0akWK2OVk9C3YFdWkmoTPGdY4TIH3Ijua6L5N6z8nxuVTZj6Es9p
 GH28GB0dTbiwJperxAqLWlFjlX13anpwAbOcvwrp41f8/fDY+MwbkPVHwxWVnnOw
 UFxec0nN8yQxs6N5F0rszn+zhFhps4CSD8gZvZwnkm5GXBkLzM8Dg73pHTh7w9Oq
 UU1GTjDpDedmJrdLWfDy7db5TVNAv2Uh6li2VuvTKj+PqxjCTW1fiABdle4hZjS3
 XVrmhNwvB0OcYVx6GyqloeoVCufWSCExQK8h3qAupbe7i2wFlBpdodLPicolTe90
 NDVCizL3nBfVOldSPljI0E7mPtHjosVR9YuRYBNetIHHbfkKy9I=
 =kRsE
 -----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM fix from Russell King:
 "Resolve a Keystone 2 kernel mapping regression"

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 9104/2: Fix Keystone 2 kernel mapping regression
commit 94606b893f
Linus Torvalds, 2021-08-27 09:00:43 -07:00
4 changed files with 27 additions and 8 deletions

--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -160,10 +160,11 @@ extern unsigned long vectors_base;
 
 /*
  * Physical start and end address of the kernel sections. These addresses are
- * 2MB-aligned to match the section mappings placed over the kernel.
+ * 2MB-aligned to match the section mappings placed over the kernel. We use
+ * u64 so that LPAE mappings beyond the 32bit limit will work out as well.
  */
-extern u32 kernel_sec_start;
-extern u32 kernel_sec_end;
+extern u64 kernel_sec_start;
+extern u64 kernel_sec_end;
 
 /*
  * Physical vs virtual RAM address space conversion. These are
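
Why the u32 -> u64 change matters: under LPAE a Keystone 2 kernel can sit above the 32-bit physical boundary, and a u32 silently drops the high bits of the section address. A minimal host-side sketch of that truncation; the 0x8_0000_0000 base is illustrative, not taken from the patch:

  /* Sketch: a u32 silently drops the high bits of an LPAE physical address. */
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
  	/* Illustrative Keystone 2-style kernel start above the 4GB mark. */
  	uint64_t phys = 0x800000000ULL + 0x200000;
  	uint32_t as_u32 = (uint32_t)phys;	/* what the old extern u32 kept */

  	printf("u64: %#llx  u32: %#x\n", (unsigned long long)phys, as_u32);
  	/* prints: u64: 0x800200000  u32: 0x200000 -- the high bits are gone */
  	return 0;
  }
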
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -49,7 +49,8 @@
 
 	/*
 	 * This needs to be assigned at runtime when the linker symbols are
-	 * resolved.
+	 * resolved. These are unsigned 64bit really, but in this assembly code
+	 * We store them as 32bit.
 	 */
 	.pushsection .data
 	.align	2
@@ -57,7 +58,9 @@
 	.globl	kernel_sec_start
 	.globl	kernel_sec_end
 kernel_sec_start:
 	.long	0
+	.long	0
 kernel_sec_end:
 	.long	0
+	.long	0
 	.popsection
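
The doubled .long directives reserve 64 bits of storage per symbol, so the assembly side matches the size of the new extern u64 declarations. A rough C analogue of that layout (the union and values are only illustrative, not kernel code):

  /* Sketch: two 32-bit words back the same storage as one 64-bit value. */
  #include <stdint.h>
  #include <stdio.h>

  union sec_addr {
  	uint64_t as_u64;	/* how memory.h's extern u64 views the symbol */
  	uint32_t words[2];	/* how the two .long 0 directives lay it out */
  };

  int main(void)
  {
  	union sec_addr s = { .as_u64 = 0 };

  	s.words[0] = 0x80200000;	/* a 32-bit store into the pair; on a
  					 * little-endian host this fills the
  					 * low half of as_u64 */
  	printf("as_u64 = %#llx\n", (unsigned long long)s.as_u64);
  	return 0;
  }
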
@@ -250,7 +253,11 @@ __create_page_tables:
 	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
 	ldr	r6, =(_end - 1)
 	adr_l	r5, kernel_sec_start	@ _pa(kernel_sec_start)
-	str	r8, [r5]		@ Save physical start of kernel
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	str	r8, [r5, #4]		@ Save physical start of kernel (BE)
+#else
+	str	r8, [r5]		@ Save physical start of kernel (LE)
+#endif
 	orr	r3, r8, r7		@ Add the MMU flags
 	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
 1:	str	r3, [r0], #1 << PMD_ORDER
@@ -259,7 +266,11 @@ __create_page_tables:
 	bls	1b
 	eor	r3, r3, r7		@ Remove the MMU flags
 	adr_l	r5, kernel_sec_end	@ _pa(kernel_sec_end)
-	str	r3, [r5]		@ Save physical end of kernel
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	str	r3, [r5, #4]		@ Save physical end of kernel (BE)
+#else
+	str	r3, [r5]		@ Save physical end of kernel (LE)
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
 	/*

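
The endian-dependent offset exists because head.S writes only 32 bits into a 64-bit slot: the low word sits at byte offset 0 on little-endian but at offset 4 on BE8 big-endian, hence the "[r5, #4]". A host-side sketch of the same rule (the physical address value is illustrative):

  /* Sketch: where a 32-bit store must land inside a u64, by endianness. */
  #include <stdint.h>
  #include <string.h>
  #include <stdio.h>

  int main(void)
  {
  	uint64_t sec = 0;
  	uint32_t phys = 0x80200000;	/* illustrative physical start */
  	uint16_t probe = 1;
  	int big_endian = (*(uint8_t *)&probe == 0);
  	/* Mirrors the CONFIG_CPU_ENDIAN_BE8 #ifdef: low word is at +4 on BE. */
  	size_t off = big_endian ? 4 : 0;

  	memcpy((uint8_t *)&sec + off, &phys, sizeof(phys));
  	printf("sec = %#llx\n", (unsigned long long)sec);
  	/* prints 0x80200000 on either endianness */
  	return 0;
  }
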
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1608,6 +1608,13 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
 	if (offset == 0)
 		return;
 
+	/*
+	 * Offset the kernel section physical offsets so that the kernel
+	 * mapping will work out later on.
+	 */
+	kernel_sec_start += offset;
+	kernel_sec_end += offset;
+
 	/*
 	 * Get the address of the remap function in the 1:1 identity
 	 * mapping setup by the early page table assembly code. We
@@ -1716,7 +1723,7 @@ void __init paging_init(const struct machine_desc *mdesc)
 {
 	void *zero_page;
 
-	pr_debug("physical kernel sections: 0x%08x-0x%08x\n",
+	pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
 		 kernel_sec_start, kernel_sec_end);
 
 	prepare_page_table();

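
The mmu.c half of the fix: head.S records the section bounds before Keystone 2 switches to its high physical addresses, so early_paging_init() must shift them by the same delta it applies to everything else, and the debug print in paging_init() needs 64-bit format specifiers. A standalone sketch of that adjustment; the 0x780000000 delta is illustrative of a Keystone 2-style move from the 32-bit DDR alias to the true address above 4GB, and is not taken from the patch:

  /* Sketch: shifting the recorded kernel section bounds by the pv offset. */
  #include <stdint.h>
  #include <stdio.h>

  typedef uint64_t phys_addr_t;

  static phys_addr_t kernel_sec_start = 0x80200000;  /* as saved by head.S */
  static phys_addr_t kernel_sec_end   = 0x81000000;

  int main(void)
  {
  	phys_addr_t offset = 0x780000000ULL;  /* illustrative pv_fixup delta */

  	/* Without this shift, the later remap maps the wrong physical range. */
  	kernel_sec_start += offset;
  	kernel_sec_end   += offset;
  	printf("physical kernel sections: 0x%08llx-0x%08llx\n",
  	       (unsigned long long)kernel_sec_start,
  	       (unsigned long long)kernel_sec_end);
  	return 0;
  }
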
--- a/arch/arm/mm/pv-fixup-asm.S
+++ b/arch/arm/mm/pv-fixup-asm.S
@@ -29,7 +29,7 @@ ENTRY(lpae_pgtables_remap_asm)
 	ldr	r6, =(_end - 1)
 	add	r7, r2, #0x1000
 	add	r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
-	add	r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER)
+	add	r7, r7, #KERNEL_OFFSET >> (SECTION_SHIFT - L2_ORDER)
 1:	ldrd	r4, r5, [r7]
 	adds	r4, r4, r0
 	adc	r5, r5, r1
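
The one-line pv-fixup change makes the remap loop index from KERNEL_OFFSET, matching where __create_page_tables actually placed the kernel map, instead of PAGE_OFFSET. Each 64-bit L2 entry covers one 2MB section, so a virtual address maps to a table offset of addr >> (SECTION_SHIFT - L2_ORDER) bytes. A small sketch of that index arithmetic; SECTION_SHIFT and L2_ORDER match ARM LPAE, while the KERNEL_OFFSET value is illustrative:

  /* Sketch: byte offset of a virtual address's PMD entry within the table. */
  #include <stdint.h>
  #include <stdio.h>

  #define SECTION_SHIFT 21  /* 2MB sections under LPAE */
  #define L2_ORDER      3   /* 8-byte (64-bit) page table entries */

  int main(void)
  {
  	uint32_t kernel_offset = 0xC0000000;  /* illustrative kernel map base */
  	uint32_t entry_off = kernel_offset >> (SECTION_SHIFT - L2_ORDER);

  	printf("first kernel PMD entry at table base + %#x\n", entry_off);
  	return 0;
  }
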