Revert "powerpc/32s: reorder Linux PTE bits to better match Hash PTE bits."

This reverts commit 697ece78f8.

The SWAP implementation on powerpc requires the page protection
bits to stay out of the least significant PTE bits.

Until the SWAP implementation is changed and this requirement is
lifted, we have to keep at least _PAGE_RW outside of the last 3 bits.

For now, revert to the previous PTE bit order. A further rework
may come later.
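
To illustrate the constraint: at the time of this commit, the book3s/32
swap code stored a swap entry in the PTE shifted left by 3 (see
__swp_entry_to_pte() in arch/powerpc/include/asm/book3s/32/pgtable.h),
reserving the 3 least significant PTE bits. A minimal standalone sketch
of that encoding, with simplified types rather than the kernel's:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Simplified model of the book3s/32 swap entry <-> PTE conversion:
 * the swap entry occupies PTE bits 3..31, so the last 3 PTE bits
 * carry no part of the swap entry.
 */
static uint32_t pte_to_swp_entry(uint32_t pte) { return pte >> 3; }
static uint32_t swp_entry_to_pte(uint32_t swp) { return swp << 3; }

int main(void)
{
	uint32_t entry = 0x12345;	/* arbitrary swap type + offset */
	uint32_t pte = swp_entry_to_pte(entry);

	/* The conversion round-trips through bits 3..31 only... */
	assert(pte_to_swp_entry(pte) == entry);
	/* ...and never encodes anything into the last 3 bits. */
	assert((pte & 0x7) == 0);

	/*
	 * Those 3 reserved bits are needed for flags that must remain
	 * meaningful even in a swapped-out PTE (_PAGE_PRESENT and
	 * _PAGE_HASHPTE in the restored layout), so protection bits
	 * such as _PAGE_RW have to stay outside of the last 3 bits.
	 */
	puts("ok");
	return 0;
}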

Fixes: 697ece78f8 ("powerpc/32s: reorder Linux PTE bits to better match Hash PTE bits.")
Reported-by: Rui Salvaterra <rsalvaterra@gmail.com>
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/b34706f8de87f84d135abb5f3ede6b6f16fb1f41.1589969799.git.christophe.leroy@csgroup.eu
Christophe Leroy 2020-05-20 10:23:45 +00:00 committed by Michael Ellerman
parent 249c9b0cd1
commit 40bb0e9042
3 changed files with 18 additions and 13 deletions

diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h

@@ -17,9 +17,9 @@
  * updating the accessed and modified bits in the page table tree.
  */
 
-#define _PAGE_USER	0x001	/* usermode access allowed */
-#define _PAGE_RW	0x002	/* software: user write access allowed */
-#define _PAGE_PRESENT	0x004	/* software: pte contains a translation */
+#define _PAGE_PRESENT	0x001	/* software: pte contains a translation */
+#define _PAGE_HASHPTE	0x002	/* hash_page has made an HPTE for this pte */
+#define _PAGE_USER	0x004	/* usermode access allowed */
 #define _PAGE_GUARDED	0x008	/* G: prohibit speculative access */
 #define _PAGE_COHERENT	0x010	/* M: enforce memory coherence (SMP systems) */
 #define _PAGE_NO_CACHE	0x020	/* I: cache inhibit */
@@ -27,7 +27,7 @@
 #define _PAGE_DIRTY	0x080	/* C: page changed */
 #define _PAGE_ACCESSED	0x100	/* R: page referenced */
 #define _PAGE_EXEC	0x200	/* software: exec allowed */
-#define _PAGE_HASHPTE	0x400	/* hash_page has made an HPTE for this pte */
+#define _PAGE_RW	0x400	/* software: user write access allowed */
 #define _PAGE_SPECIAL	0x800	/* software: Special page */
 
 #ifdef CONFIG_PTE_64BIT

diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S

@@ -348,7 +348,7 @@ BEGIN_MMU_FTR_SECTION
 	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
 #endif
 	bne	handle_page_fault_tramp_2	/* if not, try to put a PTE */
-	rlwinm	r3, r5, 32 - 24, 30, 30		/* DSISR_STORE -> _PAGE_RW */
+	rlwinm	r3, r5, 32 - 15, 21, 21		/* DSISR_STORE -> _PAGE_RW */
 	bl	hash_page
 	b	handle_page_fault_tramp_1
 FTR_SECTION_ELSE
@@ -497,6 +497,7 @@ InstructionTLBMiss:
 	andc.	r1,r1,r0		/* check access & ~permission */
 	bne-	InstructionAddressInvalid /* return if access not permitted */
 	/* Convert linux-style PTE to low word of PPC-style PTE */
+	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
 	ori	r1, r1, 0xe06		/* clear out reserved bits */
 	andc	r1, r0, r1		/* PP = user? 1 : 0 */
 BEGIN_FTR_SECTION
@@ -564,8 +565,9 @@ DataLoadTLBMiss:
 	 * we would need to update the pte atomically with lwarx/stwcx.
 	 */
 	/* Convert linux-style PTE to low word of PPC-style PTE */
-	rlwinm	r1,r0,0,30,30		/* _PAGE_RW -> PP msb */
-	rlwimi	r0,r0,1,30,30		/* _PAGE_USER -> PP msb */
+	rlwinm	r1,r0,32-9,30,30	/* _PAGE_RW -> PP msb */
+	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
+	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
 	ori	r1,r1,0xe04		/* clear out reserved bits */
 	andc	r1,r0,r1		/* PP = user? rw? 1: 3: 0 */
 BEGIN_FTR_SECTION
@@ -643,6 +645,7 @@ DataStoreTLBMiss:
 	 * we would need to update the pte atomically with lwarx/stwcx.
 	 */
 	/* Convert linux-style PTE to low word of PPC-style PTE */
+	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
 	li	r1,0xe06		/* clear out reserved bits & PP msb */
 	andc	r1,r0,r1		/* PP = user? 1: 0 */
 BEGIN_FTR_SECTION

diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S

@@ -35,7 +35,7 @@ mmu_hash_lock:
 /*
  * Load a PTE into the hash table, if possible.
  * The address is in r4, and r3 contains an access flag:
- * _PAGE_RW (0x002) if a write.
+ * _PAGE_RW (0x400) if a write.
  * r9 contains the SRR1 value, from which we use the MSR_PR bit.
  * SPRG_THREAD contains the physical address of the current task's thread.
  *
@@ -69,7 +69,7 @@ _GLOBAL(hash_page)
 	blt+	112f			/* assume user more likely */
 	lis	r5, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
 	addi	r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
-	rlwimi	r3,r9,32-14,31,31	/* MSR_PR -> _PAGE_USER */
+	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
 112:
 #ifndef CONFIG_PTE_64BIT
 	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
@@ -94,7 +94,7 @@ _GLOBAL(hash_page)
 #else
 	rlwimi	r8,r4,23,20,28		/* compute pte address */
 #endif
-	rlwinm	r0,r3,6,24,24		/* _PAGE_RW access -> _PAGE_DIRTY */
+	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
 	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
 
 	/*
@@ -310,9 +310,11 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
 
 _GLOBAL(create_hpte)
 	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
+	rlwinm	r8,r5,32-9,30,30	/* _PAGE_RW -> PP msb */
 	rlwinm	r0,r5,32-6,30,30	/* _PAGE_DIRTY -> PP msb */
-	and	r8,r5,r0		/* writable if _RW & _DIRTY */
-	rlwimi	r5,r5,1,30,30		/* _PAGE_USER -> PP msb */
+	and	r8,r8,r0		/* writable if _RW & _DIRTY */
+	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
+	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
 	ori	r8,r8,0xe04		/* clear out reserved bits */
 	andc	r8,r5,r8		/* PP = user? (rw&dirty? 1: 3): 0 */
 BEGIN_FTR_SECTION
@@ -564,7 +566,7 @@ _GLOBAL(flush_hash_pages)
 33:	lwarx	r8,0,r5			/* fetch the pte flags word */
 	andi.	r0,r8,_PAGE_HASHPTE
 	beq	8f			/* done if HASHPTE is already clear */
-	rlwinm	r8,r8,0,~_PAGE_HASHPTE	/* clear HASHPTE bit */
+	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
 	stwcx.	r8,0,r5			/* update the pte */
 	bne-	33b
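
A quick way to sanity-check the rotate-and-mask constants restored
above: rlwinm/rlwimi rotate the source left, then apply a mask given in
IBM bit numbering (bit 0 is the MSB, bit 31 the LSB), with rlwimi
additionally inserting the masked field into the destination. The
following userspace model of that rotate+mask step is illustrative code,
not the kernel's; the PTE values are the ones restored by this commit:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_HASHPTE	0x002
#define _PAGE_USER	0x004
#define _PAGE_DIRTY	0x080
#define _PAGE_RW	0x400
#define MSR_PR		0x4000

/*
 * Model of rlwinm: rotate rs left by sh, then AND with the mask
 * selecting IBM bits mb..me.  A mask with mb > me wraps around,
 * as in "rlwinm r8,r8,0,31,29" above.
 */
static uint32_t rlwinm(uint32_t rs, unsigned int sh,
		       unsigned int mb, unsigned int me)
{
	uint32_t rot = (rs << sh) | (sh ? rs >> (32 - sh) : 0);
	uint32_t from_mb = 0xffffffffu >> mb;	    /* IBM bits mb..31 */
	uint32_t to_me = 0xffffffffu << (31 - me);  /* IBM bits 0..me */

	return rot & (mb <= me ? from_mb & to_me : from_mb | to_me);
}

int main(void)
{
	/* rlwinm r1,r0,32-9,30,30: _PAGE_RW -> PP msb (0x2) */
	assert(rlwinm(_PAGE_RW, 32 - 9, 30, 30) == 0x2);
	/* rlwimi r3,r9,32-12,29,29: MSR_PR in SRR1 -> _PAGE_USER */
	assert(rlwinm(MSR_PR, 32 - 12, 29, 29) == _PAGE_USER);
	/* rlwinm r0,r3,32-3,24,24: _PAGE_RW access -> _PAGE_DIRTY */
	assert(rlwinm(_PAGE_RW, 32 - 3, 24, 24) == _PAGE_DIRTY);
	/* rlwinm r8,r8,0,31,29: wrapping mask clears _PAGE_HASHPTE */
	assert(rlwinm(0xfff, 0, 31, 29) == (0xfff & ~_PAGE_HASHPTE));

	puts("all rlwinm checks pass");
	return 0;
}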