powerpc/8xx: Simplify flush_tlb_kernel_range()

In the same spirit as commit 63f501e07a ("powerpc/8xx: Simplify TLB
handling"), simplify flush_tlb_kernel_range() for 8xx.

8xx cannot be SMP, and has 'tlbie' and 'tlbia' instructions, so
an inline version of flush_tlb_kernel_range() for 8xx is worth it.

With this patch, the first leg of change_page_attr() is:

	  2c:	55 29 00 3c 	rlwinm  r9,r9,0,0,30
	  30:	91 23 00 00 	stw     r9,0(r3)
	  34:	7c 00 22 64 	tlbie   r4,r0
	  38:	7c 00 04 ac 	hwsync
	  3c:	38 60 00 00 	li      r3,0
	  40:	4e 80 00 20 	blr

Before the patch it was:

	  30:	55 29 00 3c 	rlwinm  r9,r9,0,0,30
	  34:	91 2a 00 00 	stw     r9,0(r10)
	  38:	94 21 ff f0 	stwu    r1,-16(r1)
	  3c:	7c 08 02 a6 	mflr    r0
	  40:	38 83 10 00 	addi    r4,r3,4096
	  44:	90 01 00 14 	stw     r0,20(r1)
	  48:	48 00 00 01 	bl      48 <change_page_attr+0x48>
				48: R_PPC_REL24	flush_tlb_kernel_range
	  4c:	80 01 00 14 	lwz     r0,20(r1)
	  50:	38 60 00 00 	li      r3,0
	  54:	7c 08 03 a6 	mtlr    r0
	  58:	38 21 00 10 	addi    r1,r1,16
	  5c:	4e 80 00 20 	blr

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d2610043419ce3e0e53a85386baf2c3625af5cfb.1647877442.git.christophe.leroy@csgroup.eu
2 changed files with 13 additions and 1 deletion

arch/powerpc/include/asm/nohash/tlbflush.h

@@ -30,7 +30,6 @@ struct mm_struct;
 
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifdef CONFIG_PPC_8xx
 static inline void local_flush_tlb_mm(struct mm_struct *mm)
@@ -45,7 +44,18 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned lon
 {
 	asm volatile ("tlbie %0; sync" : : "r" (vmaddr) : "memory");
 }
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	start &= PAGE_MASK;
+
+	if (end - start <= PAGE_SIZE)
+		asm volatile ("tlbie %0; sync" : : "r" (start) : "memory");
+	else
+		asm volatile ("sync; tlbia; isync" : : : "memory");
+}
 #else
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void local_flush_tlb_mm(struct mm_struct *mm);
 extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

arch/powerpc/mm/nohash/tlb.c

@@ -358,6 +358,7 @@ void __init early_init_mmu_47x(void)
 /*
  * Flush kernel TLB entries in the given range
  */
+#ifndef CONFIG_PPC_8xx
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 #ifdef CONFIG_SMP
@@ -370,6 +371,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 #endif
 }
 EXPORT_SYMBOL(flush_tlb_kernel_range);
+#endif
 
 /*
  * Currently, for range flushing, we just do a full mm flush. This should
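
For illustration only (not part of this commit), here is a minimal stand-alone C sketch of the decision the new inline flush_tlb_kernel_range() makes on 8xx: a single-page range gets a targeted tlbie, anything larger falls back to a full tlbia. The tlbie/tlbia sequences are replaced by prints so the sketch runs in user space, the 4k PAGE_SIZE matches the 4096-byte stride visible in the change_page_attr() disassembly above, and the name sketch_flush_tlb_kernel_range() is purely illustrative.

/* Stand-alone sketch of the single-page vs. full-flush decision. */
#include <stdio.h>

#define PAGE_SIZE	4096UL			/* assumed 4k pages, as on 8xx */
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Mirrors the inline helper's logic, with prints instead of tlbie/tlbia. */
static void sketch_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;

	if (end - start <= PAGE_SIZE)
		printf("tlbie %#lx; sync\n", start);	/* flush one entry */
	else
		printf("sync; tlbia; isync\n");		/* flush the whole TLB */
}

int main(void)
{
	/* One page, as in the change_page_attr() call above: targeted tlbie. */
	sketch_flush_tlb_kernel_range(0xc0001000UL, 0xc0002000UL);

	/* Larger range: falls back to tlbia. */
	sketch_flush_tlb_kernel_range(0xc0000000UL, 0xc0100000UL);
	return 0;
}

A (start, start + PAGE_SIZE) flush like the one emitted by change_page_attr() takes the tlbie path; any wider range invalidates the whole TLB, which is cheap enough on a non-SMP 8xx.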