diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index aca70fb43147..1d67bc8d7bc6 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -3,8 +3,9 @@
 #define _ASM_POWERPC_SYNCH_H
 #ifdef __KERNEL__
 
+#include <asm/cputable.h>
 #include <asm/feature-fixups.h>
-#include <asm/asm-const.h>
+#include <asm/ppc-opcode.h>
 
 #ifndef __ASSEMBLY__
 extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
@@ -20,6 +21,22 @@ static inline void isync(void)
 {
 	__asm__ __volatile__ ("isync" : : : "memory");
 }
+
+static inline void ppc_after_tlbiel_barrier(void)
+{
+	asm volatile("ptesync": : :"memory");
+	/*
+	 * POWER9, POWER10 need a cp_abort after tlbiel to ensure the copy is
+	 * invalidated correctly. If this is not done, the paste can take data
+	 * from the physical address that was translated at copy time.
+	 *
+	 * POWER9 in practice does not need this, because address spaces with
+	 * accelerators mapped will use tlbie (which does invalidate the copy)
+	 * to invalidate translations. It's not possible to limit POWER10 this
+	 * way due to local copy-paste.
+	 */
+	asm volatile(ASM_FTR_IFSET(PPC_CP_ABORT, "", %0) : : "i" (CPU_FTR_ARCH_31) : "memory");
+}
 #endif /* __ASSEMBLY__ */
 
 #if defined(__powerpc64__)
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index cf20e5229ce1..0203cdf48c54 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -82,7 +82,7 @@ static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is)
 	for (set = 0; set < num_sets; set++)
 		tlbiel_hash_set_isa206(set, is);
 
-	asm volatile("ptesync": : :"memory");
+	ppc_after_tlbiel_barrier();
 }
 
 static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
@@ -110,7 +110,7 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
 	 */
 	tlbiel_hash_set_isa300(0, is, 0, 2, 1);
 
-	asm volatile("ptesync": : :"memory");
+	ppc_after_tlbiel_barrier();
 	asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
 }
 
@@ -303,7 +303,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
 	asm volatile("ptesync": : :"memory");
 	if (use_local) {
 		__tlbiel(vpn, psize, apsize, ssize);
-		asm volatile("ptesync": : :"memory");
+		ppc_after_tlbiel_barrier();
 	} else {
 		__tlbie(vpn, psize, apsize, ssize);
 		fixup_tlbie_vpn(vpn, psize, apsize, ssize);
@@ -879,7 +879,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 				__tlbiel(vpn, psize, psize, ssize);
 			} pte_iterate_hashed_end();
 		}
-		asm volatile("ptesync":::"memory");
+		ppc_after_tlbiel_barrier();
 	} else {
 		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 143b4fd396f0..b487b489d4b6 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -65,7 +65,7 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
 	for (set = 1; set < num_sets; set++)
 		tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);
 
-	asm volatile("ptesync": : :"memory");
+	ppc_after_tlbiel_barrier();
 }
 
 void radix__tlbiel_all(unsigned int action)
@@ -296,7 +296,7 @@ static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 
 	/* For PWC, only one flush is needed */
 	if (ric == RIC_FLUSH_PWC) {
-		asm volatile("ptesync": : :"memory");
+		ppc_after_tlbiel_barrier();
 		return;
 	}
 
@@ -304,7 +304,7 @@ static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 	for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
 		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);
 
-	asm volatile("ptesync": : :"memory");
+	ppc_after_tlbiel_barrier();
 	asm volatile(PPC_RADIX_INVALIDATE_ERAT_USER "; isync" : : :"memory");
 }
 
@@ -431,7 +431,7 @@ static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid,
 
 	asm volatile("ptesync": : :"memory");
 	__tlbiel_va(va, pid, ap, ric);
-	asm volatile("ptesync": : :"memory");
+	ppc_after_tlbiel_barrier();
 }
 
 static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
@@ -442,7 +442,7 @@ static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
 	if (also_pwc)
 		__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
 	__tlbiel_va_range(start, end, pid, page_size, psize);
-	asm volatile("ptesync": : :"memory");
+	ppc_after_tlbiel_barrier();
 }
 
 static inline void __tlbie_va_range(unsigned long start, unsigned long end,
@@ -949,7 +949,7 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
 		if (hflush)
 			__tlbiel_va_range(hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M);
 
-		asm volatile("ptesync": : :"memory");
+		ppc_after_tlbiel_barrier();
 	} else if (cputlb_use_tlbie()) {
 		asm volatile("ptesync": : :"memory");
 		__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
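
A note on the new helper (commentary, not part of the patch): the ASM_FTR_IFSET(PPC_CP_ABORT, "", %0) line uses the kernel's boot-time feature-fixup machinery, so the cp_abort instruction is only present in the instruction stream on CPUs that report CPU_FTR_ARCH_31 (ISA v3.1, i.e. POWER10); everywhere else it is patched out and ppc_after_tlbiel_barrier() degenerates to a plain ptesync. The following is a minimal C sketch of the resulting behaviour, where cpu_has_arch_31 is a hypothetical stand-in for the patched feature test:

/*
 * Illustrative sketch only, not kernel code. In the kernel the
 * CPU_FTR_ARCH_31 test is resolved once at boot by the feature-fixup
 * code, so no runtime branch exists on the flush path.
 */
static inline void after_tlbiel_barrier_sketch(int cpu_has_arch_31)
{
	/* Order the preceding tlbiel(s) against later accesses. */
	asm volatile("ptesync" : : : "memory");

	/*
	 * On ISA v3.1 CPUs, also abort any in-flight copy-paste
	 * sequence so a later paste cannot consume data fetched
	 * through the now-invalidated translation. The kernel emits
	 * this via the PPC_CP_ABORT raw-opcode macro rather than the
	 * "cpabort" mnemonic, so that older assemblers still work.
	 */
	if (cpu_has_arch_31)
		asm volatile("cpabort" : : : "memory");
}

This is also why only the tlbiel (local) paths above gain the new barrier: as the comment in synch.h notes, tlbie already invalidates the copy-paste state, so the pre-existing tlbie paths keep their bare ptesync.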