From 987c426320cce72d1b28f55c8603b239e4f7187c Mon Sep 17 00:00:00 2001
From: Nicholas Piggin <npiggin@gmail.com>
Date: Wed, 11 Nov 2020 22:01:51 +1000
Subject: [PATCH] powerpc/64s/perf: perf interrupt does not have to
 get_user_pages to access user memory

read_user_stack_slow that walks user address translation by hand is
only required on hash, because a hash fault can not be serviced from
"NMI" context (to avoid re-entering the hash code) so the user stack
can be mapped into Linux page tables but not accessible by the CPU.

Radix MMU mode does not have this restriction. A page fault failure
would indicate the page is not accessible via get_user_pages either,
so avoid this on radix.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20201111120151.3150658-1-npiggin@gmail.com
---
 arch/powerpc/perf/callchain.h    | 2 +-
 arch/powerpc/perf/callchain_64.c | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/perf/callchain.h b/arch/powerpc/perf/callchain.h
index ae24d4a00da6..d6fa6e25234f 100644
--- a/arch/powerpc/perf/callchain.h
+++ b/arch/powerpc/perf/callchain.h
@@ -33,7 +33,7 @@ static inline int __read_user_stack(const void __user *ptr, void *ret,
 
 	rc = copy_from_user_nofault(ret, ptr, size);
 
-	if (IS_ENABLED(CONFIG_PPC64) && rc)
+	if (IS_ENABLED(CONFIG_PPC64) && !radix_enabled() && rc)
 		return read_user_stack_slow(ptr, ret, size);
 
 	return rc;
diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c
index fed90e827f3a..0777b04a0c56 100644
--- a/arch/powerpc/perf/callchain_64.c
+++ b/arch/powerpc/perf/callchain_64.c
@@ -21,7 +21,8 @@
 /*
  * On 64-bit we don't want to invoke hash_page on user addresses from
  * interrupt context, so if the access faults, we read the page tables
- * to find which page (if any) is mapped and access it directly.
+ * to find which page (if any) is mapped and access it directly. Radix
+ * has no need for this so it doesn't use read_user_stack_slow.
  */
 int read_user_stack_slow(const void __user *ptr, void *buf, int nb)
 {
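
For readers following along, here is a sketch of how __read_user_stack in
arch/powerpc/perf/callchain.h reads once the patch above is applied. It is
reconstructed from the hunk context only; the address-range check and the
local declarations outside the hunk are illustrative assumptions, not a
verbatim copy of the file.

static inline int __read_user_stack(const void __user *ptr, void *ret,
				    size_t size)
{
	unsigned long addr = (unsigned long)ptr;
	int rc;

	/* Assumed guard: reject kernel addresses and unaligned reads. */
	if (addr > TASK_SIZE - size || (addr & (size - 1)))
		return -EFAULT;

	/* Fast path: plain nofault copy, works on both hash and radix. */
	rc = copy_from_user_nofault(ret, ptr, size);

	/*
	 * Slow path only for the hash MMU: radix has no need to walk the
	 * page tables by hand, so the fallback is skipped there.
	 */
	if (IS_ENABLED(CONFIG_PPC64) && !radix_enabled() && rc)
		return read_user_stack_slow(ptr, ret, size);

	return rc;
}

The design point, per the changelog, is that on radix a failed
copy_from_user_nofault() already means the page is not accessible via
get_user_pages either, so falling back to read_user_stack_slow() would
only repeat the failure.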