Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Commit ec6347bb43:
In reaction to a proposal to introduce a memcpy_mcsafe_fast() implementation Linus points out that memcpy_mcsafe() is poorly named relative to communicating the scope of the interface. Specifically what addresses are valid to pass as source, destination, and what faults / exceptions are handled. Of particular concern is that even though x86 might be able to handle the semantics of copy_mc_to_user() with its common copy_user_generic() implementation other archs likely need / want an explicit path for this case:

On Fri, May 1, 2020 at 11:28 AM Linus Torvalds <torvalds@linux-foundation.org> wrote:
>
> On Thu, Apr 30, 2020 at 6:21 PM Dan Williams <dan.j.williams@intel.com> wrote:
> >
> > However now I see that copy_user_generic() works for the wrong reason.
> > It works because the exception on the source address due to poison
> > looks no different than a write fault on the user address to the
> > caller, it's still just a short copy. So it makes copy_to_user() work
> > for the wrong reason relative to the name.
>
> Right.
>
> And it won't work that way on other architectures. On x86, we have a
> generic function that can take faults on either side, and we use it
> for both cases (and for the "in_user" case too), but that's an
> artifact of the architecture oddity.
>
> In fact, it's probably wrong even on x86 - because it can hide bugs -
> but writing those things is painful enough that everybody prefers
> having just one function.

Replace a single top-level memcpy_mcsafe() with either copy_mc_to_user(), or copy_mc_to_kernel().

Introduce an x86 copy_mc_fragile() name as the rename for the low-level x86 implementation formerly named memcpy_mcsafe(). It is used as the slow / careful backend that is supplanted by a fast copy_mc_generic() in a follow-on patch.

One side-effect of this reorganization is that separating copy_mc_64.S to its own file means that perf no longer needs to track dependencies for its memcpy_64.S benchmarks.

[ bp: Massage a bit. ]

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Acked-by: Michael Ellerman <mpe@ellerman.id.au>
Cc: <stable@vger.kernel.org>
Link: http://lore.kernel.org/r/CAHk-=wjSqtXAqfUJxFtWNwmguFASTgB0dz1dT3V-78Quiezqbg@mail.gmail.com
Link: https://lkml.kernel.org/r/160195561680.2163339.11574962055305783722.stgit@dwillia2-desk3.amr.corp.intel.com
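To illustrate what the rename means for callers, here is a minimal sketch; it is not code from this patch, and the pmem_read_example() helper, its error policy, and the surrounding driver context are assumptions made purely for illustration. A kernel consumer that used to check memcpy_mcsafe()'s "bytes not copied" return value now does the same with copy_mc_to_kernel():

#include <linux/uaccess.h>	/* declares copy_mc_to_kernel() after this series */
#include <linux/errno.h>

/* Hypothetical caller: read from potentially-poisoned persistent memory
 * into a kernel buffer. Name and error handling are invented for this sketch. */
static int pmem_read_example(void *dst, const void *pmem_src, size_t len)
{
	unsigned long rem;

	/* Before this commit: rem = memcpy_mcsafe(dst, pmem_src, len); */
	rem = copy_mc_to_kernel(dst, pmem_src, len);

	/* A non-zero return is the number of bytes left uncopied, e.g. because
	 * a machine-check exception was taken on the poisoned source. */
	return rem ? -EIO : 0;
}

For user-space destinations the analogous entry point is copy_mc_to_user(), and on x86 the careful low-level backend behind both is the routine renamed here to copy_mc_fragile().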
110 lines · 2.8 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Even with __builtin_ the compiler may decide to use the out of line
   function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the non-instrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
static __always_inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	if (__builtin_constant_p(cnt)) {
		switch (cnt) {
		case 4:
			asm ("movntil %1, %0" : "=m"(*(u32 *)dst) : "r"(*(u32 *)src));
			return;
		case 8:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			return;
		case 16:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			asm ("movntiq %1, %0" : "=m"(*(u64 *)(dst + 8)) : "r"(*(u64 *)(src + 8)));
			return;
		}
	}
	__memcpy_flushcache(dst, src, cnt);
}
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */
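As a usage note on the memcpy_flushcache() fast path above: when the length is a compile-time constant of 4, 8, or 16 bytes, the inline emits non-temporal stores (movnti) directly instead of calling out to __memcpy_flushcache(). A minimal, hypothetical caller is sketched below; struct log_entry and log_append() are invented for illustration, and the choice of barrier afterwards is the caller's responsibility and platform-dependent.

/* Hypothetical pmem-style caller: persist one fixed-size 8-byte record. */
struct log_entry {
	u64 seqno;
};

static void log_append(struct log_entry *pmem_slot, u64 seqno)
{
	/* sizeof(seqno) == 8 is a compile-time constant, so the inline above
	 * collapses to a single movntiq rather than a function call. */
	memcpy_flushcache(&pmem_slot->seqno, &seqno, sizeof(seqno));

	/* Callers typically follow a batch of flushcache copies with a write
	 * barrier before publishing the data, e.g. before updating an index. */
	wmb();
}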