x86/boot: Rename overlapping memcpy() to memmove()

Instead of having non-standard memcpy() behavior, explicitly call the new
function memmove(), make it available to the decompressors, and switch
the two overlap cases (screen scrolling and ELF parsing) to use memmove().
Additionally, document the purpose of compressed/string.c.

Suggested-by: Lasse Collin <lasse.collin@tukaani.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: H.J. Lu <hjl.tools@gmail.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/20160426214606.GA5758@www.outflux.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Kees Cook 2016-04-26 14:46:06 -07:00 committed by Ingo Molnar
parent a50b22a7a1
commit 81b785f3e4
2 changed files with 15 additions and 10 deletions

View file

@ -32,9 +32,11 @@
#undef memcpy
#undef memset
#define memzero(s, n) memset((s), 0, (n))
#define memmove memmove
/* Functions used by the included decompressor code below. */
static void error(char *m);
void *memmove(void *dest, const void *src, size_t n);
/*
* This is set up by the setup-routine at boot-time
@ -80,7 +82,7 @@ static void scroll(void)
{
int i;
memcpy(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2);
memmove(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2);
for (i = (lines - 1) * cols * 2; i < lines * cols * 2; i += 2)
vidmem[i] = ' ';
}
@ -307,7 +309,7 @@ static void parse_elf(void *output)
#else
dest = (void *)(phdr->p_paddr);
#endif
memcpy(dest, output + phdr->p_offset, phdr->p_filesz);
memmove(dest, output + phdr->p_offset, phdr->p_filesz);
break;
default: /* Ignore other PT_* */ break;
}

View file

@ -1,7 +1,14 @@
/*
* This provides an optimized implementation of memcpy, and a simplified
* implementation of memset and memmove. These are used here because the
* standard kernel runtime versions are not yet available and we don't
* trust the gcc built-in implementations as they may do unexpected things
* (e.g. FPU ops) in the minimal decompression stub execution environment.
*/
#include "../string.c"
#ifdef CONFIG_X86_32
void *__memcpy(void *dest, const void *src, size_t n)
void *memcpy(void *dest, const void *src, size_t n)
{
int d0, d1, d2;
asm volatile(
@ -15,7 +22,7 @@ void *__memcpy(void *dest, const void *src, size_t n)
return dest;
}
#else
void *__memcpy(void *dest, const void *src, size_t n)
void *memcpy(void *dest, const void *src, size_t n)
{
long d0, d1, d2;
asm volatile(
@ -40,17 +47,13 @@ void *memset(void *s, int c, size_t n)
return s;
}
/*
* This memcpy is overlap safe (i.e. it is memmove without conflicting
* with other definitions of memmove from the various decompressors).
*/
void *memcpy(void *dest, const void *src, size_t n)
void *memmove(void *dest, const void *src, size_t n)
{
unsigned char *d = dest;
const unsigned char *s = src;
if (d <= s || d - s >= n)
return __memcpy(dest, src, n);
return memcpy(dest, src, n);
while (n-- > 0)
d[n] = s[n];