hexagon: use generic strncpy/strnlen from_user

Remove the hexagon implementation of strncpy/strnlen and instead use
the generic version.  The hexagon version reads the data twice for
strncpy() by doing an extra strnlen(), and it apparently lacks a check
for user_addr_max().

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Arnd Bergmann <arnd@arndb.de>, 2020-01-16 15:58:41 +01:00
commit 2820cfdc08 (parent 2f69b04a88)
5 changed files with 5 additions and 159 deletions
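To make the trade-off described in the commit message concrete, here is a rough user-space model of the two shapes. It is an illustration only, not kernel code: the names copy_string_two_pass and copy_string_one_pass are invented for this sketch, the "limit" argument stands in for the user_addr_max() style bound, and a byte-at-a-time loop stands in for the word-at-a-time scan the generic lib/strncpy_from_user.c actually uses. The point is simply that the removed hexagon helper walks the source string twice (a strnlen pass, then a copy pass), while the generic helper copies, length-checks, and bounds the access in a single pass.

/*
 * Illustration only -- a user-space model, not kernel code.  The function
 * names and the "limit" argument are invented for this sketch; "limit"
 * plays the role of the user_addr_max() bound, and plain memcpy/strnlen
 * stand in for the uaccess routines.
 */
#include <stdio.h>
#include <string.h>

/* Shape of the removed hexagon helper: length scan first, then a copy. */
static long copy_string_two_pass(char *dst, const char *src, long n)
{
	size_t len = strnlen(src, n);	/* first walk over the source bytes */

	if (len >= (size_t)n) {		/* no NUL within n bytes: truncate */
		memcpy(dst, src, n);	/* second walk over the same bytes */
		return n;
	}
	memcpy(dst, src, len + 1);	/* second walk, including the NUL */
	return (long)len;
}

/* Shape of the generic helper: copy until NUL, n, or the address bound. */
static long copy_string_one_pass(char *dst, const char *src, long n, long limit)
{
	long max = (n < limit) ? n : limit;	/* never scan past the bound */
	long i;

	for (i = 0; i < max; i++) {
		dst[i] = src[i];		/* the only read of each byte */
		if (dst[i] == '\0')
			return i;		/* string length, NUL excluded */
	}
	return (i == n) ? n : -1;	/* hit the bound before n: fault-like error */
}

int main(void)
{
	char buf[16];

	printf("two-pass: %ld\n", copy_string_two_pass(buf, "hexagon", sizeof(buf)));
	printf("one-pass: %ld\n", copy_string_one_pass(buf, "hexagon", sizeof(buf), 64));
	return 0;
}

The kernel implementation does the same single pass with word-sized loads and zero-byte detection, which is what makes it worth sharing across architectures instead of keeping per-arch assembly such as the strnlen_user.S file deleted below.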

arch/hexagon/Kconfig

@@ -19,6 +19,8 @@ config HEXAGON
# GENERIC_ALLOCATOR is used by dma_alloc_coherent()
select GENERIC_ALLOCATOR
select GENERIC_IRQ_SHOW
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
select NEED_SG_DMA_LENGTH

arch/hexagon/include/asm/uaccess.h

@@ -57,42 +57,13 @@ unsigned long raw_copy_to_user(void __user *to, const void *from,
__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count);
#define __clear_user(a, s) __clear_user_hexagon((a), (s))

extern long __strnlen_user(const char __user *src, long n);

static inline long strnlen_user(const char __user *src, long n)
{
	if (!access_ok(src, 1))
		return 0;

	return __strnlen_user(src, n);
}

/* get around the ifndef in asm-generic/uaccess.h */
extern long strnlen_user(const char __user *src, long n);
#define strnlen_user strnlen_user

static inline long strncpy_from_user(char *dst, const char __user *src, long n);
extern long strncpy_from_user(char *dst, const char __user *src, long n);
#define strncpy_from_user strncpy_from_user

#include <asm-generic/uaccess.h>

/* Todo: an actual accelerated version of this. */
static inline long strncpy_from_user(char *dst, const char __user *src, long n)
{
	long res = strnlen_user(src, n);

	if (unlikely(!res))
		return -EFAULT;

	if (res > n) {
		long left = raw_copy_from_user(dst, src, n);
		if (unlikely(left))
			memset(dst + (n - left), 0, left);
		return n;
	} else {
		long left = raw_copy_from_user(dst, src, res);
		if (unlikely(left))
			memset(dst + (res - left), 0, left);
		return res-1;
	}
}
#endif

arch/hexagon/kernel/hexagon_ksyms.c

@@ -15,7 +15,6 @@ EXPORT_SYMBOL(__clear_user_hexagon);
EXPORT_SYMBOL(raw_copy_from_user);
EXPORT_SYMBOL(raw_copy_to_user);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(__vmgetie);
EXPORT_SYMBOL(__vmsetie);
EXPORT_SYMBOL(__vmyield);

arch/hexagon/mm/Makefile

@@ -4,4 +4,4 @@
#
obj-y := init.o ioremap.o uaccess.o vm_fault.o cache.o
obj-y += copy_to_user.o copy_from_user.o strnlen_user.o vm_tlb.o
obj-y += copy_to_user.o copy_from_user.o vm_tlb.o

arch/hexagon/mm/strnlen_user.S

@@ -1,126 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * User string length functions for kernel
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#define isrc	r0
#define max	r1	/* Do not change! */
#define end	r2
#define tmp1	r3

#define obo	r6	/* off-by-one */
#define start	r7
#define mod8	r8

#define dbuf	r15:14
#define dcmp	r13:12

/*
 * The vector mask version of this turned out *really* badly.
 * The hardware loop version also turned out *really* badly.
 * Seems straight pointer arithmetic basically wins here.
 */

#define fname __strnlen_user

	.text
	.global fname
	.type fname, @function
	.p2align 5 /* why? */
fname:
	{
		mod8 = and(isrc,#7);
		end = add(isrc,max);
		start = isrc;
	}
	{
		P0 = cmp.eq(mod8,#0);
		mod8 = and(end,#7);
		dcmp = #0;
		if (P0.new) jump:t dw_loop;	/* fire up the oven */
	}

alignment_loop:
fail_1:
	{
		tmp1 = memb(start++#1);
	}
	{
		P0 = cmp.eq(tmp1,#0);
		if (P0.new) jump:nt exit_found;
		P1 = cmp.gtu(end,start);
		mod8 = and(start,#7);
	}
	{
		if (!P1) jump exit_error;	/* hit the end */
		P0 = cmp.eq(mod8,#0);
	}
	{
		if (!P0) jump alignment_loop;
	}

dw_loop:
fail_2:
	{
		dbuf = memd(start);
		obo = add(start,#1);
	}
	{
		P0 = vcmpb.eq(dbuf,dcmp);
	}
	{
		tmp1 = P0;
		P0 = cmp.gtu(end,start);
	}
	{
		tmp1 = ct0(tmp1);
		mod8 = and(end,#7);
		if (!P0) jump end_check;
	}
	{
		P0 = cmp.eq(tmp1,#32);
		if (!P0.new) jump:nt exit_found;
		if (!P0.new) start = add(obo,tmp1);
	}
	{
		start = add(start,#8);
		jump dw_loop;
	}	/* might be nice to combine these jumps... */

end_check:
	{
		P0 = cmp.gt(tmp1,mod8);
		if (P0.new) jump:nt exit_error;	/* neverfound! */
		start = add(obo,tmp1);
	}

exit_found:
	{
		R0 = sub(start,isrc);
		jumpr R31;
	}

exit_error:
	{
		R0 = add(max,#1);
		jumpr R31;
	}

	/* Uh, what does the "fixup" return here? */
	.falign
fix_1:
	{
		R0 = #0;
		jumpr R31;
	}

	.size fname,.-fname

	.section __ex_table,"a"
	.long fail_1,fix_1
	.long fail_2,fix_1
	.previous