Merge branch 'work.csum_and_copy' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull copy_and_csum cleanups from Al Viro:
 "Saner calling conventions for csum_and_copy_..._user() and friends"

[ Removing 800+ lines of code and cleaning stuff up is good - Linus ]

* 'work.csum_and_copy' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  ppc: propagate the calling conventions change down to csum_partial_copy_generic()
  amd64: switch csum_partial_copy_generic() to new calling conventions
  sparc64: propagate the calling convention changes down to __csum_partial_copy_...()
  xtensa: propagate the calling conventions change down into csum_partial_copy_generic()
  mips: propagate the calling convention change down into __csum_partial_copy_..._user()
  mips: __csum_partial_copy_kernel() has no users left
  mips: csum_and_copy_{to,from}_user() are never called under KERNEL_DS
  sparc32: propagate the calling conventions change down to __csum_partial_copy_sparc_generic()
  i386: propagate the calling conventions change down to csum_partial_copy_generic()
  sh: propagate the calling conventions change down to csum_partial_copy_generic()
  m68k: get rid of zeroing destination on error in csum_and_copy_from_user()
  arm: propagate the calling convention changes down to csum_partial_copy_from_user()
  alpha: propagate the calling convention changes down to csum_partial_copy.c helpers
  saner calling conventions for csum_and_copy_..._user()
  csum_and_copy_..._user(): pass 0xffffffff instead of 0 as initial sum
  csum_partial_copy_nocheck(): drop the last argument
  unify generic instances of csum_partial_copy_nocheck()
  icmp_push_reply(): reorder adding the checksum up
  skb_copy_and_csum_bits(): don't bother with the last argument
Linus Torvalds 2020-10-12 16:24:13 -07:00
commit c90578360c
59 changed files with 614 additions and 1467 deletions
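
The API change at the heart of the series: csum_and_copy_from_user() and
csum_and_copy_to_user() lose the partial-sum and error-pointer arguments,
seed the checksum with 0xffffffff internally, and signal a fault by
returning 0 - a value a successful copy can no longer produce. A hedged
caller-side sketch of the two conventions (the helper name is made up,
not code from this series):

	static int example_copy_and_csum(const void __user *src, void *dst,
					 int len, __wsum *csump)
	{
		__wsum csum;

		/* old: csum = csum_and_copy_from_user(src, dst, len, sum, &err);
		 *      with the fault reported through *err */
		csum = csum_and_copy_from_user(src, dst, len);	/* new */
		if (!csum)
			return -EFAULT;	/* 0 now unambiguously means a fault */
		*csump = csum;
		return 0;
	}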


@ -42,9 +42,10 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary
*/
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp);
#define _HAVE_ARCH_CSUM_AND_COPY
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);
__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
/*


@ -39,12 +39,11 @@ __asm__ __volatile__("insql %1,%2,%0":"=r" (z):"r" (x),"r" (y))
#define insqh(x,y,z) \
__asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
#define __get_user_u(x,ptr) \
#define __get_word(insn,x,ptr) \
({ \
long __guu_err; \
__asm__ __volatile__( \
"1: ldq_u %0,%2\n" \
"1: "#insn" %0,%2\n" \
"2:\n" \
EXC(1b,2b,%0,%1) \
: "=r"(x), "=r"(__guu_err) \
@ -52,19 +51,6 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
__guu_err; \
})
#define __put_user_u(x,ptr) \
({ \
long __puu_err; \
__asm__ __volatile__( \
"1: stq_u %2,%1\n" \
"2:\n" \
EXC(1b,2b,$31,%0) \
: "=r"(__puu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(0)); \
__puu_err; \
})
static inline unsigned short from64to16(unsigned long x)
{
/* Using extract instructions is a bit more efficient
@ -95,15 +81,15 @@ static inline unsigned short from64to16(unsigned long x)
*/
static inline unsigned long
csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
long len, unsigned long checksum,
int *errp)
long len)
{
unsigned long checksum = ~0U;
unsigned long carry = 0;
int err = 0;
while (len >= 0) {
unsigned long word;
err |= __get_user(word, src);
if (__get_word(ldq, word, src))
return 0;
checksum += carry;
src++;
checksum += word;
@ -116,7 +102,8 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
checksum += carry;
if (len) {
unsigned long word, tmp;
err |= __get_user(word, src);
if (__get_word(ldq, word, src))
return 0;
tmp = *dst;
mskql(word, len, word);
checksum += word;
@ -125,7 +112,6 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
*dst = word | tmp;
checksum += carry;
}
if (err && errp) *errp = err;
return checksum;
}
@ -137,20 +123,21 @@ static inline unsigned long
csum_partial_cfu_dest_aligned(const unsigned long __user *src,
unsigned long *dst,
unsigned long soff,
long len, unsigned long checksum,
int *errp)
long len)
{
unsigned long first;
unsigned long word, carry;
unsigned long lastsrc = 7+len+(unsigned long)src;
int err = 0;
unsigned long checksum = ~0U;
err |= __get_user_u(first,src);
if (__get_word(ldq_u, first,src))
return 0;
carry = 0;
while (len >= 0) {
unsigned long second;
err |= __get_user_u(second, src+1);
if (__get_word(ldq_u, second, src+1))
return 0;
extql(first, soff, word);
len -= 8;
src++;
@ -168,7 +155,8 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src,
if (len) {
unsigned long tmp;
unsigned long second;
err |= __get_user_u(second, lastsrc);
if (__get_word(ldq_u, second, lastsrc))
return 0;
tmp = *dst;
extql(first, soff, word);
extqh(second, soff, first);
@ -180,7 +168,6 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src,
*dst = word | tmp;
checksum += carry;
}
if (err && errp) *errp = err;
return checksum;
}
@ -191,18 +178,18 @@ static inline unsigned long
csum_partial_cfu_src_aligned(const unsigned long __user *src,
unsigned long *dst,
unsigned long doff,
long len, unsigned long checksum,
unsigned long partial_dest,
int *errp)
long len,
unsigned long partial_dest)
{
unsigned long carry = 0;
unsigned long word;
unsigned long second_dest;
int err = 0;
unsigned long checksum = ~0U;
mskql(partial_dest, doff, partial_dest);
while (len >= 0) {
err |= __get_user(word, src);
if (__get_word(ldq, word, src))
return 0;
len -= 8;
insql(word, doff, second_dest);
checksum += carry;
@ -216,7 +203,8 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src,
len += 8;
if (len) {
checksum += carry;
err |= __get_user(word, src);
if (__get_word(ldq, word, src))
return 0;
mskql(word, len, word);
len -= 8;
checksum += word;
@ -237,7 +225,6 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src,
stq_u(partial_dest | second_dest, dst);
out:
checksum += carry;
if (err && errp) *errp = err;
return checksum;
}
@ -249,23 +236,23 @@ static inline unsigned long
csum_partial_cfu_unaligned(const unsigned long __user * src,
unsigned long * dst,
unsigned long soff, unsigned long doff,
long len, unsigned long checksum,
unsigned long partial_dest,
int *errp)
long len, unsigned long partial_dest)
{
unsigned long carry = 0;
unsigned long first;
unsigned long lastsrc;
int err = 0;
unsigned long checksum = ~0U;
err |= __get_user_u(first, src);
if (__get_word(ldq_u, first, src))
return 0;
lastsrc = 7+len+(unsigned long)src;
mskql(partial_dest, doff, partial_dest);
while (len >= 0) {
unsigned long second, word;
unsigned long second_dest;
err |= __get_user_u(second, src+1);
if (__get_word(ldq_u, second, src+1))
return 0;
extql(first, soff, word);
checksum += carry;
len -= 8;
@ -286,7 +273,8 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
unsigned long second, word;
unsigned long second_dest;
err |= __get_user_u(second, lastsrc);
if (__get_word(ldq_u, second, lastsrc))
return 0;
extql(first, soff, word);
extqh(second, soff, first);
word |= first;
@ -307,7 +295,8 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
unsigned long second, word;
unsigned long second_dest;
err |= __get_user_u(second, lastsrc);
if (__get_word(ldq_u, second, lastsrc))
return 0;
extql(first, soff, word);
extqh(second, soff, first);
word |= first;
@ -320,66 +309,55 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
stq_u(partial_dest | word | second_dest, dst);
checksum += carry;
}
if (err && errp) *errp = err;
return checksum;
}
__wsum
csum_and_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *errp)
static __wsum __csum_and_copy(const void __user *src, void *dst, int len)
{
unsigned long checksum = (__force u32) sum;
unsigned long soff = 7 & (unsigned long) src;
unsigned long doff = 7 & (unsigned long) dst;
unsigned long checksum;
if (len) {
if (!access_ok(src, len)) {
if (errp) *errp = -EFAULT;
memset(dst, 0, len);
return sum;
}
if (!doff) {
if (!soff)
checksum = csum_partial_cfu_aligned(
(const unsigned long __user *) src,
(unsigned long *) dst,
len-8, checksum, errp);
else
checksum = csum_partial_cfu_dest_aligned(
(const unsigned long __user *) src,
(unsigned long *) dst,
soff, len-8, checksum, errp);
} else {
unsigned long partial_dest;
ldq_u(partial_dest, dst);
if (!soff)
checksum = csum_partial_cfu_src_aligned(
(const unsigned long __user *) src,
(unsigned long *) dst,
doff, len-8, checksum,
partial_dest, errp);
else
checksum = csum_partial_cfu_unaligned(
(const unsigned long __user *) src,
(unsigned long *) dst,
soff, doff, len-8, checksum,
partial_dest, errp);
}
checksum = from64to16 (checksum);
if (!doff) {
if (!soff)
checksum = csum_partial_cfu_aligned(
(const unsigned long __user *) src,
(unsigned long *) dst, len-8);
else
checksum = csum_partial_cfu_dest_aligned(
(const unsigned long __user *) src,
(unsigned long *) dst,
soff, len-8);
} else {
unsigned long partial_dest;
ldq_u(partial_dest, dst);
if (!soff)
checksum = csum_partial_cfu_src_aligned(
(const unsigned long __user *) src,
(unsigned long *) dst,
doff, len-8, partial_dest);
else
checksum = csum_partial_cfu_unaligned(
(const unsigned long __user *) src,
(unsigned long *) dst,
soff, doff, len-8, partial_dest);
}
return (__force __wsum)checksum;
return (__force __wsum)from64to16 (checksum);
}
__wsum
csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
if (!access_ok(src, len))
return 0;
return __csum_and_copy(src, dst, len);
}
EXPORT_SYMBOL(csum_and_copy_from_user);
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
__wsum checksum;
mm_segment_t oldfs = get_fs();
set_fs(KERNEL_DS);
checksum = csum_and_copy_from_user((__force const void __user *)src,
dst, len, sum, NULL);
set_fs(oldfs);
return checksum;
return __csum_and_copy((__force const void __user *)src,
dst, len);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);


@ -35,23 +35,20 @@ __wsum csum_partial(const void *buff, int len, __wsum sum);
*/
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
csum_partial_copy_nocheck(const void *src, void *dst, int len);
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
csum_partial_copy_from_user(const void __user *src, void *dst, int len);
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
#define _HAVE_ARCH_CSUM_AND_COPY
static inline
__wsum csum_and_copy_from_user (const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
if (access_ok(src, len))
return csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
if (!access_ok(src, len))
return 0;
if (len)
*err_ptr = -EFAULT;
return sum;
return csum_partial_copy_from_user(src, dst, len);
}
/*


@ -9,8 +9,8 @@
.text
/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len, __u32 sum)
* Params : r0 = src, r1 = dst, r2 = len, r3 = checksum
/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len)
* Params : r0 = src, r1 = dst, r2 = len
* Returns : r0 = new checksum
*/


@ -86,6 +86,7 @@ sum .req r3
FN_ENTRY
save_regs
mov sum, #-1
cmp len, #8 @ Ensure that we have at least
blo .Lless8 @ 8 bytes to copy.


@ -62,9 +62,9 @@
/*
* unsigned int
* csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *err_ptr)
* r0 = src, r1 = dst, r2 = len, r3 = sum, [sp] = *err_ptr
* Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
* csum_partial_copy_from_user(const char *src, char *dst, int len)
* r0 = src, r1 = dst, r2 = len
* Returns : r0 = checksum or 0
*/
#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
@ -73,25 +73,11 @@
#include "csumpartialcopygeneric.S"
/*
* FIXME: minor buglet here
* We don't return the checksum for the data present in the buffer. To do
* so properly, we would have to add in whatever registers were loaded before
* the fault, which, with the current asm above is not predictable.
* We report fault by returning 0 csum - impossible in normal case, since
* we start with 0xffffffff for initial sum.
*/
.pushsection .text.fixup,"ax"
.align 4
9001: mov r4, #-EFAULT
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
ldr r5, [sp, #9*4] @ *err_ptr
#else
ldr r5, [sp, #8*4] @ *err_ptr
#endif
str r4, [r5]
ldmia sp, {r1, r2} @ retrieve dst, len
add r2, r2, r1
mov r0, #0 @ zero the buffer
9002: teq r2, r1
strbne r0, [r1], #1
bne 9002b
9001: mov r0, #0
load_regs
.popsection
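
The "impossible in normal case" comment above is what makes the single 0
return value safe: with the accumulator seeded to 0xffffffff, one step of
32-bit end-around-carry addition can only produce 0 from 0 + 0, and the
accumulator is never 0 to begin with. A standalone userspace demo of that
invariant - illustrative only, not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	/* one step of 32-bit checksum accumulation with end-around carry */
	static uint32_t add32_with_carry(uint32_t sum, uint32_t x)
	{
		uint64_t t = (uint64_t)sum + x;

		return (uint32_t)t + (uint32_t)(t >> 32);
	}

	int main(void)
	{
		uint32_t sum = 0xffffffffu;	/* the new initial sum */
		uint32_t data[] = { 0x0, 0x1, 0xffffffffu, 0xdeadbeefu };
		unsigned int i;

		for (i = 0; i < sizeof(data) / sizeof(data[0]); i++) {
			sum = add32_with_carry(sum, data[i]);
			printf("%08x\n", (unsigned)sum);	/* never 00000000 */
		}
		return 0;
	}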


@ -26,6 +26,9 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
}
#define csum_tcpudp_nofold csum_tcpudp_nofold
#define _HAVE_ARCH_CSUM_AND_COPY
extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
#include <asm-generic/checksum.h>
#endif /* _ASM_C6X_CHECKSUM_H */


@ -24,7 +24,6 @@
ENTRY(csum_partial_copy_nocheck)
MVC .S2 ILC,B30
MV .D1X B6,A31 ; given csum
ZERO .D1 A9 ; csum (a side)
|| ZERO .D2 B9 ; csum (b side)
|| SHRU .S2X A6,2,B5 ; len / 4
@ -144,8 +143,7 @@ L91: SHRU .S2X A9,16,B4
SHRU .S1 A9,16,A0
[A0] BNOP .S1 L91,5
L10: ADD .D1 A31,A9,A9
MV .D1 A9,A4
L10: MV .D1 A9,A4
BNOP .S2 B3,4
MVC .S2 B30,ILC


@ -9,17 +9,6 @@
#define do_csum do_csum
unsigned int do_csum(const void *voidptr, int len);
/*
* the same as csum_partial, but copies from src while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
#define csum_partial_copy_nocheck csum_partial_copy_nocheck
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented


@ -176,14 +176,3 @@ unsigned int do_csum(const void *voidptr, int len)
return 0xFFFF & sum0;
}
/*
* copy from ds while checksumming, otherwise like csum_partial
*/
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
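
Several architectures carried this same memcpy()-plus-csum_partial()
fallback (this removal and the similar ones further down). The "unify
generic instances of csum_partial_copy_nocheck()" commit in the series
replaces them with one shared default along these lines - a sketch; the
exact guard and header placement in the tree may differ:

	#ifndef _HAVE_ARCH_CSUM_AND_COPY
	static __always_inline
	__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
	{
		memcpy(dst, src, len);
		return csum_partial(dst, len, 0);
	}
	#endif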


@ -37,9 +37,6 @@ extern __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
/*
* This routine is used for miscellaneous IP-like checksums, mainly in
* icmp.c


@ -96,18 +96,3 @@ unsigned long do_csum_c(const unsigned char * buff, int len, unsigned int psum)
out:
return result;
}
/*
* XXX Fixme
*
* This is very ugly but temporary. THIS NEEDS SERIOUS ENHANCEMENTS.
* But it's very tricky to get right even in C.
*/
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);


@ -31,14 +31,13 @@ __wsum csum_partial(const void *buff, int len, __wsum sum);
*/
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
#define _HAVE_ARCH_CSUM_AND_COPY
extern __wsum csum_and_copy_from_user(const void __user *src,
void *dst,
int len, __wsum sum,
int *csum_err);
int len);
extern __wsum csum_partial_copy_nocheck(const void *src,
void *dst, int len,
__wsum sum);
void *dst, int len);
/*
* This is a version of ip_fast_csum() optimized for IP headers,


@ -129,8 +129,7 @@ EXPORT_SYMBOL(csum_partial);
*/
__wsum
csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *csum_err)
csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
/*
* GCC doesn't like more than 10 operands for the asm
@ -138,6 +137,7 @@ csum_and_copy_from_user(const void __user *src, void *dst,
* code.
*/
unsigned long tmp1, tmp2;
__wsum sum = ~0U;
__asm__("movel %2,%4\n\t"
"btst #1,%4\n\t" /* Check alignment */
@ -236,84 +236,33 @@ csum_and_copy_from_user(const void __user *src, void *dst,
"clrl %5\n\t"
"addxl %5,%0\n\t" /* add X bit */
"7:\t"
"clrl %5\n" /* no error - clear return value */
"8:\n"
".section .fixup,\"ax\"\n"
".even\n"
/* If any exception occurs zero out the rest.
Similarities with the code above are intentional :-) */
/* If any exception occurs, return 0 */
"90:\t"
"clrw %3@+\n\t"
"movel %1,%4\n\t"
"lsrl #5,%1\n\t"
"jeq 1f\n\t"
"subql #1,%1\n"
"91:\t"
"clrl %3@+\n"
"92:\t"
"clrl %3@+\n"
"93:\t"
"clrl %3@+\n"
"94:\t"
"clrl %3@+\n"
"95:\t"
"clrl %3@+\n"
"96:\t"
"clrl %3@+\n"
"97:\t"
"clrl %3@+\n"
"98:\t"
"clrl %3@+\n\t"
"dbra %1,91b\n\t"
"clrw %1\n\t"
"subql #1,%1\n\t"
"jcc 91b\n"
"1:\t"
"movel %4,%1\n\t"
"andw #0x1c,%4\n\t"
"jeq 1f\n\t"
"lsrw #2,%4\n\t"
"subqw #1,%4\n"
"99:\t"
"clrl %3@+\n\t"
"dbra %4,99b\n\t"
"1:\t"
"andw #3,%1\n\t"
"jeq 9f\n"
"100:\t"
"clrw %3@+\n\t"
"tstw %1\n\t"
"jeq 9f\n"
"101:\t"
"clrb %3@+\n"
"9:\t"
#define STR(X) STR1(X)
#define STR1(X) #X
"moveq #-" STR(EFAULT) ",%5\n\t"
"jra 8b\n"
"clrl %0\n"
"jra 7b\n"
".previous\n"
".section __ex_table,\"a\"\n"
".long 10b,90b\n"
".long 11b,91b\n"
".long 12b,92b\n"
".long 13b,93b\n"
".long 14b,94b\n"
".long 15b,95b\n"
".long 16b,96b\n"
".long 17b,97b\n"
".long 18b,98b\n"
".long 19b,99b\n"
".long 20b,100b\n"
".long 21b,101b\n"
".long 11b,90b\n"
".long 12b,90b\n"
".long 13b,90b\n"
".long 14b,90b\n"
".long 15b,90b\n"
".long 16b,90b\n"
".long 17b,90b\n"
".long 18b,90b\n"
".long 19b,90b\n"
".long 20b,90b\n"
".long 21b,90b\n"
".previous"
: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
"=&d" (tmp1), "=d" (tmp2)
: "0" (sum), "1" (len), "2" (src), "3" (dst)
);
*csum_err = tmp2;
return(sum);
return sum;
}
EXPORT_SYMBOL(csum_and_copy_from_user);
@ -324,9 +273,10 @@ EXPORT_SYMBOL(csum_and_copy_from_user);
*/
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
unsigned long tmp1, tmp2;
__wsum sum = 0;
__asm__("movel %2,%4\n\t"
"btst #1,%4\n\t" /* Check alignment */
"jeq 2f\n\t"


@ -34,42 +34,17 @@
*/
__wsum csum_partial(const void *buff, int len, __wsum sum);
__wsum __csum_partial_copy_kernel(const void *src, void *dst,
int len, __wsum sum, int *err_ptr);
__wsum __csum_partial_copy_from_user(const void *src, void *dst,
int len, __wsum sum, int *err_ptr);
__wsum __csum_partial_copy_to_user(const void *src, void *dst,
int len, __wsum sum, int *err_ptr);
/*
* this is a new version of the above that records errors it finds in *errp,
* but continues and zeros the rest of the buffer.
*/
static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *err_ptr)
{
might_fault();
if (uaccess_kernel())
return __csum_partial_copy_kernel((__force void *)src, dst,
len, sum, err_ptr);
else
return __csum_partial_copy_from_user((__force void *)src, dst,
len, sum, err_ptr);
}
__wsum __csum_partial_copy_from_user(const void __user *src, void *dst, int len);
__wsum __csum_partial_copy_to_user(const void *src, void __user *dst, int len);
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
if (access_ok(src, len))
return csum_partial_copy_from_user(src, dst, len, sum,
err_ptr);
if (len)
*err_ptr = -EFAULT;
return sum;
might_fault();
if (!access_ok(src, len))
return 0;
return __csum_partial_copy_from_user(src, dst, len);
}
/*
@ -77,33 +52,24 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
*/
#define HAVE_CSUM_COPY_USER
static inline
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err_ptr)
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
might_fault();
if (access_ok(dst, len)) {
if (uaccess_kernel())
return __csum_partial_copy_kernel(src,
(__force void *)dst,
len, sum, err_ptr);
else
return __csum_partial_copy_to_user(src,
(__force void *)dst,
len, sum, err_ptr);
}
if (len)
*err_ptr = -EFAULT;
return (__force __wsum)-1; /* invalid checksum */
if (!access_ok(dst, len))
return 0;
return __csum_partial_copy_to_user(src, dst, len);
}
/*
* the same as csum_partial, but copies from user space (but on MIPS
* we have just one address space, so this is identical to the above)
*/
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
#define csum_partial_copy_nocheck csum_partial_copy_nocheck
#define _HAVE_ARCH_CSUM_AND_COPY
__wsum __csum_partial_copy_nocheck(const void *src, void *dst, int len);
static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
return __csum_partial_copy_nocheck(src, dst, len);
}
/*
* Fold a partial checksum without adding pseudo headers


@ -308,8 +308,8 @@ EXPORT_SYMBOL(csum_partial)
/*
* checksum and copy routines based on memcpy.S
*
* csum_partial_copy_nocheck(src, dst, len, sum)
* __csum_partial_copy_kernel(src, dst, len, sum, errp)
* csum_partial_copy_nocheck(src, dst, len)
* __csum_partial_copy_kernel(src, dst, len)
*
* See "Spec" in memcpy.S for details. Unlike __copy_user, all
* function in this file use the standard calling convention.
@ -318,26 +318,11 @@ EXPORT_SYMBOL(csum_partial)
#define src a0
#define dst a1
#define len a2
#define psum a3
#define sum v0
#define odd t8
#define errptr t9
/*
* The exception handler for loads requires that:
* 1- AT contain the address of the byte just past the end of the source
* of the copy,
* 2- src_entry <= src < AT, and
* 3- (dst - src) == (dst_entry - src_entry),
* The _entry suffix denotes values when __copy_user was called.
*
(1) is set up by __csum_partial_copy_from_user and maintained by
* not writing AT in __csum_partial_copy
* (2) is met by incrementing src by the number of bytes copied
* (3) is met by not doing loads between a pair of increments of dst and src
*
The exception handlers for stores store -EFAULT to errptr and return.
* These handlers do not need to overwrite any data.
* All exception handlers simply return 0.
*/
/* Instruction type */
@ -358,11 +343,11 @@ EXPORT_SYMBOL(csum_partial)
* addr : Address
* handler : Exception handler
*/
#define EXC(insn, type, reg, addr, handler) \
#define EXC(insn, type, reg, addr) \
.if \mode == LEGACY_MODE; \
9: insn reg, addr; \
.section __ex_table,"a"; \
PTR 9b, handler; \
PTR 9b, .L_exc; \
.previous; \
/* This is enabled in EVA mode */ \
.else; \
@ -371,7 +356,7 @@ EXPORT_SYMBOL(csum_partial)
((\to == USEROP) && (type == ST_INSN)); \
9: __BUILD_EVA_INSN(insn##e, reg, addr); \
.section __ex_table,"a"; \
PTR 9b, handler; \
PTR 9b, .L_exc; \
.previous; \
.else; \
/* EVA without exception */ \
@ -384,14 +369,14 @@ EXPORT_SYMBOL(csum_partial)
#ifdef USE_DOUBLE
#define LOADK ld /* No exception */
#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler)
#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler)
#define LOAD(reg, addr) EXC(ld, LD_INSN, reg, addr)
#define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr)
#define LOADL(reg, addr) EXC(ldl, LD_INSN, reg, addr)
#define LOADR(reg, addr) EXC(ldr, LD_INSN, reg, addr)
#define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr)
#define STOREL(reg, addr) EXC(sdl, ST_INSN, reg, addr)
#define STORER(reg, addr) EXC(sdr, ST_INSN, reg, addr)
#define STORE(reg, addr) EXC(sd, ST_INSN, reg, addr)
#define ADD daddu
#define SUB dsubu
#define SRL dsrl
@ -404,14 +389,14 @@ EXPORT_SYMBOL(csum_partial)
#else
#define LOADK lw /* No exception */
#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler)
#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler)
#define LOAD(reg, addr) EXC(lw, LD_INSN, reg, addr)
#define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr)
#define LOADL(reg, addr) EXC(lwl, LD_INSN, reg, addr)
#define LOADR(reg, addr) EXC(lwr, LD_INSN, reg, addr)
#define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr)
#define STOREL(reg, addr) EXC(swl, ST_INSN, reg, addr)
#define STORER(reg, addr) EXC(swr, ST_INSN, reg, addr)
#define STORE(reg, addr) EXC(sw, ST_INSN, reg, addr)
#define ADD addu
#define SUB subu
#define SRL srl
@ -450,22 +435,9 @@ EXPORT_SYMBOL(csum_partial)
.set at=v1
#endif
.macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
.macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to
PTR_ADDU AT, src, len /* See (1) above. */
/* initialize __nocheck if this is the first time we execute this
* macro
*/
#ifdef CONFIG_64BIT
move errptr, a4
#else
lw errptr, 16(sp)
#endif
.if \__nocheck == 1
FEXPORT(csum_partial_copy_nocheck)
EXPORT_SYMBOL(csum_partial_copy_nocheck)
.endif
move sum, zero
li sum, -1
move odd, zero
/*
* Note: dst & src may be unaligned, len may be 0
@ -497,31 +469,31 @@ EXPORT_SYMBOL(csum_partial)
SUB len, 8*NBYTES # subtract here for bgez loop
.align 4
1:
LOAD(t0, UNIT(0)(src), .Ll_exc\@)
LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
LOAD(t0, UNIT(0)(src))
LOAD(t1, UNIT(1)(src))
LOAD(t2, UNIT(2)(src))
LOAD(t3, UNIT(3)(src))
LOAD(t4, UNIT(4)(src))
LOAD(t5, UNIT(5)(src))
LOAD(t6, UNIT(6)(src))
LOAD(t7, UNIT(7)(src))
SUB len, len, 8*NBYTES
ADD src, src, 8*NBYTES
STORE(t0, UNIT(0)(dst), .Ls_exc\@)
STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
STORE(t1, UNIT(1)(dst), .Ls_exc\@)
STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
STORE(t2, UNIT(2)(dst), .Ls_exc\@)
STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
STORE(t3, UNIT(3)(dst), .Ls_exc\@)
STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
STORE(t4, UNIT(4)(dst), .Ls_exc\@)
STORE(t4, UNIT(4)(dst))
ADDC(t4, t5)
STORE(t5, UNIT(5)(dst), .Ls_exc\@)
STORE(t5, UNIT(5)(dst))
ADDC(sum, t4)
STORE(t6, UNIT(6)(dst), .Ls_exc\@)
STORE(t6, UNIT(6)(dst))
ADDC(t6, t7)
STORE(t7, UNIT(7)(dst), .Ls_exc\@)
STORE(t7, UNIT(7)(dst))
ADDC(sum, t6)
.set reorder /* DADDI_WAR */
ADD dst, dst, 8*NBYTES
@ -541,19 +513,19 @@ EXPORT_SYMBOL(csum_partial)
/*
* len >= 4*NBYTES
*/
LOAD(t0, UNIT(0)(src), .Ll_exc\@)
LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
LOAD(t0, UNIT(0)(src))
LOAD(t1, UNIT(1)(src))
LOAD(t2, UNIT(2)(src))
LOAD(t3, UNIT(3)(src))
SUB len, len, 4*NBYTES
ADD src, src, 4*NBYTES
STORE(t0, UNIT(0)(dst), .Ls_exc\@)
STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
STORE(t1, UNIT(1)(dst), .Ls_exc\@)
STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
STORE(t2, UNIT(2)(dst), .Ls_exc\@)
STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
STORE(t3, UNIT(3)(dst), .Ls_exc\@)
STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
@ -566,10 +538,10 @@ EXPORT_SYMBOL(csum_partial)
beq rem, len, .Lcopy_bytes\@
nop
1:
LOAD(t0, 0(src), .Ll_exc\@)
LOAD(t0, 0(src))
ADD src, src, NBYTES
SUB len, len, NBYTES
STORE(t0, 0(dst), .Ls_exc\@)
STORE(t0, 0(dst))
ADDC(sum, t0)
.set reorder /* DADDI_WAR */
ADD dst, dst, NBYTES
@ -592,10 +564,10 @@ EXPORT_SYMBOL(csum_partial)
ADD t1, dst, len # t1 is just past last byte of dst
li bits, 8*NBYTES
SLL rem, len, 3 # rem = number of bits to keep
LOAD(t0, 0(src), .Ll_exc\@)
LOAD(t0, 0(src))
SUB bits, bits, rem # bits = number of bits to discard
SHIFT_DISCARD t0, t0, bits
STREST(t0, -1(t1), .Ls_exc\@)
STREST(t0, -1(t1))
SHIFT_DISCARD_REVERT t0, t0, bits
.set reorder
ADDC(sum, t0)
@ -612,12 +584,12 @@ EXPORT_SYMBOL(csum_partial)
* Set match = (src and dst have same alignment)
*/
#define match rem
LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
LDFIRST(t3, FIRST(0)(src))
ADD t2, zero, NBYTES
LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
LDREST(t3, REST(0)(src))
SUB t2, t2, t1 # t2 = number of bytes copied
xor match, t0, t1
STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
STFIRST(t3, FIRST(0)(dst))
SLL t4, t1, 3 # t4 = number of bits to discard
SHIFT_DISCARD t3, t3, t4
/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
@ -639,26 +611,26 @@ EXPORT_SYMBOL(csum_partial)
* It's OK to load FIRST(N+1) before REST(N) because the two addresses
* are to the same unit (unless src is aligned, but it's not).
*/
LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
LDFIRST(t0, FIRST(0)(src))
LDFIRST(t1, FIRST(1)(src))
SUB len, len, 4*NBYTES
LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
LDREST(t0, REST(0)(src))
LDREST(t1, REST(1)(src))
LDFIRST(t2, FIRST(2)(src))
LDFIRST(t3, FIRST(3)(src))
LDREST(t2, REST(2)(src))
LDREST(t3, REST(3)(src))
ADD src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
nop # improves slotting
#endif
STORE(t0, UNIT(0)(dst), .Ls_exc\@)
STORE(t0, UNIT(0)(dst))
ADDC(t0, t1)
STORE(t1, UNIT(1)(dst), .Ls_exc\@)
STORE(t1, UNIT(1)(dst))
ADDC(sum, t0)
STORE(t2, UNIT(2)(dst), .Ls_exc\@)
STORE(t2, UNIT(2)(dst))
ADDC(t2, t3)
STORE(t3, UNIT(3)(dst), .Ls_exc\@)
STORE(t3, UNIT(3)(dst))
ADDC(sum, t2)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
@ -671,11 +643,11 @@ EXPORT_SYMBOL(csum_partial)
beq rem, len, .Lcopy_bytes\@
nop
1:
LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
LDFIRST(t0, FIRST(0)(src))
LDREST(t0, REST(0)(src))
ADD src, src, NBYTES
SUB len, len, NBYTES
STORE(t0, 0(dst), .Ls_exc\@)
STORE(t0, 0(dst))
ADDC(sum, t0)
.set reorder /* DADDI_WAR */
ADD dst, dst, NBYTES
@ -696,11 +668,10 @@ EXPORT_SYMBOL(csum_partial)
#endif
move t2, zero # partial word
li t3, SHIFT_START # shift
/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N) \
LOADBU(t0, N(src), .Ll_exc_copy\@); \
LOADBU(t0, N(src)); \
SUB len, len, 1; \
STOREB(t0, N(dst), .Ls_exc\@); \
STOREB(t0, N(dst)); \
SLLV t0, t0, t3; \
addu t3, SHIFT_INC; \
beqz len, .Lcopy_bytes_done\@; \
@ -714,9 +685,9 @@ EXPORT_SYMBOL(csum_partial)
COPY_BYTE(4)
COPY_BYTE(5)
#endif
LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
LOADBU(t0, NBYTES-2(src))
SUB len, len, 1
STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
STOREB(t0, NBYTES-2(dst))
SLLV t0, t0, t3
or t2, t0
.Lcopy_bytes_done\@:
@ -753,97 +724,31 @@ EXPORT_SYMBOL(csum_partial)
#endif
.set pop
.set reorder
ADDC32(sum, psum)
jr ra
.set noreorder
.Ll_exc_copy\@:
/*
* Copy bytes from src until faulting load address (or until a
* lb faults)
*
* When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
* may be more than a byte beyond the last address.
* Hence, the lb below may get an exception.
*
* Assumes src < THREAD_BUADDR($28)
*/
LOADK t0, TI_TASK($28)
li t2, SHIFT_START
LOADK t0, THREAD_BUADDR(t0)
1:
LOADBU(t1, 0(src), .Ll_exc\@)
ADD src, src, 1
sb t1, 0(dst) # can't fault -- we're copy_from_user
SLLV t1, t1, t2
addu t2, SHIFT_INC
ADDC(sum, t1)
.set reorder /* DADDI_WAR */
ADD dst, dst, 1
bne src, t0, 1b
.set noreorder
.Ll_exc\@:
LOADK t0, TI_TASK($28)
nop
LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
nop
SUB len, AT, t0 # len number of uncopied bytes
/*
* Here's where we rely on src and dst being incremented in tandem,
* See (3) above.
* dst += (fault addr - src) to put dst at first byte to clear
*/
ADD dst, t0 # compute start address in a1
SUB dst, src
/*
* Clear len bytes starting at dst. Can't call __bzero because it
* might modify len. An inefficient loop for these rare times...
*/
.set reorder /* DADDI_WAR */
SUB src, len, 1
beqz len, .Ldone\@
.set noreorder
1: sb zero, 0(dst)
ADD dst, dst, 1
.set push
.set noat
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
bnez src, 1b
SUB src, src, 1
#else
li v1, 1
bnez src, 1b
SUB src, src, v1
#endif
li v1, -EFAULT
b .Ldone\@
sw v1, (errptr)
.Ls_exc\@:
li v0, -1 /* invalid checksum */
li v1, -EFAULT
jr ra
sw v1, (errptr)
.set pop
.endm
LEAF(__csum_partial_copy_kernel)
EXPORT_SYMBOL(__csum_partial_copy_kernel)
.set noreorder
.L_exc:
jr ra
li v0, 0
FEXPORT(__csum_partial_copy_nocheck)
EXPORT_SYMBOL(__csum_partial_copy_nocheck)
#ifndef CONFIG_EVA
FEXPORT(__csum_partial_copy_to_user)
EXPORT_SYMBOL(__csum_partial_copy_to_user)
FEXPORT(__csum_partial_copy_from_user)
EXPORT_SYMBOL(__csum_partial_copy_from_user)
#endif
__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
END(__csum_partial_copy_kernel)
__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP
#ifdef CONFIG_EVA
LEAF(__csum_partial_copy_to_user)
__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP
END(__csum_partial_copy_to_user)
LEAF(__csum_partial_copy_from_user)
__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP
END(__csum_partial_copy_from_user)
#endif


@ -12,10 +12,6 @@
/* Take these from lib/checksum.c */
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
__wsum sum);
#define csum_partial_copy_nocheck csum_partial_copy_nocheck
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
extern __sum16 ip_compute_csum(const void *buff, int len);


@ -18,14 +18,6 @@
*/
extern __wsum csum_partial(const void *, int, __wsum);
/*
* The same as csum_partial, but copies from src while it checksums.
*
* Here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum);
/*
* Optimized for IP headers, which always checksum on 4 octet boundaries.
*
@ -181,25 +173,5 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
return csum_fold(sum);
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src,
void __user *dst,
int len, __wsum sum,
int *err_ptr)
{
/* code stolen from include/asm-mips64 */
sum = csum_partial(src, len, sum);
if (copy_to_user(dst, src, len)) {
*err_ptr = -EFAULT;
return (__force __wsum)-1;
}
return sum;
}
#endif


@ -106,20 +106,3 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
}
EXPORT_SYMBOL(csum_partial);
/*
* copy while checksumming, otherwise like csum_partial
*/
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum)
{
/*
* It's 2:30 am and I don't feel like doing it real ...
* This is lots slower than the real thing (tm)
*/
sum = csum_partial(src, len, sum);
memcpy(dst, src, len);
return sum;
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);


@ -18,19 +18,18 @@
* Like csum_partial, this must be called with even lengths,
* except for the last fragment.
*/
extern __wsum csum_partial_copy_generic(const void *src, void *dst,
int len, __wsum sum,
int *src_err, int *dst_err);
extern __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr);
int len);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
int len, __wsum sum, int *err_ptr);
int len);
#define csum_partial_copy_nocheck(src, dst, len, sum) \
csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
#define _HAVE_ARCH_CSUM_AND_COPY
#define csum_partial_copy_nocheck(src, dst, len) \
csum_partial_copy_generic((src), (dst), (len))
/*


@ -78,12 +78,10 @@ EXPORT_SYMBOL(__csum_partial)
/*
* Computes the checksum of a memory block at src, length len,
* and adds in "sum" (32-bit), while copying the block to dst.
* If an access exception occurs on src or dst, it stores -EFAULT
* to *src_err or *dst_err respectively, and (for an error on
* src) zeroes the rest of dst.
* and adds in 0xffffffff, while copying the block to dst.
* If an access exception occurs it returns zero.
*
* csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err)
* csum_partial_copy_generic(src, dst, len)
*/
#define CSUM_COPY_16_BYTES_WITHEX(n) \
8 ## n ## 0: \
@ -108,14 +106,14 @@ EXPORT_SYMBOL(__csum_partial)
adde r12,r12,r10
#define CSUM_COPY_16_BYTES_EXCODE(n) \
EX_TABLE(8 ## n ## 0b, src_error); \
EX_TABLE(8 ## n ## 1b, src_error); \
EX_TABLE(8 ## n ## 2b, src_error); \
EX_TABLE(8 ## n ## 3b, src_error); \
EX_TABLE(8 ## n ## 4b, dst_error); \
EX_TABLE(8 ## n ## 5b, dst_error); \
EX_TABLE(8 ## n ## 6b, dst_error); \
EX_TABLE(8 ## n ## 7b, dst_error);
EX_TABLE(8 ## n ## 0b, fault); \
EX_TABLE(8 ## n ## 1b, fault); \
EX_TABLE(8 ## n ## 2b, fault); \
EX_TABLE(8 ## n ## 3b, fault); \
EX_TABLE(8 ## n ## 4b, fault); \
EX_TABLE(8 ## n ## 5b, fault); \
EX_TABLE(8 ## n ## 6b, fault); \
EX_TABLE(8 ## n ## 7b, fault);
.text
.stabs "arch/powerpc/lib/",N_SO,0,0,0f
@ -127,11 +125,8 @@ LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)
_GLOBAL(csum_partial_copy_generic)
stwu r1,-16(r1)
stw r7,12(r1)
stw r8,8(r1)
addic r12,r6,0
li r12,-1
addic r0,r0,0 /* clear carry */
addi r6,r4,-4
neg r0,r4
addi r4,r3,-4
@ -246,34 +241,19 @@ _GLOBAL(csum_partial_copy_generic)
rlwinm r3,r3,8,0,31 /* odd destination address: rotate one byte */
blr
/* read fault */
src_error:
lwz r7,12(r1)
addi r1,r1,16
cmpwi cr0,r7,0
beqlr
li r0,-EFAULT
stw r0,0(r7)
blr
/* write fault */
dst_error:
lwz r8,8(r1)
addi r1,r1,16
cmpwi cr0,r8,0
beqlr
li r0,-EFAULT
stw r0,0(r8)
fault:
li r3,0
blr
EX_TABLE(70b, src_error);
EX_TABLE(71b, dst_error);
EX_TABLE(72b, src_error);
EX_TABLE(73b, dst_error);
EX_TABLE(54b, dst_error);
EX_TABLE(70b, fault);
EX_TABLE(71b, fault);
EX_TABLE(72b, fault);
EX_TABLE(73b, fault);
EX_TABLE(54b, fault);
/*
* this stuff handles faults in the cacheline loop and branches to either
* src_error (if in read part) or dst_error (if in write part)
* fault (if in read part) or fault (if in write part)
*/
CSUM_COPY_16_BYTES_EXCODE(0)
#if L1_CACHE_BYTES >= 32
@ -290,12 +270,12 @@ dst_error:
#endif
#endif
EX_TABLE(30b, src_error);
EX_TABLE(31b, dst_error);
EX_TABLE(40b, src_error);
EX_TABLE(41b, dst_error);
EX_TABLE(50b, src_error);
EX_TABLE(51b, dst_error);
EX_TABLE(30b, fault);
EX_TABLE(31b, fault);
EX_TABLE(40b, fault);
EX_TABLE(41b, fault);
EX_TABLE(50b, fault);
EX_TABLE(51b, fault);
EXPORT_SYMBOL(csum_partial_copy_generic)


@ -182,34 +182,33 @@ EXPORT_SYMBOL(__csum_partial)
.macro srcnr
100:
EX_TABLE(100b,.Lsrc_error_nr)
EX_TABLE(100b,.Lerror_nr)
.endm
.macro source
150:
EX_TABLE(150b,.Lsrc_error)
EX_TABLE(150b,.Lerror)
.endm
.macro dstnr
200:
EX_TABLE(200b,.Ldest_error_nr)
EX_TABLE(200b,.Lerror_nr)
.endm
.macro dest
250:
EX_TABLE(250b,.Ldest_error)
EX_TABLE(250b,.Lerror)
.endm
/*
* Computes the checksum of a memory block at src, length len,
* and adds in "sum" (32-bit), while copying the block to dst.
* If an access exception occurs on src or dst, it stores -EFAULT
* to *src_err or *dst_err respectively. The caller must take any action
* required in this case (zeroing memory, recalculating partial checksum etc).
* and adds in 0xffffffff (32-bit), while copying the block to dst.
* If an access exception occurs, it returns 0.
*
* csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err)
* csum_partial_copy_generic(r3=src, r4=dst, r5=len)
*/
_GLOBAL(csum_partial_copy_generic)
li r6,-1
addic r0,r6,0 /* clear carry */
srdi. r6,r5,3 /* less than 8 bytes? */
@ -401,29 +400,15 @@ dstnr; stb r6,0(r4)
srdi r3,r3,32
blr
.Lsrc_error:
.Lerror:
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
addi r1,r1,STACKFRAMESIZE
.Lsrc_error_nr:
cmpdi 0,r7,0
beqlr
li r6,-EFAULT
stw r6,0(r7)
.Lerror_nr:
li r3,0
blr
.Ldest_error:
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
addi r1,r1,STACKFRAMESIZE
.Ldest_error_nr:
cmpdi 0,r8,0
beqlr
li r6,-EFAULT
stw r6,0(r8)
blr
EXPORT_SYMBOL(csum_partial_copy_generic)
/*


@ -12,83 +12,37 @@
#include <linux/uaccess.h>
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
int len)
{
unsigned int csum;
__wsum csum;
might_sleep();
if (unlikely(!access_ok(src, len)))
return 0;
allow_read_from_user(src, len);
*err_ptr = 0;
csum = csum_partial_copy_generic((void __force *)src, dst, len);
if (!len) {
csum = 0;
goto out;
}
if (unlikely((len < 0) || !access_ok(src, len))) {
*err_ptr = -EFAULT;
csum = (__force unsigned int)sum;
goto out;
}
csum = csum_partial_copy_generic((void __force *)src, dst,
len, sum, err_ptr, NULL);
if (unlikely(*err_ptr)) {
int missing = __copy_from_user(dst, src, len);
if (missing) {
memset(dst + len - missing, 0, missing);
*err_ptr = -EFAULT;
} else {
*err_ptr = 0;
}
csum = csum_partial(dst, len, sum);
}
out:
prevent_read_from_user(src, len);
return (__force __wsum)csum;
return csum;
}
EXPORT_SYMBOL(csum_and_copy_from_user);
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err_ptr)
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
unsigned int csum;
__wsum csum;
might_sleep();
if (unlikely(!access_ok(dst, len)))
return 0;
allow_write_to_user(dst, len);
*err_ptr = 0;
csum = csum_partial_copy_generic(src, (void __force *)dst, len);
if (!len) {
csum = 0;
goto out;
}
if (unlikely((len < 0) || !access_ok(dst, len))) {
*err_ptr = -EFAULT;
csum = -1; /* invalid checksum */
goto out;
}
csum = csum_partial_copy_generic(src, (void __force *)dst,
len, sum, NULL, err_ptr);
if (unlikely(*err_ptr)) {
csum = csum_partial(src, len, sum);
if (copy_to_user(dst, src, len)) {
*err_ptr = -EFAULT;
csum = -1; /* invalid checksum */
}
}
out:
prevent_write_to_user(dst, len);
return (__force __wsum)csum;
return csum;
}
EXPORT_SYMBOL(csum_and_copy_to_user);


@ -39,13 +39,6 @@ csum_partial(const void *buff, int len, __wsum sum)
return sum;
}
static inline __wsum
csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum)
{
memcpy(dst,src,len);
return csum_partial(dst, len, sum);
}
/*
* Fold a partial checksum without adding pseudo headers
*/


@ -30,10 +30,9 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary
*/
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
int len, __wsum sum,
int *src_err_ptr, int *dst_err_ptr);
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
#define _HAVE_ARCH_CSUM_AND_COPY
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
@ -42,23 +41,18 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
* access_ok().
*/
static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum)
__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
return csum_partial_copy_generic(src, dst, len);
}
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
if (access_ok(src, len))
return csum_partial_copy_generic((__force const void *)src, dst,
len, sum, err_ptr, NULL);
if (len)
*err_ptr = -EFAULT;
return sum;
if (!access_ok(src, len))
return 0;
return csum_partial_copy_generic((__force const void *)src, dst, len);
}
/*
@ -199,16 +193,10 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
#define HAVE_CSUM_COPY_USER
static inline __wsum csum_and_copy_to_user(const void *src,
void __user *dst,
int len, __wsum sum,
int *err_ptr)
int len)
{
if (access_ok(dst, len))
return csum_partial_copy_generic((__force const void *)src,
dst, len, sum, NULL, err_ptr);
if (len)
*err_ptr = -EFAULT;
return (__force __wsum)-1; /* invalid checksum */
if (!access_ok(dst, len))
return 0;
return csum_partial_copy_generic((__force const void *)src, dst, len);
}
#endif /* __ASM_SH_CHECKSUM_H */


@ -173,47 +173,27 @@ ENTRY(csum_partial)
mov r6, r0
/*
unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
int sum, int *src_err_ptr, int *dst_err_ptr)
unsigned int csum_partial_copy_generic (const char *src, char *dst, int len)
*/
/*
* Copy from ds while checksumming, otherwise like csum_partial
*
* The macros SRC and DST specify the type of access for the instruction.
* thus we can call a custom exception handler for all access types.
*
* FIXME: could someone double-check whether I haven't mixed up some SRC and
* DST definitions? It's damn hard to trigger all cases. I hope I got
* them all but there's no guarantee.
* Copy from ds while checksumming, otherwise like csum_partial with initial
* sum being ~0U
*/
#define SRC(...) \
#define EXC(...) \
9999: __VA_ARGS__ ; \
.section __ex_table, "a"; \
.long 9999b, 6001f ; \
.previous
#define DST(...) \
9999: __VA_ARGS__ ; \
.section __ex_table, "a"; \
.long 9999b, 6002f ; \
.previous
!
! r4: const char *SRC
! r5: char *DST
! r6: int LEN
! r7: int SUM
!
! on stack:
! int *SRC_ERR_PTR
! int *DST_ERR_PTR
!
ENTRY(csum_partial_copy_generic)
mov.l r5,@-r15
mov.l r6,@-r15
mov #-1,r7
mov #3,r0 ! Check src and dest are equally aligned
mov r4,r1
and r0,r1
@ -243,11 +223,11 @@ ENTRY(csum_partial_copy_generic)
clrt
.align 2
5:
SRC( mov.b @r4+,r1 )
SRC( mov.b @r4+,r0 )
EXC( mov.b @r4+,r1 )
EXC( mov.b @r4+,r0 )
extu.b r1,r1
DST( mov.b r1,@r5 )
DST( mov.b r0,@(1,r5) )
EXC( mov.b r1,@r5 )
EXC( mov.b r0,@(1,r5) )
extu.b r0,r0
add #2,r5
@ -276,8 +256,8 @@ DST( mov.b r0,@(1,r5) )
! Handle first two bytes as a special case
.align 2
1:
SRC( mov.w @r4+,r0 )
DST( mov.w r0,@r5 )
EXC( mov.w @r4+,r0 )
EXC( mov.w r0,@r5 )
add #2,r5
extu.w r0,r0
addc r0,r7
@ -292,32 +272,32 @@ DST( mov.w r0,@r5 )
clrt
.align 2
1:
SRC( mov.l @r4+,r0 )
SRC( mov.l @r4+,r1 )
EXC( mov.l @r4+,r0 )
EXC( mov.l @r4+,r1 )
addc r0,r7
DST( mov.l r0,@r5 )
DST( mov.l r1,@(4,r5) )
EXC( mov.l r0,@r5 )
EXC( mov.l r1,@(4,r5) )
addc r1,r7
SRC( mov.l @r4+,r0 )
SRC( mov.l @r4+,r1 )
EXC( mov.l @r4+,r0 )
EXC( mov.l @r4+,r1 )
addc r0,r7
DST( mov.l r0,@(8,r5) )
DST( mov.l r1,@(12,r5) )
EXC( mov.l r0,@(8,r5) )
EXC( mov.l r1,@(12,r5) )
addc r1,r7
SRC( mov.l @r4+,r0 )
SRC( mov.l @r4+,r1 )
EXC( mov.l @r4+,r0 )
EXC( mov.l @r4+,r1 )
addc r0,r7
DST( mov.l r0,@(16,r5) )
DST( mov.l r1,@(20,r5) )
EXC( mov.l r0,@(16,r5) )
EXC( mov.l r1,@(20,r5) )
addc r1,r7
SRC( mov.l @r4+,r0 )
SRC( mov.l @r4+,r1 )
EXC( mov.l @r4+,r0 )
EXC( mov.l @r4+,r1 )
addc r0,r7
DST( mov.l r0,@(24,r5) )
DST( mov.l r1,@(28,r5) )
EXC( mov.l r0,@(24,r5) )
EXC( mov.l r1,@(28,r5) )
addc r1,r7
add #32,r5
movt r0
@ -335,9 +315,9 @@ DST( mov.l r1,@(28,r5) )
clrt
shlr2 r6
3:
SRC( mov.l @r4+,r0 )
EXC( mov.l @r4+,r0 )
addc r0,r7
DST( mov.l r0,@r5 )
EXC( mov.l r0,@r5 )
add #4,r5
movt r0
dt r6
@ -353,8 +333,8 @@ DST( mov.l r0,@r5 )
mov #2,r1
cmp/hs r1,r6
bf 5f
SRC( mov.w @r4+,r0 )
DST( mov.w r0,@r5 )
EXC( mov.w @r4+,r0 )
EXC( mov.w r0,@r5 )
extu.w r0,r0
add #2,r5
cmp/eq r1,r6
@ -363,8 +343,8 @@ DST( mov.w r0,@r5 )
shll16 r0
addc r0,r7
5:
SRC( mov.b @r4+,r0 )
DST( mov.b r0,@r5 )
EXC( mov.b @r4+,r0 )
EXC( mov.b r0,@r5 )
extu.b r0,r0
#ifndef __LITTLE_ENDIAN__
shll8 r0
@ -373,42 +353,13 @@ DST( mov.b r0,@r5 )
mov #0,r0
addc r0,r7
7:
5000:
# Exception handler:
.section .fixup, "ax"
6001:
mov.l @(8,r15),r0 ! src_err_ptr
mov #-EFAULT,r1
mov.l r1,@r0
! zero the complete destination - computing the rest
! is too much work
mov.l @(4,r15),r5 ! dst
mov.l @r15,r6 ! len
mov #0,r7
1: mov.b r7,@r5
dt r6
bf/s 1b
add #1,r5
mov.l 8000f,r0
jmp @r0
nop
.align 2
8000: .long 5000b
6002:
mov.l @(12,r15),r0 ! dst_err_ptr
mov #-EFAULT,r1
mov.l r1,@r0
mov.l 8001f,r0
jmp @r0
nop
.align 2
8001: .long 5000b
rts
mov #0,r0
.previous
add #8,r15
rts
mov r7,r0


@ -1,7 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ___ASM_SPARC_CHECKSUM_H
#define ___ASM_SPARC_CHECKSUM_H
#define _HAVE_ARCH_CSUM_AND_COPY
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
#define HAVE_CSUM_COPY_USER
#if defined(__sparc__) && defined(__arch64__)
#include <asm/checksum_64.h>
#else


@ -42,7 +42,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum);
unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
register unsigned int ret asm("o0") = (unsigned int)src;
register char *d asm("o1") = dst;
@ -50,9 +50,9 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
__asm__ __volatile__ (
"call __csum_partial_copy_sparc_generic\n\t"
" mov %6, %%g7\n"
" mov -1, %%g7\n"
: "=&r" (ret), "=&r" (d), "=&r" (l)
: "0" (ret), "1" (d), "2" (l), "r" (sum)
: "0" (ret), "1" (d), "2" (l)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5", "g7",
"memory", "cc");
@ -60,65 +60,19 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
}
static inline __wsum
csum_and_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *err)
{
register unsigned long ret asm("o0") = (unsigned long)src;
register char *d asm("o1") = dst;
register int l asm("g1") = len;
register __wsum s asm("g7") = sum;
if (unlikely(!access_ok(src, len))) {
if (len)
*err = -EFAULT;
return sum;
}
__asm__ __volatile__ (
".section __ex_table,#alloc\n\t"
".align 4\n\t"
".word 1f,2\n\t"
".previous\n"
"1:\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
: "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
"cc", "memory");
return (__force __wsum)ret;
csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
if (unlikely(!access_ok(src, len)))
return 0;
return csum_partial_copy_nocheck((__force void *)src, dst, len);
}
#define HAVE_CSUM_COPY_USER
static inline __wsum
csum_and_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err)
csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
if (!access_ok(dst, len)) {
*err = -EFAULT;
return sum;
} else {
register unsigned long ret asm("o0") = (unsigned long)src;
register char __user *d asm("o1") = dst;
register int l asm("g1") = len;
register __wsum s asm("g7") = sum;
__asm__ __volatile__ (
".section __ex_table,#alloc\n\t"
".align 4\n\t"
".word 1f,1\n\t"
".previous\n"
"1:\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5",
"cc", "memory");
return (__force __wsum)ret;
}
if (!access_ok(dst, len))
return 0;
return csum_partial_copy_nocheck(src, (__force void *)dst, len);
}
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned


@ -38,42 +38,9 @@ __wsum csum_partial(const void * buff, int len, __wsum sum);
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
long __csum_partial_copy_from_user(const void __user *src,
void *dst, int len,
__wsum sum);
static inline __wsum
csum_and_copy_from_user(const void __user *src,
void *dst, int len,
__wsum sum, int *err)
{
long ret = __csum_partial_copy_from_user(src, dst, len, sum);
if (ret < 0)
*err = -EFAULT;
return (__force __wsum) ret;
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
long __csum_partial_copy_to_user(const void *src,
void __user *dst, int len,
__wsum sum);
static inline __wsum
csum_and_copy_to_user(const void *src,
void __user *dst, int len,
__wsum sum, int *err)
{
long ret = __csum_partial_copy_to_user(src, dst, len, sum);
if (ret < 0)
*err = -EFAULT;
return (__force __wsum) ret;
}
__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len);
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.


@ -144,44 +144,21 @@ cpte: bne csum_partial_end_cruft ! yep, handle it
cpout: retl ! get outta here
mov %o2, %o0 ! return computed csum
.globl __csum_partial_copy_start, __csum_partial_copy_end
__csum_partial_copy_start:
/* Work around cpp -rob */
#define ALLOC #alloc
#define EXECINSTR #execinstr
#define EX(x,y,a,b) \
98: x,y; \
.section .fixup,ALLOC,EXECINSTR; \
.align 4; \
99: ba 30f; \
a, b, %o3; \
.section __ex_table,ALLOC; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4
#define EX2(x,y) \
#define EX(x,y) \
98: x,y; \
.section __ex_table,ALLOC; \
.align 4; \
.word 98b, 30f; \
.word 98b, cc_fault; \
.text; \
.align 4
#define EX3(x,y) \
98: x,y; \
#define EXT(start,end) \
.section __ex_table,ALLOC; \
.align 4; \
.word 98b, 96f; \
.text; \
.align 4
#define EXT(start,end,handler) \
.section __ex_table,ALLOC; \
.align 4; \
.word start, 0, end, handler; \
.word start, 0, end, cc_fault; \
.text; \
.align 4
@ -252,21 +229,21 @@ __csum_partial_copy_start:
cc_end_cruft:
be 1f
andcc %o3, 4, %g0
EX(ldd [%o0 + 0x00], %g2, and %o3, 0xf)
EX(ldd [%o0 + 0x00], %g2)
add %o1, 8, %o1
addcc %g2, %g7, %g7
add %o0, 8, %o0
addxcc %g3, %g7, %g7
EX2(st %g2, [%o1 - 0x08])
EX(st %g2, [%o1 - 0x08])
addx %g0, %g7, %g7
andcc %o3, 4, %g0
EX2(st %g3, [%o1 - 0x04])
EX(st %g3, [%o1 - 0x04])
1: be 1f
andcc %o3, 3, %o3
EX(ld [%o0 + 0x00], %g2, add %o3, 4)
EX(ld [%o0 + 0x00], %g2)
add %o1, 4, %o1
addcc %g2, %g7, %g7
EX2(st %g2, [%o1 - 0x04])
EX(st %g2, [%o1 - 0x04])
addx %g0, %g7, %g7
andcc %o3, 3, %g0
add %o0, 4, %o0
@ -276,14 +253,14 @@ cc_end_cruft:
subcc %o3, 2, %o3
b 4f
or %g0, %g0, %o4
2: EX(lduh [%o0 + 0x00], %o4, add %o3, 2)
2: EX(lduh [%o0 + 0x00], %o4)
add %o0, 2, %o0
EX2(sth %o4, [%o1 + 0x00])
EX(sth %o4, [%o1 + 0x00])
be 6f
add %o1, 2, %o1
sll %o4, 16, %o4
4: EX(ldub [%o0 + 0x00], %o5, add %g0, 1)
EX2(stb %o5, [%o1 + 0x00])
4: EX(ldub [%o0 + 0x00], %o5)
EX(stb %o5, [%o1 + 0x00])
sll %o5, 8, %o5
or %o5, %o4, %o4
6: addcc %o4, %g7, %g7
@ -306,9 +283,9 @@ cc_dword_align:
andcc %o0, 0x2, %g0
be 1f
andcc %o0, 0x4, %g0
EX(lduh [%o0 + 0x00], %g4, add %g1, 0)
EX(lduh [%o0 + 0x00], %g4)
sub %g1, 2, %g1
EX2(sth %g4, [%o1 + 0x00])
EX(sth %g4, [%o1 + 0x00])
add %o0, 2, %o0
sll %g4, 16, %g4
addcc %g4, %g7, %g7
@ -322,9 +299,9 @@ cc_dword_align:
or %g3, %g7, %g7
1: be 3f
andcc %g1, 0xffffff80, %g0
EX(ld [%o0 + 0x00], %g4, add %g1, 0)
EX(ld [%o0 + 0x00], %g4)
sub %g1, 4, %g1
EX2(st %g4, [%o1 + 0x00])
EX(st %g4, [%o1 + 0x00])
add %o0, 4, %o0
addcc %g4, %g7, %g7
add %o1, 4, %o1
@ -354,7 +331,7 @@ __csum_partial_copy_sparc_generic:
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
10: EXT(5b, 10b, 20f) ! note for exception handling
10: EXT(5b, 10b) ! note for exception handling
sub %g1, 128, %g1 ! detract from length
addx %g0, %g7, %g7 ! add in last carry bit
andcc %g1, 0xffffff80, %g0 ! more to csum?
@ -379,7 +356,7 @@ cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
12: EXT(cctbl, 12b, 22f) ! note for exception table handling
12: EXT(cctbl, 12b) ! note for exception table handling
addx %g0, %g7, %g7
andcc %o3, 0xf, %g0 ! check for low bits set
ccte: bne cc_end_cruft ! something left, handle it out of band
@ -390,7 +367,7 @@ ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
11: EXT(ccdbl, 11b, 21f) ! note for exception table handling
11: EXT(ccdbl, 11b) ! note for exception table handling
sub %g1, 128, %g1 ! detract from length
addx %g0, %g7, %g7 ! add in last carry bit
andcc %g1, 0xffffff80, %g0 ! more to csum?
@@ -407,9 +384,9 @@ ccslow: cmp %g1, 0
be,a 1f
srl %g1, 1, %g4
sub %g1, 1, %g1
EX(ldub [%o0], %g5, add %g1, 1)
EX(ldub [%o0], %g5)
add %o0, 1, %o0
EX2(stb %g5, [%o1])
EX(stb %g5, [%o1])
srl %g1, 1, %g4
add %o1, 1, %o1
1: cmp %g4, 0
@@ -418,34 +395,34 @@ ccslow: cmp %g1, 0
andcc %o0, 2, %g0
be,a 1f
srl %g4, 1, %g4
EX(lduh [%o0], %o4, add %g1, 0)
EX(lduh [%o0], %o4)
sub %g1, 2, %g1
srl %o4, 8, %g2
sub %g4, 1, %g4
EX2(stb %g2, [%o1])
EX(stb %g2, [%o1])
add %o4, %g5, %g5
EX2(stb %o4, [%o1 + 1])
EX(stb %o4, [%o1 + 1])
add %o0, 2, %o0
srl %g4, 1, %g4
add %o1, 2, %o1
1: cmp %g4, 0
be,a 2f
andcc %g1, 2, %g0
EX3(ld [%o0], %o4)
EX(ld [%o0], %o4)
5: srl %o4, 24, %g2
srl %o4, 16, %g3
EX2(stb %g2, [%o1])
EX(stb %g2, [%o1])
srl %o4, 8, %g2
EX2(stb %g3, [%o1 + 1])
EX(stb %g3, [%o1 + 1])
add %o0, 4, %o0
EX2(stb %g2, [%o1 + 2])
EX(stb %g2, [%o1 + 2])
addcc %o4, %g5, %g5
EX2(stb %o4, [%o1 + 3])
EX(stb %o4, [%o1 + 3])
addx %g5, %g0, %g5 ! I am now too lazy to optimize this (question
add %o1, 4, %o1 ! whether it is worth it). Maybe some day - with
subcc %g4, 1, %g4 ! the sll/srl tricks
bne,a 5b
EX3(ld [%o0], %o4)
EX(ld [%o0], %o4)
sll %g5, 16, %g2
srl %g5, 16, %g5
srl %g2, 16, %g2
@@ -453,19 +430,19 @@ ccslow: cmp %g1, 0
add %g2, %g5, %g5
2: be,a 3f
andcc %g1, 1, %g0
EX(lduh [%o0], %o4, and %g1, 3)
EX(lduh [%o0], %o4)
andcc %g1, 1, %g0
srl %o4, 8, %g2
add %o0, 2, %o0
EX2(stb %g2, [%o1])
EX(stb %g2, [%o1])
add %g5, %o4, %g5
EX2(stb %o4, [%o1 + 1])
EX(stb %o4, [%o1 + 1])
add %o1, 2, %o1
3: be,a 1f
sll %g5, 16, %o4
EX(ldub [%o0], %g2, add %g0, 1)
EX(ldub [%o0], %g2)
sll %g2, 8, %o4
EX2(stb %g2, [%o1])
EX(stb %g2, [%o1])
add %g5, %o4, %g5
sll %g5, 16, %o4
1: addcc %o4, %g5, %g5
@@ -481,113 +458,10 @@ ccslow: cmp %g1, 0
4: addcc %g7, %g5, %g7
retl
addx %g0, %g7, %o0
__csum_partial_copy_end:
/* We do these strange calculations for the csum_*_from_user case only, ie.
* we only bother with faults on loads... */
/* o2 = ((g2%20)&3)*8
* o3 = g1 - (g2/20)*32 - o2 */
20:
cmp %g2, 20
blu,a 1f
and %g2, 3, %o2
sub %g1, 32, %g1
b 20b
sub %g2, 20, %g2
1:
sll %o2, 3, %o2
b 31f
sub %g1, %o2, %o3
/* o2 = (!(g2 & 15) ? 0 : (((g2 & 15) + 1) & ~1)*8)
* o3 = g1 - (g2/16)*32 - o2 */
21:
andcc %g2, 15, %o3
srl %g2, 4, %g2
be,a 1f
clr %o2
add %o3, 1, %o3
and %o3, 14, %o3
sll %o3, 3, %o2
1:
sll %g2, 5, %g2
sub %g1, %g2, %o3
b 31f
sub %o3, %o2, %o3
/* o0 += (g2/10)*16 - 0x70
* o1 += (g2/10)*16 - 0x70
* o2 = (g2 % 10) ? 8 : 0
* o3 += 0x70 - (g2/10)*16 - o2 */
22:
cmp %g2, 10
blu,a 1f
sub %o0, 0x70, %o0
add %o0, 16, %o0
add %o1, 16, %o1
sub %o3, 16, %o3
b 22b
sub %g2, 10, %g2
1:
sub %o1, 0x70, %o1
add %o3, 0x70, %o3
clr %o2
tst %g2
bne,a 1f
mov 8, %o2
1:
b 31f
sub %o3, %o2, %o3
96:
and %g1, 3, %g1
sll %g4, 2, %g4
add %g1, %g4, %o3
30:
/* %o1 is dst
* %o3 is # bytes to zero out
* %o4 is faulting address
* %o5 is %pc where fault occurred */
clr %o2
31:
/* %o0 is src
* %o1 is dst
* %o2 is # of bytes to copy from src to dst
* %o3 is # bytes to zero out
* %o4 is faulting address
* %o5 is %pc where fault occurred */
save %sp, -104, %sp
mov %i5, %o0
mov %i7, %o1
mov %i4, %o2
call lookup_fault
mov %g7, %i4
cmp %o0, 2
bne 1f
add %g0, -EFAULT, %i5
tst %i2
be 2f
mov %i0, %o1
mov %i1, %o0
5:
call memcpy
mov %i2, %o2
tst %o0
bne,a 2f
add %i3, %i2, %i3
add %i1, %i2, %i1
2:
mov %i1, %o0
6:
call __bzero
mov %i3, %o1
1:
ld [%sp + 168], %o2 ! struct_ptr of parent
st %i5, [%o2]
cc_fault:
ret
restore
.section __ex_table,#alloc
.align 4
.word 5b,2
.word 6b,2
clr %o0
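
After this rewrite the only fault behaviour left is "return 0 from cc_fault": no lookup_fault(), no partial memcpy(), no __bzero() of the uncopied tail. A minimal sketch of the contract callers now rely on, with a simplified local stand-in (the typedef and helper are illustrative, and the fault path itself is elided):

#include <stdint.h>
#include <string.h>

typedef uint32_t __wsum;        /* stand-in for the kernel type */

/* model of csum_and_copy_from_user(): the 32-bit unfolded sum is
 * seeded with 0xffffffff, so a successful run can never return 0,
 * and 0 is reserved for "an access faulted" (elided here) */
static __wsum csum_and_copy_model(const void *src, void *dst, int len)
{
        const uint8_t *p = src;
        uint64_t sum = 0xffffffffu;
        int i;

        memcpy(dst, src, len);
        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)((p[i] << 8) | p[i + 1]);
        if (len & 1)
                sum += (uint32_t)p[len - 1] << 8;
        while (sum >> 32)
                sum = (sum & 0xffffffffu) + (sum >> 32);
        return (__wsum)sum;
}

static int recv_and_csum(const void *src, void *dst, int len, __wsum *out)
{
        __wsum s = csum_and_copy_model(src, dst, len);

        if (!s)
                return -14;     /* -EFAULT: dst contents are undefined */
        *out = s;
        return 0;
}

Nothing zeroes the destination on failure any more; a caller that sees 0 simply discards the buffer.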

@@ -68,9 +68,10 @@
.globl FUNC_NAME
.type FUNC_NAME,#function
EXPORT_SYMBOL(FUNC_NAME)
FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */
FUNC_NAME: /* %o0=src, %o1=dst, %o2=len */
LOAD(prefetch, %o0 + 0x000, #n_reads)
xor %o0, %o1, %g1
mov 1, %o3
clr %o4
andcc %g1, 0x3, %g0
bne,pn %icc, 95f

@@ -9,14 +9,14 @@
.section .fixup, "ax"; \
.align 4; \
99: retl; \
mov -1, %o0; \
mov 0, %o0; \
.section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define FUNC_NAME __csum_partial_copy_from_user
#define FUNC_NAME csum_and_copy_from_user
#define LOAD(type,addr,dest) type##a [addr] %asi, dest
#include "csum_copy.S"

@@ -9,14 +9,14 @@
.section .fixup,"ax"; \
.align 4; \
99: retl; \
mov -1, %o0; \
mov 0, %o0; \
.section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#define FUNC_NAME __csum_partial_copy_to_user
#define FUNC_NAME csum_and_copy_to_user
#define STORE(type,src,addr) type##a src, [addr] %asi
#include "csum_copy.S"

@@ -288,8 +288,6 @@ no_context:
if (fixup > 10) {
extern const unsigned int __memset_start[];
extern const unsigned int __memset_end[];
extern const unsigned int __csum_partial_copy_start[];
extern const unsigned int __csum_partial_copy_end[];
#ifdef DEBUG_EXCEPTIONS
printk("Exception: PC<%08lx> faddr<%08lx>\n",
@@ -298,9 +296,7 @@ no_context:
regs->pc, fixup, g2);
#endif
if ((regs->pc >= (unsigned long)__memset_start &&
regs->pc < (unsigned long)__memset_end) ||
(regs->pc >= (unsigned long)__csum_partial_copy_start &&
regs->pc < (unsigned long)__csum_partial_copy_end)) {
regs->pc < (unsigned long)__memset_end)) {
regs->u_regs[UREG_I4] = address;
regs->u_regs[UREG_I5] = regs->pc;
}

@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
#define HAVE_CSUM_COPY_USER
#define _HAVE_ARCH_CSUM_AND_COPY
#ifdef CONFIG_X86_32
# include <asm/checksum_32.h>
#else

@@ -27,9 +27,7 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary
*/
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
int len, __wsum sum,
int *src_err_ptr, int *dst_err_ptr);
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
/*
* Note: when you get a NULL pointer exception here this means someone
@@ -38,26 +36,20 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
* If you use these functions directly please don't forget the
* access_ok().
*/
static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum)
static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
return csum_partial_copy_generic(src, dst, len);
}
static inline __wsum csum_and_copy_from_user(const void __user *src,
void *dst, int len,
__wsum sum, int *err_ptr)
void *dst, int len)
{
__wsum ret;
might_sleep();
if (!user_access_begin(src, len)) {
if (len)
*err_ptr = -EFAULT;
return sum;
}
ret = csum_partial_copy_generic((__force void *)src, dst,
len, sum, err_ptr, NULL);
if (!user_access_begin(src, len))
return 0;
ret = csum_partial_copy_generic((__force void *)src, dst, len);
user_access_end();
return ret;
@@ -178,23 +170,17 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
*/
static inline __wsum csum_and_copy_to_user(const void *src,
void __user *dst,
int len, __wsum sum,
int *err_ptr)
int len)
{
__wsum ret;
might_sleep();
if (user_access_begin(dst, len)) {
ret = csum_partial_copy_generic(src, (__force void *)dst,
len, sum, NULL, err_ptr);
user_access_end();
return ret;
}
if (!user_access_begin(dst, len))
return 0;
if (len)
*err_ptr = -EFAULT;
return (__force __wsum)-1; /* invalid checksum */
ret = csum_partial_copy_generic(src, (__force void *)dst, len);
user_access_end();
return ret;
}
#endif /* _ASM_X86_CHECKSUM_32_H */

@@ -130,17 +130,11 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/* Do not call this directly. Use the wrappers below */
extern __visible __wsum csum_partial_copy_generic(const void *src, const void *dst,
int len, __wsum sum,
int *src_err_ptr, int *dst_err_ptr);
extern __visible __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum isum, int *errp);
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
int len, __wsum isum, int *errp);
extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len);
extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
/**
* ip_compute_csum - Compute a 16bit IP checksum.

@@ -253,28 +253,17 @@ EXPORT_SYMBOL(csum_partial)
/*
unsigned int csum_partial_copy_generic (const char *src, char *dst,
int len, int sum, int *src_err_ptr, int *dst_err_ptr)
int len)
*/
/*
* Copy from ds while checksumming, otherwise like csum_partial
*
* The macros SRC and DST specify the type of access for the instruction.
* thus we can call a custom exception handler for all access types.
*
* FIXME: could someone double-check whether I haven't mixed up some SRC and
* DST definitions? It's damn hard to trigger all cases. I hope I got
* them all but there's no guarantee.
*/
#define SRC(y...) \
#define EXC(y...) \
9999: y; \
_ASM_EXTABLE_UA(9999b, 6001f)
#define DST(y...) \
9999: y; \
_ASM_EXTABLE_UA(9999b, 6002f)
#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
#define ARGBASE 16
@@ -285,20 +274,20 @@ SYM_FUNC_START(csum_partial_copy_generic)
pushl %edi
pushl %esi
pushl %ebx
movl ARGBASE+16(%esp),%eax # sum
movl ARGBASE+12(%esp),%ecx # len
movl ARGBASE+4(%esp),%esi # src
movl ARGBASE+8(%esp),%edi # dst
movl $-1, %eax # sum
testl $2, %edi # Check alignment.
jz 2f # Jump if alignment is ok.
subl $2, %ecx # Alignment uses up two bytes.
jae 1f # Jump if we had at least two bytes.
addl $2, %ecx # ecx was < 2. Deal with it.
jmp 4f
SRC(1: movw (%esi), %bx )
EXC(1: movw (%esi), %bx )
addl $2, %esi
DST( movw %bx, (%edi) )
EXC( movw %bx, (%edi) )
addl $2, %edi
addw %bx, %ax
adcl $0, %eax
@@ -306,34 +295,34 @@ DST( movw %bx, (%edi) )
movl %ecx, FP(%esp)
shrl $5, %ecx
jz 2f
testl %esi, %esi
SRC(1: movl (%esi), %ebx )
SRC( movl 4(%esi), %edx )
testl %esi, %esi # what's wrong with clc?
EXC(1: movl (%esi), %ebx )
EXC( movl 4(%esi), %edx )
adcl %ebx, %eax
DST( movl %ebx, (%edi) )
EXC( movl %ebx, (%edi) )
adcl %edx, %eax
DST( movl %edx, 4(%edi) )
EXC( movl %edx, 4(%edi) )
SRC( movl 8(%esi), %ebx )
SRC( movl 12(%esi), %edx )
EXC( movl 8(%esi), %ebx )
EXC( movl 12(%esi), %edx )
adcl %ebx, %eax
DST( movl %ebx, 8(%edi) )
EXC( movl %ebx, 8(%edi) )
adcl %edx, %eax
DST( movl %edx, 12(%edi) )
EXC( movl %edx, 12(%edi) )
SRC( movl 16(%esi), %ebx )
SRC( movl 20(%esi), %edx )
EXC( movl 16(%esi), %ebx )
EXC( movl 20(%esi), %edx )
adcl %ebx, %eax
DST( movl %ebx, 16(%edi) )
EXC( movl %ebx, 16(%edi) )
adcl %edx, %eax
DST( movl %edx, 20(%edi) )
EXC( movl %edx, 20(%edi) )
SRC( movl 24(%esi), %ebx )
SRC( movl 28(%esi), %edx )
EXC( movl 24(%esi), %ebx )
EXC( movl 28(%esi), %edx )
adcl %ebx, %eax
DST( movl %ebx, 24(%edi) )
EXC( movl %ebx, 24(%edi) )
adcl %edx, %eax
DST( movl %edx, 28(%edi) )
EXC( movl %edx, 28(%edi) )
lea 32(%esi), %esi
lea 32(%edi), %edi
@@ -345,9 +334,9 @@ DST( movl %edx, 28(%edi) )
andl $0x1c, %edx
je 4f
shrl $2, %edx # This clears CF
SRC(3: movl (%esi), %ebx )
EXC(3: movl (%esi), %ebx )
adcl %ebx, %eax
DST( movl %ebx, (%edi) )
EXC( movl %ebx, (%edi) )
lea 4(%esi), %esi
lea 4(%edi), %edi
dec %edx
@@ -357,39 +346,24 @@ DST( movl %ebx, (%edi) )
jz 7f
cmpl $2, %ecx
jb 5f
SRC( movw (%esi), %cx )
EXC( movw (%esi), %cx )
leal 2(%esi), %esi
DST( movw %cx, (%edi) )
EXC( movw %cx, (%edi) )
leal 2(%edi), %edi
je 6f
shll $16,%ecx
SRC(5: movb (%esi), %cl )
DST( movb %cl, (%edi) )
EXC(5: movb (%esi), %cl )
EXC( movb %cl, (%edi) )
6: addl %ecx, %eax
adcl $0, %eax
7:
5000:
# Exception handler:
.section .fixup, "ax"
6001:
movl ARGBASE+20(%esp), %ebx # src_err_ptr
movl $-EFAULT, (%ebx)
# zero the complete destination - computing the rest
# is too much work
movl ARGBASE+8(%esp), %edi # dst
movl ARGBASE+12(%esp), %ecx # len
xorl %eax,%eax
rep ; stosb
jmp 5000b
6002:
movl ARGBASE+24(%esp), %ebx # dst_err_ptr
movl $-EFAULT,(%ebx)
jmp 5000b
xorl %eax, %eax
jmp 7b
.previous
@@ -405,14 +379,14 @@ SYM_FUNC_END(csum_partial_copy_generic)
/* Version for PentiumII/PPro */
#define ROUND1(x) \
SRC(movl x(%esi), %ebx ) ; \
EXC(movl x(%esi), %ebx ) ; \
addl %ebx, %eax ; \
DST(movl %ebx, x(%edi) ) ;
EXC(movl %ebx, x(%edi) ) ;
#define ROUND(x) \
SRC(movl x(%esi), %ebx ) ; \
EXC(movl x(%esi), %ebx ) ; \
adcl %ebx, %eax ; \
DST(movl %ebx, x(%edi) ) ;
EXC(movl %ebx, x(%edi) ) ;
#define ARGBASE 12
@@ -423,7 +397,7 @@ SYM_FUNC_START(csum_partial_copy_generic)
movl ARGBASE+4(%esp),%esi #src
movl ARGBASE+8(%esp),%edi #dst
movl ARGBASE+12(%esp),%ecx #len
movl ARGBASE+16(%esp),%eax #sum
movl $-1, %eax #sum
# movl %ecx, %edx
movl %ecx, %ebx
movl %esi, %edx
@@ -439,7 +413,7 @@ SYM_FUNC_START(csum_partial_copy_generic)
JMP_NOSPEC ebx
1: addl $64,%esi
addl $64,%edi
SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
EXC(movb -32(%edx),%bl) ; EXC(movb (%edx),%bl)
ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)
ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)
@@ -453,29 +427,20 @@ SYM_FUNC_START(csum_partial_copy_generic)
jz 7f
cmpl $2, %edx
jb 5f
SRC( movw (%esi), %dx )
EXC( movw (%esi), %dx )
leal 2(%esi), %esi
DST( movw %dx, (%edi) )
EXC( movw %dx, (%edi) )
leal 2(%edi), %edi
je 6f
shll $16,%edx
5:
SRC( movb (%esi), %dl )
DST( movb %dl, (%edi) )
EXC( movb (%esi), %dl )
EXC( movb %dl, (%edi) )
6: addl %edx, %eax
adcl $0, %eax
7:
.section .fixup, "ax"
6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
movl $-EFAULT, (%ebx)
# zero the complete destination (computing the rest is too much work)
movl ARGBASE+8(%esp),%edi # dst
movl ARGBASE+12(%esp),%ecx # len
xorl %eax,%eax
rep; stosb
jmp 7b
6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
movl $-EFAULT, (%ebx)
6001: xorl %eax, %eax
jmp 7b
.previous
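
All the paired EXC() lines above lean on the x86 carry chain: addl opens it, each adcl adds the next word plus the previous carry, and the closing adcl $0 sweeps the final carry back in, which is exactly a 32-bit ones-complement (end-around carry) sum. The same arithmetic in C, with a 64-bit accumulator standing in for the carry flag (a sketch, not the kernel's implementation):

#include <stddef.h>
#include <stdint.h>

/* 32-bit ones-complement sum over 4-byte words, seeded the way the
 * new code does with movl $-1, %eax */
static uint32_t csum32(const uint32_t *words, size_t n)
{
        uint64_t sum = 0xffffffffu;     /* movl $-1, %eax */
        size_t i;

        for (i = 0; i < n; i++)
                sum += words[i];        /* the addl/adcl chain */

        while (sum >> 32)               /* the trailing adcl $0 steps */
                sum = (sum & 0xffffffffu) + (sum >> 32);
        return (uint32_t)sum;
}

Seeding with all-ones, which replaces the old sum argument, is what lets the collapsed fixup at 6001: report a fault with a bare xorl %eax, %eax.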

@@ -18,9 +18,6 @@
* rdi source
* rsi destination
* edx len (32bit)
* ecx sum (32bit)
* r8 src_err_ptr (int)
* r9 dst_err_ptr (int)
*
* Output
* eax 64bit sum. undefined in case of exception.
@@ -31,44 +28,32 @@
.macro source
10:
_ASM_EXTABLE_UA(10b, .Lbad_source)
_ASM_EXTABLE_UA(10b, .Lfault)
.endm
.macro dest
20:
_ASM_EXTABLE_UA(20b, .Lbad_dest)
_ASM_EXTABLE_UA(20b, .Lfault)
.endm
/*
* No _ASM_EXTABLE_UA; this is used for intentional prefetch on a
* potentially unmapped kernel address.
*/
.macro ignore L=.Lignore
30:
_ASM_EXTABLE(30b, \L)
.endm
SYM_FUNC_START(csum_partial_copy_generic)
cmpl $3*64, %edx
jle .Lignore
.Lignore:
subq $7*8, %rsp
movq %rbx, 2*8(%rsp)
movq %r12, 3*8(%rsp)
movq %r14, 4*8(%rsp)
movq %r13, 5*8(%rsp)
movq %r15, 6*8(%rsp)
movq %r8, (%rsp)
movq %r9, 1*8(%rsp)
movl %ecx, %eax
movl %edx, %ecx
subq $5*8, %rsp
movq %rbx, 0*8(%rsp)
movq %r12, 1*8(%rsp)
movq %r14, 2*8(%rsp)
movq %r13, 3*8(%rsp)
movq %r15, 4*8(%rsp)
movl $-1, %eax
xorl %r9d, %r9d
movq %rcx, %r12
movl %edx, %ecx
cmpl $8, %ecx
jb .Lshort
testb $7, %sil
jne .Lunaligned
.Laligned:
movl %ecx, %r12d
shrq $6, %r12
jz .Lhandle_tail /* < 64 */
@@ -99,7 +84,12 @@ SYM_FUNC_START(csum_partial_copy_generic)
source
movq 56(%rdi), %r13
ignore 2f
30:
/*
* No _ASM_EXTABLE_UA; this is used for intentional prefetch on a
* potentially unmapped kernel address.
*/
_ASM_EXTABLE(30b, 2f)
prefetcht0 5*64(%rdi)
2:
adcq %rbx, %rax
@@ -131,8 +121,6 @@ SYM_FUNC_START(csum_partial_copy_generic)
dest
movq %r13, 56(%rsi)
3:
leaq 64(%rdi), %rdi
leaq 64(%rsi), %rsi
@@ -142,8 +130,8 @@ SYM_FUNC_START(csum_partial_copy_generic)
/* do last up to 56 bytes */
.Lhandle_tail:
/* ecx: count */
movl %ecx, %r10d
/* ecx: count, rcx.63: the end result needs to be rol8 */
movq %rcx, %r10
andl $63, %ecx
shrl $3, %ecx
jz .Lfold
@@ -172,6 +160,7 @@ SYM_FUNC_START(csum_partial_copy_generic)
.Lhandle_7:
movl %r10d, %ecx
andl $7, %ecx
.L1: /* .Lshort rejoins the common path here */
shrl $1, %ecx
jz .Lhandle_1
movl $2, %edx
@@ -203,26 +192,65 @@ SYM_FUNC_START(csum_partial_copy_generic)
adcl %r9d, %eax /* carry */
.Lende:
movq 2*8(%rsp), %rbx
movq 3*8(%rsp), %r12
movq 4*8(%rsp), %r14
movq 5*8(%rsp), %r13
movq 6*8(%rsp), %r15
addq $7*8, %rsp
testq %r10, %r10
js .Lwas_odd
.Lout:
movq 0*8(%rsp), %rbx
movq 1*8(%rsp), %r12
movq 2*8(%rsp), %r14
movq 3*8(%rsp), %r13
movq 4*8(%rsp), %r15
addq $5*8, %rsp
ret
.Lshort:
movl %ecx, %r10d
jmp .L1
.Lunaligned:
xorl %ebx, %ebx
testb $1, %sil
jne .Lodd
1: testb $2, %sil
je 2f
source
movw (%rdi), %bx
dest
movw %bx, (%rsi)
leaq 2(%rdi), %rdi
subq $2, %rcx
leaq 2(%rsi), %rsi
addq %rbx, %rax
2: testb $4, %sil
je .Laligned
source
movl (%rdi), %ebx
dest
movl %ebx, (%rsi)
leaq 4(%rdi), %rdi
subq $4, %rcx
leaq 4(%rsi), %rsi
addq %rbx, %rax
jmp .Laligned
/* Exception handlers. Very simple, zeroing is done in the wrappers */
.Lbad_source:
movq (%rsp), %rax
testq %rax, %rax
jz .Lende
movl $-EFAULT, (%rax)
jmp .Lende
.Lodd:
source
movb (%rdi), %bl
dest
movb %bl, (%rsi)
leaq 1(%rdi), %rdi
leaq 1(%rsi), %rsi
/* decrement, set MSB */
leaq -1(%rcx, %rcx), %rcx
rorq $1, %rcx
shll $8, %ebx
addq %rbx, %rax
jmp 1b
.Lbad_dest:
movq 8(%rsp), %rax
testq %rax, %rax
jz .Lende
movl $-EFAULT, (%rax)
jmp .Lende
.Lwas_odd:
roll $8, %eax
jmp .Lout
/* Exception: just return 0 */
.Lfault:
xorl %eax, %eax
jmp .Lout
SYM_FUNC_END(csum_partial_copy_generic)
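
The new .Lodd/.Lwas_odd path is the subtle part: the leading byte of a misaligned source is shifted into bits 8..15 before being added, and "result needs a byte rotate" is encoded by folding a flag into %rcx's sign bit (leaq -1(%rcx,%rcx) then rorq $1), which .Lhandle_tail later tests so .Lwas_odd can roll $8, %eax. That is sound because a ones-complement sum commutes with byte rotation: checksumming a stream one byte out of phase swaps every byte's lane, and one final rotate undoes it. A small self-checking C sketch of that property (helper names are mine):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* 16-bit ones-complement sum; 'phase' shifts every byte to the other
 * lane, modelling a source that starts at an odd address */
static uint16_t ones_sum16(const uint8_t *p, size_t n, int phase)
{
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i < n; i++)
                sum += (uint32_t)p[i] << (((i + phase) & 1) ? 0 : 8);
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

int main(void)
{
        uint8_t buf[] = { 0x12, 0x34, 0x56, 0x78, 0x9a };
        uint16_t aligned = ones_sum16(buf, sizeof(buf), 0);
        uint16_t shifted = ones_sum16(buf, sizeof(buf), 1);

        /* swapping every byte's lane just byte-swaps the folded sum */
        assert(shifted == (uint16_t)((aligned << 8) | (aligned >> 8)));
        return 0;
}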

@@ -21,52 +21,16 @@
* src and dst are best aligned to 64bits.
*/
__wsum
csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum isum, int *errp)
csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
__wsum sum;
might_sleep();
*errp = 0;
if (!user_access_begin(src, len))
goto out_err;
/*
* Why 6, not 7? To handle odd addresses aligned we
* would need to do considerable complications to fix the
* checksum which is defined as an 16bit accumulator. The
* fix alignment code is primarily for performance
* compatibility with 32bit and that will handle odd
* addresses slowly too.
*/
if (unlikely((unsigned long)src & 6)) {
while (((unsigned long)src & 6) && len >= 2) {
__u16 val16;
unsafe_get_user(val16, (const __u16 __user *)src, out);
*(__u16 *)dst = val16;
isum = (__force __wsum)add32_with_carry(
(__force unsigned)isum, val16);
src += 2;
dst += 2;
len -= 2;
}
}
isum = csum_partial_copy_generic((__force const void *)src,
dst, len, isum, errp, NULL);
return 0;
sum = csum_partial_copy_generic((__force const void *)src, dst, len);
user_access_end();
if (unlikely(*errp))
goto out_err;
return isum;
out:
user_access_end();
out_err:
*errp = -EFAULT;
memset(dst, 0, len);
return isum;
return sum;
}
EXPORT_SYMBOL(csum_and_copy_from_user);
@@ -82,40 +46,16 @@ EXPORT_SYMBOL(csum_and_copy_from_user);
* src and dst are best aligned to 64bits.
*/
__wsum
csum_and_copy_to_user(const void *src, void __user *dst,
int len, __wsum isum, int *errp)
csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
__wsum ret;
__wsum sum;
might_sleep();
if (!user_access_begin(dst, len)) {
*errp = -EFAULT;
if (!user_access_begin(dst, len))
return 0;
}
if (unlikely((unsigned long)dst & 6)) {
while (((unsigned long)dst & 6) && len >= 2) {
__u16 val16 = *(__u16 *)src;
isum = (__force __wsum)add32_with_carry(
(__force unsigned)isum, val16);
unsafe_put_user(val16, (__u16 __user *)dst, out);
src += 2;
dst += 2;
len -= 2;
}
}
*errp = 0;
ret = csum_partial_copy_generic(src, (void __force *)dst,
len, isum, NULL, errp);
sum = csum_partial_copy_generic(src, (void __force *)dst, len);
user_access_end();
return ret;
out:
user_access_end();
*errp = -EFAULT;
return isum;
return sum;
}
EXPORT_SYMBOL(csum_and_copy_to_user);
@@ -129,9 +69,9 @@ EXPORT_SYMBOL(csum_and_copy_to_user);
* Returns a 32bit unfolded checksum of the buffer.
*/
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
return csum_partial_copy_generic(src, dst, len);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);

@@ -20,22 +20,6 @@
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*
* If you use these functions directly please don't forget the
* access_ok().
*/
static __inline__
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum)
{
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
}
/**
* csum_fold - Fold and invert a 32bit checksum.
* sum: 32bit unfolded sum

@@ -35,27 +35,4 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
return csum_fold(sum);
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src,
void __user *dst,
int len, __wsum sum, int *err_ptr)
{
if (access_ok(dst, len)) {
if (copy_to_user(dst, src, len)) {
*err_ptr = -EFAULT;
return (__force __wsum)-1;
}
return csum_partial(src, len, sum);
}
if (len)
*err_ptr = -EFAULT;
return (__force __wsum)-1; /* invalid checksum */
}
#endif

@@ -37,32 +37,27 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary
*/
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
int len, __wsum sum,
int *src_err_ptr, int *dst_err_ptr);
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
#define _HAVE_ARCH_CSUM_AND_COPY
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*/
static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum)
__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
return csum_partial_copy_generic(src, dst, len);
}
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
int len)
{
if (access_ok(src, len))
return csum_partial_copy_generic((__force const void *)src, dst,
len, sum, err_ptr, NULL);
if (len)
*err_ptr = -EFAULT;
return sum;
if (!access_ok(src, len))
return 0;
return csum_partial_copy_generic((__force const void *)src, dst, len);
}
/*
@@ -243,15 +238,10 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
*/
#define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src,
void __user *dst, int len,
__wsum sum, int *err_ptr)
void __user *dst, int len)
{
if (access_ok(dst, len))
return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr);
if (len)
*err_ptr = -EFAULT;
return (__force __wsum)-1; /* invalid checksum */
if (!access_ok(dst, len))
return 0;
return csum_partial_copy_generic(src, (__force void *)dst, len);
}
#endif

@@ -175,19 +175,14 @@ ENDPROC(csum_partial)
*/
/*
unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
int sum, int *src_err_ptr, int *dst_err_ptr)
unsigned int csum_partial_copy_generic (const char *src, char *dst, int len)
a2 = src
a3 = dst
a4 = len
a5 = sum
a6 = src_err_ptr
a7 = dst_err_ptr
a8 = temp
a9 = temp
a10 = temp
a11 = original len for exception handling
a12 = original dst for exception handling
This function is optimized for 4-byte aligned addresses. Other
alignments work, but not nearly as efficiently.
@@ -196,8 +191,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
ENTRY(csum_partial_copy_generic)
abi_entry_default
mov a12, a3
mov a11, a4
movi a5, -1
or a10, a2, a3
/* We optimize the following alignment tests for the 4-byte
@@ -228,26 +222,26 @@ ENTRY(csum_partial_copy_generic)
#endif
EX(10f) l32i a9, a2, 0
EX(10f) l32i a8, a2, 4
EX(11f) s32i a9, a3, 0
EX(11f) s32i a8, a3, 4
EX(10f) s32i a9, a3, 0
EX(10f) s32i a8, a3, 4
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
EX(10f) l32i a9, a2, 8
EX(10f) l32i a8, a2, 12
EX(11f) s32i a9, a3, 8
EX(11f) s32i a8, a3, 12
EX(10f) s32i a9, a3, 8
EX(10f) s32i a8, a3, 12
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
EX(10f) l32i a9, a2, 16
EX(10f) l32i a8, a2, 20
EX(11f) s32i a9, a3, 16
EX(11f) s32i a8, a3, 20
EX(10f) s32i a9, a3, 16
EX(10f) s32i a8, a3, 20
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
EX(10f) l32i a9, a2, 24
EX(10f) l32i a8, a2, 28
EX(11f) s32i a9, a3, 24
EX(11f) s32i a8, a3, 28
EX(10f) s32i a9, a3, 24
EX(10f) s32i a8, a3, 28
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
addi a2, a2, 32
@@ -267,7 +261,7 @@ EX(11f) s32i a8, a3, 28
.Loop6:
#endif
EX(10f) l32i a9, a2, 0
EX(11f) s32i a9, a3, 0
EX(10f) s32i a9, a3, 0
ONES_ADD(a5, a9)
addi a2, a2, 4
addi a3, a3, 4
@@ -298,7 +292,7 @@ EX(11f) s32i a9, a3, 0
.Loop7:
#endif
EX(10f) l16ui a9, a2, 0
EX(11f) s16i a9, a3, 0
EX(10f) s16i a9, a3, 0
ONES_ADD(a5, a9)
addi a2, a2, 2
addi a3, a3, 2
@@ -309,7 +303,7 @@ EX(11f) s16i a9, a3, 0
/* This section processes a possible trailing odd byte. */
_bbci.l a4, 0, 8f /* 1-byte chunk */
EX(10f) l8ui a9, a2, 0
EX(11f) s8i a9, a3, 0
EX(10f) s8i a9, a3, 0
#ifdef __XTENSA_EB__
slli a9, a9, 8 /* shift byte to bits 8..15 */
#endif
@@ -334,8 +328,8 @@ EX(11f) s8i a9, a3, 0
#endif
EX(10f) l8ui a9, a2, 0
EX(10f) l8ui a8, a2, 1
EX(11f) s8i a9, a3, 0
EX(11f) s8i a8, a3, 1
EX(10f) s8i a9, a3, 0
EX(10f) s8i a8, a3, 1
#ifdef __XTENSA_EB__
slli a9, a9, 8 /* combine into a single 16-bit value */
#else /* for checksum computation */
@@ -356,38 +350,7 @@ ENDPROC(csum_partial_copy_generic)
# Exception handler:
.section .fixup, "ax"
/*
a6 = src_err_ptr
a7 = dst_err_ptr
a11 = original len for exception handling
a12 = original dst for exception handling
*/
10:
_movi a2, -EFAULT
s32i a2, a6, 0 /* src_err_ptr */
# clear the complete destination - computing the rest
# is too much work
movi a2, 0
#if XCHAL_HAVE_LOOPS
loopgtz a11, 2f
#else
beqz a11, 2f
add a11, a11, a12 /* a11 = ending address */
.Leloop:
#endif
s8i a2, a12, 0
addi a12, a12, 1
#if !XCHAL_HAVE_LOOPS
blt a12, a11, .Leloop
#endif
2:
abi_ret_default
11:
movi a2, -EFAULT
s32i a2, a7, 0 /* dst_err_ptr */
movi a2, 0
abi_ret_default

@@ -1419,8 +1419,7 @@ typhoon_download_firmware(struct typhoon *tp)
* the checksum, we can do this once, at the end.
*/
csum = csum_fold(csum_partial_copy_nocheck(image_data,
dpage, len,
0));
dpage, len));
iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
iowrite32(le16_to_cpu((__force __le16)csum),
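
With the sum argument gone, csum_partial_copy_nocheck() returns a 32-bit unfolded sum that csum_fold() then collapses into the inverted 16-bit value written to the device. Matching the usual semantics of the kernel helper, a folding sketch in C (the function name here is mine):

#include <stdint.h>

/* fold a 32-bit unfolded sum to 16 bits and invert it; two folds
 * suffice because the first one can carry at most a single bit */
static uint16_t csum_fold32(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}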

@@ -1168,7 +1168,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
*(__sum16 *)(skb->data + offset) = 0;
csum = skb_copy_and_csum_bits(skb, start,
nskb->data + start,
skb->len - start, 0);
skb->len - start);
/* add in the header checksums */
if (skb->protocol == htons(ETH_P_IP)) {

@@ -16,18 +16,6 @@
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* the same as csum_partial, but copies from src while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
#ifndef csum_partial_copy_nocheck
__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
__wsum sum);
#endif
#ifndef ip_fast_csum
/*
* This is a version of ip_compute_csum() optimized for IP headers,

@@ -3545,7 +3545,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
int len, __wsum csum);
int len);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int len,
unsigned int flags);

@@ -24,26 +24,32 @@
#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user (const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
int len)
{
if (copy_from_user(dst, src, len))
*err_ptr = -EFAULT;
return csum_partial(dst, len, sum);
return 0;
return csum_partial(dst, len, ~0U);
}
#endif
#ifndef HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user
(const void *src, void __user *dst, int len, __wsum sum, int *err_ptr)
(const void *src, void __user *dst, int len)
{
sum = csum_partial(src, len, sum);
__wsum sum = csum_partial(src, len, ~0U);
if (copy_to_user(dst, src, len) == 0)
return sum;
if (len)
*err_ptr = -EFAULT;
return (__force __wsum)-1; /* invalid checksum */
return 0;
}
#endif
#ifndef _HAVE_ARCH_CSUM_AND_COPY
static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
memcpy(dst, src, len);
return csum_partial(dst, len, 0);
}
#endif
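
These generic fallbacks spell out the new convention in plain C: a faulting copy returns 0, and a successful one runs csum_partial() over the destination with a ~0U seed. The seed is what makes 0 unambiguous, since an end-around-carry sum that starts from all-ones can never fold down to all-zeros. A short self-checking sketch of that invariant:

#include <assert.h>
#include <stdint.h>

/* 32-bit end-around-carry addition, the arithmetic behind __wsum */
static uint32_t ones_add32(uint32_t a, uint32_t b)
{
        uint64_t s = (uint64_t)a + b;

        return (uint32_t)((s & 0xffffffffu) + (s >> 32));
}

int main(void)
{
        /* start from the ~0U seed and mix in arbitrary words: the
         * running sum stays nonzero, so 0 can safely mean "fault" */
        uint32_t sum = 0xffffffffu;
        uint32_t w;

        for (w = 0; w < 100000; w += 7) {
                sum = ones_add32(sum, w * 2654435761u);
                assert(sum != 0);
        }
        return 0;
}

(ones_add32() can only return 0 when both inputs are 0, and the seed guarantees the running sum never is.)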

@@ -145,17 +145,6 @@ __sum16 ip_compute_csum(const void *buff, int len)
}
EXPORT_SYMBOL(ip_compute_csum);
/*
* copy from ds while checksumming, otherwise like csum_partial
*/
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
#ifndef csum_tcpudp_nofold
static inline u32 from64to32(u64 x)
{

@@ -581,7 +581,7 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
__wsum sum, size_t off)
{
__wsum next = csum_partial_copy_nocheck(from, to, len, 0);
__wsum next = csum_partial_copy_nocheck(from, to, len);
return csum_block_add(sum, next, off);
}
@@ -1447,15 +1447,14 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
return 0;
}
iterate_and_advance(i, bytes, v, ({
int err = 0;
next = csum_and_copy_from_user(v.iov_base,
(to += v.iov_len) - v.iov_len,
v.iov_len, 0, &err);
if (!err) {
v.iov_len);
if (next) {
sum = csum_block_add(sum, next, off);
off += v.iov_len;
}
err ? v.iov_len : 0;
next ? 0 : v.iov_len;
}), ({
char *p = kmap_atomic(v.bv_page);
sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
@@ -1489,11 +1488,10 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
if (unlikely(i->count < bytes))
return false;
iterate_all_kinds(i, bytes, v, ({
int err = 0;
next = csum_and_copy_from_user(v.iov_base,
(to += v.iov_len) - v.iov_len,
v.iov_len, 0, &err);
if (err)
v.iov_len);
if (!next)
return false;
sum = csum_block_add(sum, next, off);
off += v.iov_len;
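
In these iterate_and_advance()/iterate_all_kinds() step expressions the value of the expression is the number of bytes left unprocessed, so "next ? 0 : v.iov_len" consumes the whole segment on success and nothing on a fault, replacing the old err_ptr plumbing. The shape of one user-segment step as a self-contained sketch; the copy helper is the same simplified stand-in used in the sparc sketch above, and the odd-offset rotation mirrors what csum_block_add() does:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef uint32_t __wsum;        /* stand-in for the kernel type */

static uint32_t ones_add32(uint32_t a, uint32_t b)
{
        uint64_t s = (uint64_t)a + b;

        return (uint32_t)((s & 0xffffffffu) + (s >> 32));
}

/* a sub-sum computed at an odd offset has its byte lanes swapped;
 * rotating by 8 undoes that (valid arithmetic mod 2^32 - 1) */
static uint32_t block_add(uint32_t sum, uint32_t part, size_t off)
{
        if (off & 1)
                part = (part >> 8) | (part << 24);
        return ones_add32(sum, part);
}

/* simplified csum_and_copy_from_user(): nonzero sum, or 0 on fault */
static __wsum csum_and_copy_model(const void *src, void *dst, int len)
{
        const uint8_t *p = src;
        uint64_t sum = 0xffffffffu;
        int i;

        memcpy(dst, src, len);  /* the faulting path is elided */
        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)((p[i] << 8) | p[i + 1]);
        if (len & 1)
                sum += (uint32_t)p[len - 1] << 8;
        while (sum >> 32)
                sum = (sum & 0xffffffffu) + (sum >> 32);
        return (__wsum)sum;
}

/* one step: returns how many of 'len' bytes were NOT processed */
static size_t csum_copy_step(const void *ubuf, void *to, size_t len,
                             uint32_t *sum, size_t *off)
{
        __wsum next = csum_and_copy_model(ubuf, to, (int)len);

        if (!next)
                return len;     /* fault: consume nothing */
        *sum = block_add(*sum, next, *off);
        *off += len;
        return 0;               /* everything processed */
}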
@@ -1535,15 +1533,14 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
return 0;
}
iterate_and_advance(i, bytes, v, ({
int err = 0;
next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
v.iov_base,
v.iov_len, 0, &err);
if (!err) {
v.iov_len);
if (next) {
sum = csum_block_add(sum, next, off);
off += v.iov_len;
}
err ? v.iov_len : 0;
next ? 0 : v.iov_len;
}), ({
char *p = kmap_atomic(v.bv_page);
sum = csum_and_memcpy(p + v.bv_offset,

@@ -2725,19 +2725,20 @@ EXPORT_SYMBOL(skb_checksum);
/* Both of above in one bottle. */
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
u8 *to, int len, __wsum csum)
u8 *to, int len)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
struct sk_buff *frag_iter;
int pos = 0;
__wsum csum = 0;
/* Copy header. */
if (copy > 0) {
if (copy > len)
copy = len;
csum = csum_partial_copy_nocheck(skb->data + offset, to,
copy, csum);
copy);
if ((len -= copy) == 0)
return csum;
offset += copy;
@@ -2767,7 +2768,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
vaddr = kmap_atomic(p);
csum2 = csum_partial_copy_nocheck(vaddr + p_off,
to + copied,
p_len, 0);
p_len);
kunmap_atomic(vaddr);
csum = csum_block_add(csum, csum2, pos);
pos += p_len;
@@ -2793,7 +2794,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
copy = len;
csum2 = skb_copy_and_csum_bits(frag_iter,
offset - start,
to, copy, 0);
to, copy);
csum = csum_block_add(csum, csum2, pos);
if ((len -= copy) == 0)
return csum;
@@ -3013,7 +3014,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
csum = 0;
if (csstart != skb->len)
csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
skb->len - csstart, 0);
skb->len - csstart);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
long csstuff = csstart + skb->csum_offset;
@@ -3934,7 +3935,7 @@ normal:
skb_copy_and_csum_bits(head_skb, offset,
skb_put(nskb,
len),
len, 0);
len);
SKB_GSO_CB(nskb)->csum_start =
skb_headroom(nskb) + doffset;
} else {

@@ -352,7 +352,7 @@ static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
csum = skb_copy_and_csum_bits(icmp_param->skb,
icmp_param->offset + offset,
to, len, 0);
to, len);
skb->csum = csum_block_add(skb->csum, csum, odd);
if (icmp_pointers[icmp_param->data.icmph.type].error)
@@ -376,15 +376,15 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
ip_flush_pending_frames(sk);
} else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
struct icmphdr *icmph = icmp_hdr(skb);
__wsum csum = 0;
__wsum csum;
struct sk_buff *skb1;
csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
(char *)icmph,
icmp_param->head_len);
skb_queue_walk(&sk->sk_write_queue, skb1) {
csum = csum_add(csum, skb1->csum);
}
csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
(char *)icmph,
icmp_param->head_len, csum);
icmph->checksum = csum_fold(csum);
skb->ip_summed = CHECKSUM_NONE;
ip_push_pending_frames(sk, fl4);
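
Reordering the header checksum is safe because ones-complement addition is commutative and associative: computing the header's sum first (csum_partial_copy_nocheck() now always starts from 0) and folding the queued fragments' sums in afterwards yields the same bits as the old "fragments first, then seed the header pass" order. A self-checking sketch using the same end-around add as the earlier examples:

#include <assert.h>
#include <stdint.h>

static uint32_t ones_add32(uint32_t a, uint32_t b)
{
        uint64_t s = (uint64_t)a + b;

        return (uint32_t)((s & 0xffffffffu) + (s >> 32));
}

int main(void)
{
        /* header sum h, fragment sums f1 and f2: either grouping
         * folds to the same 32-bit value */
        uint32_t h = 0x1234abcd, f1 = 0x00ffee01, f2 = 0xdeadbeef;
        uint32_t fragments_first = ones_add32(ones_add32(f1, f2), h);
        uint32_t header_first    = ones_add32(ones_add32(h, f1), f2);

        assert(fragments_first == header_first);
        return 0;
}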

@@ -1127,7 +1127,7 @@ alloc_new_skb:
if (fraggap) {
skb->csum = skb_copy_and_csum_bits(
skb_prev, maxfraglen,
data + transhdrlen, fraggap, 0);
data + transhdrlen, fraggap);
skb_prev->csum = csum_sub(skb_prev->csum,
skb->csum);
data += fraggap;
@@ -1412,7 +1412,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
skb->csum = skb_copy_and_csum_bits(skb_prev,
maxfraglen,
skb_transport_header(skb),
fraggap, 0);
fraggap);
skb_prev->csum = csum_sub(skb_prev->csum,
skb->csum);
pskb_trim_unique(skb_prev, maxfraglen);
@@ -1649,7 +1649,7 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
{
__wsum csum;
csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
csum = csum_partial_copy_nocheck(dptr+offset, to, len);
skb->csum = csum_block_add(skb->csum, csum, odd);
return 0;
}

@@ -478,7 +478,7 @@ static int raw_getfrag(void *from, char *to, int offset, int len, int odd,
skb->csum = csum_block_add(
skb->csum,
csum_partial_copy_nocheck(rfv->hdr.c + offset,
to, copy, 0),
to, copy),
odd);
odd = 0;

@@ -314,10 +314,10 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st
{
struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
struct sk_buff *org_skb = msg->skb;
__wsum csum = 0;
__wsum csum;
csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
to, len, csum);
to, len);
skb->csum = csum_block_add(skb->csum, csum, odd);
if (!(msg->type & ICMPV6_INFOMSG_MASK))
nf_ct_attach(skb, org_skb);

@@ -1615,7 +1615,7 @@ alloc_new_skb:
if (fraggap) {
skb->csum = skb_copy_and_csum_bits(
skb_prev, maxfraglen,
data + transhdrlen, fraggap, 0);
data + transhdrlen, fraggap);
skb_prev->csum = csum_sub(skb_prev->csum,
skb->csum);
data += fraggap;

@@ -746,7 +746,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
skb->csum = csum_block_add(
skb->csum,
csum_partial_copy_nocheck(rfv->c + offset,
to, copy, 0),
to, copy),
odd);
odd = 0;

@@ -70,7 +70,7 @@ static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to,
if (len > desc->count)
len = desc->count;
pos = desc->offset;
csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len);
desc->csum = csum_block_add(desc->csum, csum2, pos);
desc->count -= len;
desc->offset += len;