linux-stable/arch/x86/lib/csum-partial_64.c
Eric Dumazet 3411506550 x86/csum: Rewrite/optimize csum_partial()
With more NICs supporting CHECKSUM_COMPLETE, and IPv6 being widely
used, csum_partial() is heavily used on small numbers of bytes and
consumes many cycles.

IPv6 header size, for instance, is 40 bytes.

Another thing to consider is that NET_IP_ALIGN is 0 on x86, meaning
that network headers are not word-aligned, unless the driver forces
this.

This means that csum_partial() fetches one u16 to 'align the buffer',
then performs three u64 additions with carry in a loop, then adds a
remaining u32, then a remaining u16.

With this new version, a loop is used only for 64-byte blocks; the
remainder is then bisected, as sketched below.
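
For example, an IPv6 header (len = 40) now takes only the "len & 32"
and "len & 8" branches: no loop iteration and no 4-, 2- or 1-byte
tail. A rough portable sketch of the bisection (illustration only,
not the kernel code; all names here are made up):

	#include <stdint.h>
	#include <string.h>

	static uint64_t add64(uint64_t sum, uint64_t v)
	{
		sum += v;
		return sum + (sum < v);	/* end-around carry */
	}

	static uint64_t sum_qwords(const unsigned char *p, int n,
				   uint64_t sum)
	{
		for (int i = 0; i < n; i++) {
			uint64_t v;

			memcpy(&v, p + 8 * i, 8);
			sum = add64(sum, v);
		}
		return sum;
	}

	static uint64_t csum_sketch(const unsigned char *p, int len,
				    uint64_t sum)
	{
		while (len >= 64) {
			sum = sum_qwords(p, 8, sum);
			p += 64;
			len -= 64;
		}
		if (len & 32) { sum = sum_qwords(p, 4, sum); p += 32; }
		if (len & 16) { sum = sum_qwords(p, 2, sum); p += 16; }
		if (len & 8)  { sum = sum_qwords(p, 1, sum); p += 8; }
		/* any 1..7 trailing bytes are handled separately */
		return sum;
	}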

Tests on various CPUs all show a big reduction in csum_partial()
cost (by 50 to 80%):

Before:
	4.16%  [kernel]       [k] csum_partial
After:
	0.83%  [kernel]       [k] csum_partial

If run in a loop 1,000,000 times:

Before:
	26,922,913      cycles                    # 3846130.429 GHz
	80,302,961      instructions              #    2.98  insn per cycle
	21,059,816      branches                  # 3008545142.857 M/sec
	     2,896      branch-misses             #    0.01% of all branches
After:
	17,960,709      cycles                    # 3592141.800 GHz
	41,292,805      instructions              #    2.30  insn per cycle
	11,058,119      branches                  # 2211623800.000 M/sec
	     2,997      branch-misses             #    0.03% of all branches
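
The benchmark loop itself is trivial; a user-space harness along
these lines (a sketch reusing the illustrative csum_sketch() above,
not the kernel symbol) can be inspected with "perf stat":

	int main(void)
	{
		static unsigned char buf[40];	/* IPv6 header size */
		volatile uint64_t sink = 0;

		for (int i = 0; i < 1000000; i++)
			sink += csum_sketch(buf, sizeof(buf), 0);
		return 0;
	}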

 [ bp: Massage, merge in subsequent fixes into a single patch:
   - um compilation error due to missing load_unaligned_zeropad():
	- Reported-by: kernel test robot <lkp@intel.com>
	- Link: https://lkml.kernel.org/r/20211118175239.1525650-1-eric.dumazet@gmail.com
   - Fix initial seed for odd buffers
	- Reported-by: Noah Goldstein <goldstein.w.n@gmail.com>
	- Link: https://lkml.kernel.org/r/20211125141817.3541501-1-eric.dumazet@gmail.com
  ]

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Link: https://lore.kernel.org/r/20211112161950.528886-1-eric.dumazet@gmail.com
2021-12-08 11:26:09 +01:00

// SPDX-License-Identifier: GPL-2.0
/*
 * arch/x86_64/lib/csum-partial.c
 *
 * This file contains network checksum routines that are better done
 * in an architecture-specific manner due to speed.
 */
#include <linux/compiler.h>
#include <linux/export.h>
#include <asm/checksum.h>
#include <asm/word-at-a-time.h>

static inline unsigned short from32to16(unsigned a)
{
	unsigned short b = a >> 16;

	asm("addw %w2,%w0\n\t"
	    "adcw $0,%w0\n"
	    : "=r" (b)
	    : "0" (b), "r" (a));
	return b;
}
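
/*
 * Portable equivalent of the asm above (illustration only):
 *
 *	unsigned r = (a >> 16) + (a & 0xffff);	// add the 16-bit halves
 *	return (unsigned short)(r + (r >> 16));	// fold end-around carry
 */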

/*
 * Do a checksum on an arbitrary memory area.
 * Returns a 32-bit checksum.
 *
 * This isn't as time critical as it used to be because many NICs
 * do hardware checksumming these days.
 *
 * Still, with CHECKSUM_COMPLETE this is called to compute
 * checksums on IPv6 headers (40 bytes) and other small parts.
 * It's best to have buff aligned on a 64-bit boundary.
 */
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
	u64 temp64 = (__force u64)sum;
	unsigned odd, result;

	odd = 1 & (unsigned long) buff;
	if (unlikely(odd)) {
		if (unlikely(len == 0))
			return sum;
		temp64 = ror32((__force u32)sum, 8);
		temp64 += (*(unsigned char *)buff << 8);
		len--;
		buff++;
	}
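
	/*
	 * If buff was odd, every byte that follows is summed one
	 * position off from its true offset; the seed was rotated by
	 * 8 bits above to match, and the 16-bit result is byte-swapped
	 * back at the end of the function.
	 */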
	while (unlikely(len >= 64)) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq 2*8(%[src]),%[res]\n\t"
		    "adcq 3*8(%[src]),%[res]\n\t"
		    "adcq 4*8(%[src]),%[res]\n\t"
		    "adcq 5*8(%[src]),%[res]\n\t"
		    "adcq 6*8(%[src]),%[res]\n\t"
		    "adcq 7*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
			: [res] "+r" (temp64)
			: [src] "r" (buff)
			: "memory");
		buff += 64;
		len -= 64;
	}
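	/*
	 * The eight adcq instructions above form a single carry chain;
	 * the trailing "adcq $0" folds the final carry back into temp64.
	 */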
	if (len & 32) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq 2*8(%[src]),%[res]\n\t"
		    "adcq 3*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
			: [res] "+r" (temp64)
			: [src] "r" (buff)
			: "memory");
		buff += 32;
	}
	if (len & 16) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
			: [res] "+r" (temp64)
			: [src] "r" (buff)
			: "memory");
		buff += 16;
	}
	if (len & 8) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
			: [res] "+r" (temp64)
			: [src] "r" (buff)
			: "memory");
		buff += 8;
	}
	if (len & 7) {
#ifdef CONFIG_DCACHE_WORD_ACCESS
		unsigned int shift = (8 - (len & 7)) * 8;
		unsigned long trail;

		trail = (load_unaligned_zeropad(buff) << shift) >> shift;

		asm("addq %[trail],%[res]\n\t"
		    "adcq $0,%[res]"
			: [res] "+r" (temp64)
			: [trail] "r" (trail));
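		/*
		 * Example: for len & 7 == 3, shift is 40, so the 8-byte
		 * load above is masked down to its 3 low bytes, i.e. the
		 * 3 leading buffer bytes on little-endian.
		 * load_unaligned_zeropad() zero-fills instead of faulting
		 * if the read runs into an unmapped page.
		 */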
#else
		if (len & 4) {
			asm("addq %[val],%[res]\n\t"
			    "adcq $0,%[res]"
				: [res] "+r" (temp64)
				: [val] "r" ((u64)*(u32 *)buff)
				: "memory");
			buff += 4;
		}
		if (len & 2) {
			asm("addq %[val],%[res]\n\t"
			    "adcq $0,%[res]"
				: [res] "+r" (temp64)
				: [val] "r" ((u64)*(u16 *)buff)
				: "memory");
			buff += 2;
		}
		if (len & 1) {
			asm("addq %[val],%[res]\n\t"
			    "adcq $0,%[res]"
				: [res] "+r" (temp64)
				: [val] "r" ((u64)*(u8 *)buff)
				: "memory");
		}
#endif
	}
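	/* Fold the 64-bit accumulator to 32 bits with end-around carry. */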
	result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
	if (unlikely(odd)) {
		result = from32to16(result);
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
	}
	return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
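
/*
 * Typical use (sketch): callers fold the 32-bit partial sum down to
 * the final 16-bit checksum, e.g. for a 40-byte IPv6 header at hdr:
 *
 *	__wsum w = csum_partial(hdr, 40, 0);
 *	__sum16 c = csum_fold(w);
 */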

/*
 * This routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c.
 */
__sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
EXPORT_SYMBOL(ip_compute_csum);