x86/csum: Rewrite/optimize csum_partial()
With more NICs supporting CHECKSUM_COMPLETE, and IPv6 being widely used,
csum_partial() is heavily used with small amounts of bytes and consumes
many cycles. IPv6 header size, for instance, is 40 bytes.

Another thing to consider is that NET_IP_ALIGN is 0 on x86, meaning that
network headers are not word-aligned unless the driver forces this.

This means that csum_partial() fetches one u16 to 'align the buffer', then
performs three u64 additions with carry in a loop, then a remaining u32,
then a remaining u16.

With this new version, a loop is performed only for the 64-byte blocks,
and the remaining length is then bisected.

Tests on various CPUs all show a big reduction in csum_partial() cost
(by 50 to 80%).

Before:
    4.16%  [kernel]  [k] csum_partial
After:
    0.83%  [kernel]  [k] csum_partial

If run in a loop 1,000,000 times:

Before:
    26,922,913  cycles         # 3846130.429 GHz
    80,302,961  instructions   # 2.98 insn per cycle
    21,059,816  branches       # 3008545142.857 M/sec
         2,896  branch-misses  # 0.01% of all branches
After:
    17,960,709  cycles         # 3592141.800 GHz
    41,292,805  instructions   # 2.30 insn per cycle
    11,058,119  branches       # 2211623800.000 M/sec
         2,997  branch-misses  # 0.03% of all branches

  [ bp: Massage, merge in subsequent fixes into a single patch:
    - um compilation error due to missing load_unaligned_zeropad():
        Reported-by: kernel test robot <lkp@intel.com>
        Link: https://lkml.kernel.org/r/20211118175239.1525650-1-eric.dumazet@gmail.com
    - Fix initial seed for odd buffers:
        Reported-by: Noah Goldstein <goldstein.w.n@gmail.com>
        Link: https://lkml.kernel.org/r/20211125141817.3541501-1-eric.dumazet@gmail.com ]

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Link: https://lore.kernel.org/r/20211112161950.528886-1-eric.dumazet@gmail.com
Parent: 0fcfb00b28
Commit: 3411506550
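For illustration only, here is a minimal user-space C sketch of the structure the
commit message describes: one loop for 64-byte blocks, then the remaining (< 64)
length bisected on its 32/16/8 bits, with the last 0-7 bytes added as 4/2/1-byte
pieces. This is not the kernel code in the diff below: it skips the odd-address
and seed handling, replaces the addq/adcq chains and load_unaligned_zeropad()
with portable 64-bit adds and end-around carry, and the helper names
(add64_carry(), load64(), csum64_sketch()) are made up for the example.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical helper: 64-bit add with end-around carry, standing in
 * for the addq/adcq chains used by the kernel version. */
static uint64_t add64_carry(uint64_t a, uint64_t b)
{
        uint64_t s = a + b;

        return s + (s < a);             /* fold the carry back in */
}

/* Unaligned-safe 64-bit load. */
static uint64_t load64(const unsigned char *p)
{
        uint64_t v;

        memcpy(&v, p, sizeof(v));
        return v;
}

/* Sketch of the new shape; assumes a little-endian host. Returns the
 * unfolded 32-bit sum, like csum_partial() does. */
static uint32_t csum64_sketch(const unsigned char *buff, size_t len)
{
        uint64_t sum = 0;
        size_t i;

        while (len >= 64) {             /* only loop for full 64-byte blocks */
                for (i = 0; i < 64; i += 8)
                        sum = add64_carry(sum, load64(buff + i));
                buff += 64;
                len -= 64;
        }
        if (len & 32) {
                for (i = 0; i < 32; i += 8)
                        sum = add64_carry(sum, load64(buff + i));
                buff += 32;
        }
        if (len & 16) {
                sum = add64_carry(sum, load64(buff));
                sum = add64_carry(sum, load64(buff + 8));
                buff += 16;
        }
        if (len & 8) {
                sum = add64_carry(sum, load64(buff));
                buff += 8;
        }
        if (len & 4) {
                uint32_t v;

                memcpy(&v, buff, sizeof(v));
                sum = add64_carry(sum, v);
                buff += 4;
        }
        if (len & 2) {
                uint16_t v;

                memcpy(&v, buff, sizeof(v));
                sum = add64_carry(sum, v);
                buff += 2;
        }
        if (len & 1)
                sum = add64_carry(sum, *buff);

        /* Fold 64 bits down to 32, keeping the end-around carry. */
        sum = (sum >> 32) + (sum & 0xffffffff);
        sum = (sum >> 32) + (sum & 0xffffffff);
        return (uint32_t)sum;
}

For the 40-byte IPv6 header case called out in the commit message, the 64-byte
loop body never executes; only the len & 32 and len & 8 branches run.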
@@ -9,6 +9,7 @@
#include <linux/compiler.h>
#include <linux/export.h>
#include <asm/checksum.h>
#include <asm/word-at-a-time.h>

static inline unsigned short from32to16(unsigned a)
{
@@ -21,120 +22,119 @@ static inline unsigned short from32to16(unsigned a)
}

/*
 * Do a 64-bit checksum on an arbitrary memory area.
 * Do a checksum on an arbitrary memory area.
 * Returns a 32bit checksum.
 *
 * This isn't as time critical as it used to be because many NICs
 * do hardware checksumming these days.
 *
 * Things tried and found to not make it faster:
 *   Manual Prefetching
 *   Unrolling to an 128 bytes inner loop.
 *   Using interleaving with more registers to break the carry chains.
 */
static unsigned do_csum(const unsigned char *buff, unsigned len)
{
        unsigned odd, count;
        unsigned long result = 0;

        if (unlikely(len == 0))
                return result;
        odd = 1 & (unsigned long) buff;
        if (unlikely(odd)) {
                result = *buff << 8;
                len--;
                buff++;
        }
        count = len >> 1;               /* nr of 16-bit words.. */
        if (count) {
                if (2 & (unsigned long) buff) {
                        result += *(unsigned short *)buff;
                        count--;
                        len -= 2;
                        buff += 2;
                }
                count >>= 1;            /* nr of 32-bit words.. */
                if (count) {
                        unsigned long zero;
                        unsigned count64;
                        if (4 & (unsigned long) buff) {
                                result += *(unsigned int *) buff;
                                count--;
                                len -= 4;
                                buff += 4;
                        }
                        count >>= 1;    /* nr of 64-bit words.. */

                        /* main loop using 64byte blocks */
                        zero = 0;
                        count64 = count >> 3;
                        while (count64) {
                                asm("addq 0*8(%[src]),%[res]\n\t"
                                    "adcq 1*8(%[src]),%[res]\n\t"
                                    "adcq 2*8(%[src]),%[res]\n\t"
                                    "adcq 3*8(%[src]),%[res]\n\t"
                                    "adcq 4*8(%[src]),%[res]\n\t"
                                    "adcq 5*8(%[src]),%[res]\n\t"
                                    "adcq 6*8(%[src]),%[res]\n\t"
                                    "adcq 7*8(%[src]),%[res]\n\t"
                                    "adcq %[zero],%[res]"
                                    : [res] "=r" (result)
                                    : [src] "r" (buff), [zero] "r" (zero),
                                      "[res]" (result));
                                buff += 64;
                                count64--;
                        }

                        /* last up to 7 8byte blocks */
                        count %= 8;
                        while (count) {
                                asm("addq %1,%0\n\t"
                                    "adcq %2,%0\n"
                                    : "=r" (result)
                                    : "m" (*(unsigned long *)buff),
                                      "r" (zero), "0" (result));
                                --count;
                                buff += 8;
                        }
                        result = add32_with_carry(result>>32,
                                                  result&0xffffffff);

                        if (len & 4) {
                                result += *(unsigned int *) buff;
                                buff += 4;
                        }
                }
                if (len & 2) {
                        result += *(unsigned short *) buff;
                        buff += 2;
                }
        }
        if (len & 1)
                result += *buff;
        result = add32_with_carry(result>>32, result & 0xffffffff);
        if (unlikely(odd)) {
                result = from32to16(result);
                result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
        }
        return result;
}

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * Still, with CHECKSUM_COMPLETE this is called to compute
 * checksums on IPv6 headers (40 bytes) and other small parts.
 * it's best to have buff aligned on a 64-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
        return (__force __wsum)add32_with_carry(do_csum(buff, len),
                                                (__force u32)sum);
        u64 temp64 = (__force u64)sum;
        unsigned odd, result;

        odd = 1 & (unsigned long) buff;
        if (unlikely(odd)) {
                if (unlikely(len == 0))
                        return sum;
                temp64 = ror32((__force u32)sum, 8);
                temp64 += (*(unsigned char *)buff << 8);
                len--;
                buff++;
        }

        while (unlikely(len >= 64)) {
                asm("addq 0*8(%[src]),%[res]\n\t"
                    "adcq 1*8(%[src]),%[res]\n\t"
                    "adcq 2*8(%[src]),%[res]\n\t"
                    "adcq 3*8(%[src]),%[res]\n\t"
                    "adcq 4*8(%[src]),%[res]\n\t"
                    "adcq 5*8(%[src]),%[res]\n\t"
                    "adcq 6*8(%[src]),%[res]\n\t"
                    "adcq 7*8(%[src]),%[res]\n\t"
                    "adcq $0,%[res]"
                    : [res] "+r" (temp64)
                    : [src] "r" (buff)
                    : "memory");
                buff += 64;
                len -= 64;
        }

        if (len & 32) {
                asm("addq 0*8(%[src]),%[res]\n\t"
                    "adcq 1*8(%[src]),%[res]\n\t"
                    "adcq 2*8(%[src]),%[res]\n\t"
                    "adcq 3*8(%[src]),%[res]\n\t"
                    "adcq $0,%[res]"
                    : [res] "+r" (temp64)
                    : [src] "r" (buff)
                    : "memory");
                buff += 32;
        }
        if (len & 16) {
                asm("addq 0*8(%[src]),%[res]\n\t"
                    "adcq 1*8(%[src]),%[res]\n\t"
                    "adcq $0,%[res]"
                    : [res] "+r" (temp64)
                    : [src] "r" (buff)
                    : "memory");
                buff += 16;
        }
        if (len & 8) {
                asm("addq 0*8(%[src]),%[res]\n\t"
                    "adcq $0,%[res]"
                    : [res] "+r" (temp64)
                    : [src] "r" (buff)
                    : "memory");
                buff += 8;
        }
        if (len & 7) {
#ifdef CONFIG_DCACHE_WORD_ACCESS
                unsigned int shift = (8 - (len & 7)) * 8;
                unsigned long trail;

                trail = (load_unaligned_zeropad(buff) << shift) >> shift;

                asm("addq %[trail],%[res]\n\t"
                    "adcq $0,%[res]"
                    : [res] "+r" (temp64)
                    : [trail] "r" (trail));
#else
                if (len & 4) {
                        asm("addq %[val],%[res]\n\t"
                            "adcq $0,%[res]"
                            : [res] "+r" (temp64)
                            : [val] "r" ((u64)*(u32 *)buff)
                            : "memory");
                        buff += 4;
                }
                if (len & 2) {
                        asm("addq %[val],%[res]\n\t"
                            "adcq $0,%[res]"
                            : [res] "+r" (temp64)
                            : [val] "r" ((u64)*(u16 *)buff)
                            : "memory");
                        buff += 2;
                }
                if (len & 1) {
                        asm("addq %[val],%[res]\n\t"
                            "adcq $0,%[res]"
                            : [res] "+r" (temp64)
                            : [val] "r" ((u64)*(u8 *)buff)
                            : "memory");
                }
#endif
        }
        result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
        if (unlikely(odd)) {
                result = from32to16(result);
                result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
        }
        return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);

@@ -147,4 +147,3 @@ __sum16 ip_compute_csum(const void *buff, int len)
        return csum_fold(csum_partial(buff,len,0));
}
EXPORT_SYMBOL(ip_compute_csum);
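As the ip_compute_csum() hunk above shows, the 32-bit value returned by
csum_partial() is typically folded to the final 16-bit Internet checksum with
csum_fold(), or mixed into a pseudo-header sum via csum_tcpudp_magic(), as the
function comment notes. A small usage sketch (the buffer and length names here
are illustrative only):

/* Checksum split across two fragments; only the last one may be odd. */
__wsum sum = 0;

sum = csum_partial(frag1, frag1_len, sum);
sum = csum_partial(frag2, frag2_len, sum);

/* One-shot checksum of a 40-byte IPv6 header, folded to 16 bits. */
__sum16 check = csum_fold(csum_partial(ipv6_hdr, 40, 0));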