crypto: cast6-avx - tune assembler code for more performance

This patch replaces 'movb' instructions with 'movzbl' to break false
register dependencies, interleaves instructions better for out-of-order
scheduling, and merges the constant 16-bit rotation with the variable
round-key rotation.
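
For illustration only (not part of the patch): a minimal sketch of the
false-dependency problem, using a hypothetical lookup table 'table' in
place of the real cast6 S-boxes. This is also why the old code had to
pre-zero RID1/RID2 with xorq, which the movzbl version no longer needs.

	/* 'movb' writes only the low 8 bits of %rax, so the address
	 * calculation below still depends on whatever last produced the
	 * full value of %rax (a false dependency that limits scheduling):
	 */
	movb	%dl, %al
	movl	table(, %rax, 4), %ecx

	/* 'movzbl' writes the whole register (zero-extending into %rax),
	 * so the load depends only on %dl:
	 */
	movzbl	%dl, %eax
	movl	table(, %rax, 4), %ecx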

tcrypt ECB results:

Intel Core i5-2450M:

size    old-vs-new      new-vs-generic  old-vs-generic
        enc     dec     enc     dec     enc     dec
256     1.13x   1.19x   2.05x   2.17x   1.82x   1.82x
1k      1.18x   1.21x   2.26x   2.33x   1.93x   1.93x
8k      1.19x   1.19x   2.32x   2.33x   1.95x   1.95x

[v2]
 - Interleave instructions in a different way, so that no new FPU<=>CPU
   register moves are added, as these cause a performance drop on
   Bulldozer.
 - Improve handling of the variable round-key rotation (see the sketch
   after this list).
 - Interleave further for better out-of-order scheduling.
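
The rotation merge can be pictured with scalar code (a sketch for
illustration, not taken from the patch; assume %cl holds a 5-bit kr
value and %eax a data word). Both rotations are mod 32, so rotating by
16 and then by kr equals a single rotation by (kr + 16) mod 32, which
for a 5-bit kr is simply kr ^ 16; this is what preload_rkr() applies to
16 round keys at once with a single vpxor against .L16_mask.

	xorb	$16, %cl	/* fold the constant 16-bit rotation
				 * into the variable rotation amount  */
	roll	%cl, %eax	/* one combined rotation instead of two */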

Cc: Johannes Goetzfried <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit c09220e1bc
parent ddaea7869d
Author:     Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
AuthorDate: 2012-08-28 14:24:54 +03:00
Committer:  Herbert Xu <herbert@gondor.apana.org.au>

diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S

@@ -4,6 +4,8 @@
  * Copyright (C) 2012 Johannes Goetzfried
  *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
  *
+ * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -22,7 +24,6 @@
  */

 .file "cast6-avx-x86_64-asm_64.S"
-.text

 .extern cast6_s1
 .extern cast6_s2
@@ -54,20 +55,21 @@
 #define RC2 %xmm6
 #define RD2 %xmm7

 #define RX %xmm8

 #define RKM %xmm9
-#define RKRF %xmm10
-#define RKRR %xmm11
+#define RKR %xmm10
+#define RKRF %xmm11
+#define RKRR %xmm12
+#define R32 %xmm13
+#define R1ST %xmm14

-#define RTMP %xmm12
-#define RMASK %xmm13
-#define R32 %xmm14
+#define RTMP %xmm15

-#define RID1 %rax
-#define RID1b %al
-#define RID2 %rbx
-#define RID2b %bl
+#define RID1 %rbp
+#define RID1d %ebp
+#define RID2 %rsi
+#define RID2d %esi

 #define RGI1 %rdx
 #define RGI1bl %dl
@@ -76,6 +78,13 @@
 #define RGI2bl %cl
 #define RGI2bh %ch

+#define RGI3 %rax
+#define RGI3bl %al
+#define RGI3bh %ah
+#define RGI4 %rbx
+#define RGI4bl %bl
+#define RGI4bh %bh
+
 #define RFS1 %r8
 #define RFS1d %r8d
 #define RFS2 %r9
@@ -84,95 +93,106 @@
 #define RFS3d %r10d

-#define lookup_32bit(src, dst, op1, op2, op3) \
-	movb src ## bl, RID1b; \
-	movb src ## bh, RID2b; \
+#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
+	movzbl src ## bh, RID1d; \
+	movzbl src ## bl, RID2d; \
+	shrq $16, src; \
 	movl s1(, RID1, 4), dst ## d; \
 	op1 s2(, RID2, 4), dst ## d; \
-	shrq $16, src; \
-	movb src ## bl, RID1b; \
-	movb src ## bh, RID2b; \
+	movzbl src ## bh, RID1d; \
+	movzbl src ## bl, RID2d; \
+	interleave_op(il_reg); \
 	op2 s3(, RID1, 4), dst ## d; \
 	op3 s4(, RID2, 4), dst ## d;

-#define F(a, x, op0, op1, op2, op3) \
+#define dummy(d) /* do nothing */
+
+#define shr_next(reg) \
+	shrq $16, reg;
+
+#define F_head(a, x, gi1, gi2, op0) \
 	op0 a, RKM, x; \
 	vpslld RKRF, x, RTMP; \
 	vpsrld RKRR, x, x; \
 	vpor RTMP, x, x; \
 	\
-	vpshufb RMASK, x, x; \
-	vmovq x, RGI1; \
-	vpsrldq $8, x, x; \
-	vmovq x, RGI2; \
-	\
-	lookup_32bit(RGI1, RFS1, op1, op2, op3); \
-	shrq $16, RGI1; \
-	lookup_32bit(RGI1, RFS2, op1, op2, op3); \
-	shlq $32, RFS2; \
-	orq RFS1, RFS2; \
+	vmovq x, gi1; \
+	vpextrq $1, x, gi2;
+
+#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
+	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
+	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
 	\
-	lookup_32bit(RGI2, RFS1, op1, op2, op3); \
-	shrq $16, RGI2; \
-	lookup_32bit(RGI2, RFS3, op1, op2, op3); \
-	shlq $32, RFS3; \
-	orq RFS1, RFS3; \
+	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
+	shlq $32, RFS2; \
+	orq RFS1, RFS2; \
+	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
+	shlq $32, RFS1; \
+	orq RFS1, RFS3; \
 	\
 	vmovq RFS2, x; \
 	vpinsrq $1, RFS3, x, x;

-#define F1(b, x) F(b, x, vpaddd, xorl, subl, addl)
-#define F2(b, x) F(b, x, vpxor, subl, addl, xorl)
-#define F3(b, x) F(b, x, vpsubd, addl, xorl, subl)
+#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
+	F_head(b1, RX, RGI1, RGI2, op0); \
+	F_head(b2, RX, RGI3, RGI4, op0); \
+	\
+	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
+	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
+	\
+	vpxor a1, RX, a1; \
+	vpxor a2, RTMP, a2;

-#define qop(in, out, x, f) \
-	F ## f(in ## 1, x); \
-	vpxor out ## 1, x, out ## 1; \
-	F ## f(in ## 2, x); \
-	vpxor out ## 2, x, out ## 2; \
+#define F1_2(a1, b1, a2, b2) \
+	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
+#define F2_2(a1, b1, a2, b2) \
+	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
+#define F3_2(a1, b1, a2, b2) \
+	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
+
+#define qop(in, out, f) \
+	F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2);
+
+#define get_round_keys(nn) \
+	vbroadcastss (km+(4*(nn)))(CTX), RKM; \
+	vpand R1ST, RKR, RKRF; \
+	vpsubq RKRF, R32, RKRR; \
+	vpsrldq $1, RKR, RKR;

 #define Q(n) \
-	vbroadcastss (km+(4*(4*n+0)))(CTX), RKM; \
-	vpinsrb $0, (kr+(4*n+0))(CTX), RKRF, RKRF; \
-	vpsubq RKRF, R32, RKRR; \
-	qop(RD, RC, RX, 1); \
+	get_round_keys(4*n+0); \
+	qop(RD, RC, 1); \
 	\
-	vbroadcastss (km+(4*(4*n+1)))(CTX), RKM; \
-	vpinsrb $0, (kr+(4*n+1))(CTX), RKRF, RKRF; \
-	vpsubq RKRF, R32, RKRR; \
-	qop(RC, RB, RX, 2); \
+	get_round_keys(4*n+1); \
+	qop(RC, RB, 2); \
 	\
-	vbroadcastss (km+(4*(4*n+2)))(CTX), RKM; \
-	vpinsrb $0, (kr+(4*n+2))(CTX), RKRF, RKRF; \
-	vpsubq RKRF, R32, RKRR; \
-	qop(RB, RA, RX, 3); \
+	get_round_keys(4*n+2); \
+	qop(RB, RA, 3); \
 	\
-	vbroadcastss (km+(4*(4*n+3)))(CTX), RKM; \
-	vpinsrb $0, (kr+(4*n+3))(CTX), RKRF, RKRF; \
-	vpsubq RKRF, R32, RKRR; \
-	qop(RA, RD, RX, 1);
+	get_round_keys(4*n+3); \
+	qop(RA, RD, 1);

 #define QBAR(n) \
-	vbroadcastss (km+(4*(4*n+3)))(CTX), RKM; \
-	vpinsrb $0, (kr+(4*n+3))(CTX), RKRF, RKRF; \
-	vpsubq RKRF, R32, RKRR; \
-	qop(RA, RD, RX, 1); \
+	get_round_keys(4*n+3); \
+	qop(RA, RD, 1); \
 	\
-	vbroadcastss (km+(4*(4*n+2)))(CTX), RKM; \
-	vpinsrb $0, (kr+(4*n+2))(CTX), RKRF, RKRF; \
-	vpsubq RKRF, R32, RKRR; \
-	qop(RB, RA, RX, 3); \
+	get_round_keys(4*n+2); \
+	qop(RB, RA, 3); \
 	\
-	vbroadcastss (km+(4*(4*n+1)))(CTX), RKM; \
-	vpinsrb $0, (kr+(4*n+1))(CTX), RKRF, RKRF; \
-	vpsubq RKRF, R32, RKRR; \
-	qop(RC, RB, RX, 2); \
+	get_round_keys(4*n+1); \
+	qop(RC, RB, 2); \
 	\
-	vbroadcastss (km+(4*(4*n+0)))(CTX), RKM; \
-	vpinsrb $0, (kr+(4*n+0))(CTX), RKRF, RKRF; \
-	vpsubq RKRF, R32, RKRR; \
-	qop(RD, RC, RX, 1);
+	get_round_keys(4*n+0); \
+	qop(RD, RC, 1);
+
+#define shuffle(mask) \
+	vpshufb mask, RKR, RKR;
+
+#define preload_rkr(n, do_mask, mask) \
+	vbroadcastss .L16_mask, RKR; \
+	/* add 16-bit rotation to key rotations (mod 32) */ \
+	vpxor (kr+n*16)(CTX), RKR, RKR; \
+	do_mask(mask);

 #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
 	vpunpckldq x1, x0, t0; \
@@ -185,37 +205,37 @@
 	vpunpcklqdq x3, t2, x2; \
 	vpunpckhqdq x3, t2, x3;

-#define inpack_blocks(in, x0, x1, x2, x3, t0, t1, t2) \
+#define inpack_blocks(in, x0, x1, x2, x3, t0, t1, t2, rmask) \
 	vmovdqu (0*4*4)(in), x0; \
 	vmovdqu (1*4*4)(in), x1; \
 	vmovdqu (2*4*4)(in), x2; \
 	vmovdqu (3*4*4)(in), x3; \
-	vpshufb RMASK, x0, x0; \
-	vpshufb RMASK, x1, x1; \
-	vpshufb RMASK, x2, x2; \
-	vpshufb RMASK, x3, x3; \
+	vpshufb rmask, x0, x0; \
+	vpshufb rmask, x1, x1; \
+	vpshufb rmask, x2, x2; \
+	vpshufb rmask, x3, x3; \
 	\
 	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

-#define outunpack_blocks(out, x0, x1, x2, x3, t0, t1, t2) \
+#define outunpack_blocks(out, x0, x1, x2, x3, t0, t1, t2, rmask) \
 	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
 	\
-	vpshufb RMASK, x0, x0; \
-	vpshufb RMASK, x1, x1; \
-	vpshufb RMASK, x2, x2; \
-	vpshufb RMASK, x3, x3; \
+	vpshufb rmask, x0, x0; \
+	vpshufb rmask, x1, x1; \
+	vpshufb rmask, x2, x2; \
+	vpshufb rmask, x3, x3; \
 	vmovdqu x0, (0*4*4)(out); \
 	vmovdqu x1, (1*4*4)(out); \
 	vmovdqu x2, (2*4*4)(out); \
 	vmovdqu x3, (3*4*4)(out);

-#define outunpack_xor_blocks(out, x0, x1, x2, x3, t0, t1, t2) \
+#define outunpack_xor_blocks(out, x0, x1, x2, x3, t0, t1, t2, rmask) \
 	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
 	\
-	vpshufb RMASK, x0, x0; \
-	vpshufb RMASK, x1, x1; \
-	vpshufb RMASK, x2, x2; \
-	vpshufb RMASK, x3, x3; \
+	vpshufb rmask, x0, x0; \
+	vpshufb rmask, x1, x1; \
+	vpshufb rmask, x2, x2; \
+	vpshufb rmask, x3, x3; \
 	vpxor (0*4*4)(out), x0, x0; \
 	vmovdqu x0, (0*4*4)(out); \
 	vpxor (1*4*4)(out), x1, x1; \
@@ -225,11 +245,29 @@
 	vpxor (3*4*4)(out), x3, x3; \
 	vmovdqu x3, (3*4*4)(out);

+.data
+
 .align 16
 .Lbswap_mask:
 	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+.Lrkr_enc_Q_Q_QBAR_QBAR:
+	.byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
+.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
+	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+.Lrkr_dec_Q_Q_Q_Q:
+	.byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
+.Lrkr_dec_Q_Q_QBAR_QBAR:
+	.byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
+.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
+	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+.L16_mask:
+	.byte 16, 16, 16, 16
 .L32_mask:
-	.byte 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,0, 0, 0, 0, 0
+	.byte 32, 0, 0, 0
+.Lfirst_mask:
+	.byte 0x1f, 0, 0, 0
+
+.text

 .align 16
 .global __cast6_enc_blk_8way
@@ -243,28 +281,31 @@ __cast6_enc_blk_8way:
 	 *	%rcx: bool, if true: xor output
 	 */

+	pushq %rbp;
 	pushq %rbx;
 	pushq %rcx;

-	vmovdqu .Lbswap_mask, RMASK;
-	vmovdqu .L32_mask, R32;
-	vpxor RKRF, RKRF, RKRF;
+	vmovdqa .Lbswap_mask, RKM;
+	vmovd .Lfirst_mask, R1ST;
+	vmovd .L32_mask, R32;

 	leaq (4*4*4)(%rdx), %rax;
-	inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RTMP, RX, RKM);
-	inpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKM);
+	inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
+	inpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

-	xorq RID1, RID1;
-	xorq RID2, RID2;
+	movq %rsi, %r11;

+	preload_rkr(0, dummy, none);
 	Q(0);
 	Q(1);
 	Q(2);
 	Q(3);
+	preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR);
 	Q(4);
 	Q(5);
 	QBAR(6);
 	QBAR(7);
+	preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR);
 	QBAR(8);
 	QBAR(9);
 	QBAR(10);
@@ -272,20 +313,22 @@ __cast6_enc_blk_8way:

 	popq %rcx;
 	popq %rbx;
+	popq %rbp;

-	leaq (4*4*4)(%rsi), %rax;
+	vmovdqa .Lbswap_mask, RKM;
+	leaq (4*4*4)(%r11), %rax;

 	testb %cl, %cl;
 	jnz __enc_xor8;

-	outunpack_blocks(%rsi, RA1, RB1, RC1, RD1, RTMP, RX, RKM);
-	outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKM);
+	outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
+	outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

 	ret;

 __enc_xor8:
-	outunpack_xor_blocks(%rsi, RA1, RB1, RC1, RD1, RTMP, RX, RKM);
-	outunpack_xor_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKM);
+	outunpack_xor_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
+	outunpack_xor_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

 	ret;
@@ -300,36 +343,41 @@ cast6_dec_blk_8way:
 	 *	%rdx: src
 	 */

+	pushq %rbp;
 	pushq %rbx;

-	vmovdqu .Lbswap_mask, RMASK;
-	vmovdqu .L32_mask, R32;
-	vpxor RKRF, RKRF, RKRF;
+	vmovdqa .Lbswap_mask, RKM;
+	vmovd .Lfirst_mask, R1ST;
+	vmovd .L32_mask, R32;

 	leaq (4*4*4)(%rdx), %rax;
-	inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RTMP, RX, RKM);
-	inpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKM);
+	inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
+	inpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

-	xorq RID1, RID1;
-	xorq RID2, RID2;
+	movq %rsi, %r11;

+	preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
 	Q(11);
 	Q(10);
 	Q(9);
 	Q(8);
+	preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR);
 	Q(7);
 	Q(6);
 	QBAR(5);
 	QBAR(4);
+	preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR);
 	QBAR(3);
 	QBAR(2);
 	QBAR(1);
 	QBAR(0);

 	popq %rbx;
+	popq %rbp;

-	leaq (4*4*4)(%rsi), %rax;
-	outunpack_blocks(%rsi, RA1, RB1, RC1, RD1, RTMP, RX, RKM);
-	outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKM);
+	vmovdqa .Lbswap_mask, RKM;
+	leaq (4*4*4)(%r11), %rax;
+	outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
+	outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

 	ret;