Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (54 commits)
  crypto: gf128mul - remove leftover "(EXPERIMENTAL)" in Kconfig
  crypto: serpent-sse2 - remove unneeded LRW/XTS #ifdefs
  crypto: serpent-sse2 - select LRW and XTS
  crypto: twofish-x86_64-3way - remove unneeded LRW/XTS #ifdefs
  crypto: twofish-x86_64-3way - select LRW and XTS
  crypto: xts - remove dependency on EXPERIMENTAL
  crypto: lrw - remove dependency on EXPERIMENTAL
  crypto: picoxcell - fix boolean and / or confusion
  crypto: caam - remove DECO access initialization code
  crypto: caam - fix polarity of "propagate error" logic
  crypto: caam - more desc.h cleanups
  crypto: caam - desc.h - convert spaces to tabs
  crypto: talitos - convert talitos_error to struct device
  crypto: talitos - remove NO_IRQ references
  crypto: talitos - fix bad kfree
  crypto: convert drivers/crypto/* to use module_platform_driver()
  char: hw_random: convert drivers/char/hw_random/* to use module_platform_driver()
  crypto: serpent-sse2 - should select CRYPTO_CRYPTD
  crypto: serpent - rename serpent.c to serpent_generic.c
  crypto: serpent - cleanup checkpatch errors and warnings
  ...
Linus Torvalds 2012-01-10 22:01:27 -08:00
Parents: e7691a1ce3 08c70fc3a2
Commit: 4f58cb90bc
44 changed files, 8606 additions, 1971 deletions


@@ -5,12 +5,14 @@
obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o
obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o
obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o
obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
@@ -20,12 +22,14 @@ obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
aes-i586-y := aes-i586-asm_32.o aes_glue.o
twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o
aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o
salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o


@@ -0,0 +1,638 @@
/*
* Serpent Cipher 4-way parallel algorithm (i586/SSE2)
*
* Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* Based on crypto/serpent.c by
* Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no>
* 2003 Herbert Valerio Riedel <hvr@gnu.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
.file "serpent-sse2-i586-asm_32.S"
.text
#define arg_ctx 4
#define arg_dst 8
#define arg_src 12
#define arg_xor 16
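/* i586 cdecl calling convention: the return address sits at 0(%esp),
 * so the four arguments are found at offsets 4, 8, 12 and 16. */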
/**********************************************************************
4-way SSE2 serpent
**********************************************************************/
#define CTX %edx
#define RA %xmm0
#define RB %xmm1
#define RC %xmm2
#define RD %xmm3
#define RE %xmm4
#define RT0 %xmm5
#define RT1 %xmm6
#define RNOT %xmm7
#define get_key(i, j, t) \
movd (4*(i)+(j))*4(CTX), t; \
pshufd $0, t, t;
#define K(x0, x1, x2, x3, x4, i) \
get_key(i, 0, x4); \
get_key(i, 1, RT0); \
get_key(i, 2, RT1); \
pxor x4, x0; \
pxor RT0, x1; \
pxor RT1, x2; \
get_key(i, 3, x4); \
pxor x4, x3;
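For readers following the SSE2 code from C: get_key broadcasts one 32-bit word of the expanded key into all four lanes of an XMM register, so K applies the round-key XOR of the generic implementation (compare the K macro of crypto/serpent.c near the end of this page) to four blocks at once. A minimal scalar sketch of the same operation; serpent_mix_key is a hypothetical name:

/* XOR round key i into one block; k is the 132-word expanded key.
 * The SSE2 K macro performs these four XORs on four blocks at once,
 * one 32-bit slice per XMM lane. */
static inline void serpent_mix_key(u32 *x0, u32 *x1, u32 *x2, u32 *x3,
				   const u32 *k, int i)
{
	*x0 ^= k[4 * i + 0];
	*x1 ^= k[4 * i + 1];
	*x2 ^= k[4 * i + 2];
	*x3 ^= k[4 * i + 3];
}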
#define LK(x0, x1, x2, x3, x4, i) \
movdqa x0, x4; \
pslld $13, x0; \
psrld $(32 - 13), x4; \
por x4, x0; \
pxor x0, x1; \
movdqa x2, x4; \
pslld $3, x2; \
psrld $(32 - 3), x4; \
por x4, x2; \
pxor x2, x1; \
movdqa x1, x4; \
pslld $1, x1; \
psrld $(32 - 1), x4; \
por x4, x1; \
movdqa x0, x4; \
pslld $3, x4; \
pxor x2, x3; \
pxor x4, x3; \
movdqa x3, x4; \
pslld $7, x3; \
psrld $(32 - 7), x4; \
por x4, x3; \
movdqa x1, x4; \
pslld $7, x4; \
pxor x1, x0; \
pxor x3, x0; \
pxor x3, x2; \
pxor x4, x2; \
movdqa x0, x4; \
get_key(i, 1, RT0); \
pxor RT0, x1; \
get_key(i, 3, RT0); \
pxor RT0, x3; \
pslld $5, x0; \
psrld $(32 - 5), x4; \
por x4, x0; \
movdqa x2, x4; \
pslld $22, x2; \
psrld $(32 - 22), x4; \
por x4, x2; \
get_key(i, 0, RT0); \
pxor RT0, x0; \
get_key(i, 2, RT0); \
pxor RT0, x2;
#define KL(x0, x1, x2, x3, x4, i) \
K(x0, x1, x2, x3, x4, i); \
movdqa x0, x4; \
psrld $5, x0; \
pslld $(32 - 5), x4; \
por x4, x0; \
movdqa x2, x4; \
psrld $22, x2; \
pslld $(32 - 22), x4; \
por x4, x2; \
pxor x3, x2; \
pxor x3, x0; \
movdqa x1, x4; \
pslld $7, x4; \
pxor x1, x0; \
pxor x4, x2; \
movdqa x1, x4; \
psrld $1, x1; \
pslld $(32 - 1), x4; \
por x4, x1; \
movdqa x3, x4; \
psrld $7, x3; \
pslld $(32 - 7), x4; \
por x4, x3; \
pxor x0, x1; \
movdqa x0, x4; \
pslld $3, x4; \
pxor x4, x3; \
movdqa x0, x4; \
psrld $13, x0; \
pslld $(32 - 13), x4; \
por x4, x0; \
pxor x2, x1; \
pxor x2, x3; \
movdqa x2, x4; \
psrld $3, x2; \
pslld $(32 - 3), x4; \
por x4, x2;
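LK is Serpent's linear transform fused with the next round-key XOR, and KL is its inverse for the decryption direction; each movdqa/pslld/psrld/por group builds a 32-bit rotate out of shifts, since SSE2 has no packed rotate instruction. What LK computes per block, with the key XORs left out, in scalar C (compare the generic LK macro near the end of this page; serpent_lt is a hypothetical name, rol32 comes from <linux/bitops.h>):

/* Serpent forward linear transform on one block. */
static inline void serpent_lt(u32 *x0, u32 *x1, u32 *x2, u32 *x3)
{
	*x0 = rol32(*x0, 13);
	*x2 = rol32(*x2, 3);
	*x1 ^= *x0 ^ *x2;
	*x3 ^= *x2 ^ (*x0 << 3);
	*x1 = rol32(*x1, 1);
	*x3 = rol32(*x3, 7);
	*x0 ^= *x1 ^ *x3;
	*x2 ^= *x3 ^ (*x1 << 7);
	*x0 = rol32(*x0, 5);
	*x2 = rol32(*x2, 22);
}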
#define S0(x0, x1, x2, x3, x4) \
movdqa x3, x4; \
por x0, x3; \
pxor x4, x0; \
pxor x2, x4; \
pxor RNOT, x4; \
pxor x1, x3; \
pand x0, x1; \
pxor x4, x1; \
pxor x0, x2; \
pxor x3, x0; \
por x0, x4; \
pxor x2, x0; \
pand x1, x2; \
pxor x2, x3; \
pxor RNOT, x1; \
pxor x4, x2; \
pxor x2, x1;
#define S1(x0, x1, x2, x3, x4) \
movdqa x1, x4; \
pxor x0, x1; \
pxor x3, x0; \
pxor RNOT, x3; \
pand x1, x4; \
por x1, x0; \
pxor x2, x3; \
pxor x3, x0; \
pxor x3, x1; \
pxor x4, x3; \
por x4, x1; \
pxor x2, x4; \
pand x0, x2; \
pxor x1, x2; \
por x0, x1; \
pxor RNOT, x0; \
pxor x2, x0; \
pxor x1, x4;
#define S2(x0, x1, x2, x3, x4) \
pxor RNOT, x3; \
pxor x0, x1; \
movdqa x0, x4; \
pand x2, x0; \
pxor x3, x0; \
por x4, x3; \
pxor x1, x2; \
pxor x1, x3; \
pand x0, x1; \
pxor x2, x0; \
pand x3, x2; \
por x1, x3; \
pxor RNOT, x0; \
pxor x0, x3; \
pxor x0, x4; \
pxor x2, x0; \
por x2, x1;
#define S3(x0, x1, x2, x3, x4) \
movdqa x1, x4; \
pxor x3, x1; \
por x0, x3; \
pand x0, x4; \
pxor x2, x0; \
pxor x1, x2; \
pand x3, x1; \
pxor x3, x2; \
por x4, x0; \
pxor x3, x4; \
pxor x0, x1; \
pand x3, x0; \
pand x4, x3; \
pxor x2, x3; \
por x1, x4; \
pand x1, x2; \
pxor x3, x4; \
pxor x3, x0; \
pxor x2, x3;
#define S4(x0, x1, x2, x3, x4) \
movdqa x3, x4; \
pand x0, x3; \
pxor x4, x0; \
pxor x2, x3; \
por x4, x2; \
pxor x1, x0; \
pxor x3, x4; \
por x0, x2; \
pxor x1, x2; \
pand x0, x1; \
pxor x4, x1; \
pand x2, x4; \
pxor x3, x2; \
pxor x0, x4; \
por x1, x3; \
pxor RNOT, x1; \
pxor x0, x3;
#define S5(x0, x1, x2, x3, x4) \
movdqa x1, x4; \
por x0, x1; \
pxor x1, x2; \
pxor RNOT, x3; \
pxor x0, x4; \
pxor x2, x0; \
pand x4, x1; \
por x3, x4; \
pxor x0, x4; \
pand x3, x0; \
pxor x3, x1; \
pxor x2, x3; \
pxor x1, x0; \
pand x4, x2; \
pxor x2, x1; \
pand x0, x2; \
pxor x2, x3;
#define S6(x0, x1, x2, x3, x4) \
movdqa x1, x4; \
pxor x0, x3; \
pxor x2, x1; \
pxor x0, x2; \
pand x3, x0; \
por x3, x1; \
pxor RNOT, x4; \
pxor x1, x0; \
pxor x2, x1; \
pxor x4, x3; \
pxor x0, x4; \
pand x0, x2; \
pxor x1, x4; \
pxor x3, x2; \
pand x1, x3; \
pxor x0, x3; \
pxor x2, x1;
#define S7(x0, x1, x2, x3, x4) \
pxor RNOT, x1; \
movdqa x1, x4; \
pxor RNOT, x0; \
pand x2, x1; \
pxor x3, x1; \
por x4, x3; \
pxor x2, x4; \
pxor x3, x2; \
pxor x0, x3; \
por x1, x0; \
pand x0, x2; \
pxor x4, x0; \
pxor x3, x4; \
pand x0, x3; \
pxor x1, x4; \
pxor x4, x2; \
pxor x1, x3; \
por x0, x4; \
pxor x1, x4;
#define SI0(x0, x1, x2, x3, x4) \
movdqa x3, x4; \
pxor x0, x1; \
por x1, x3; \
pxor x1, x4; \
pxor RNOT, x0; \
pxor x3, x2; \
pxor x0, x3; \
pand x1, x0; \
pxor x2, x0; \
pand x3, x2; \
pxor x4, x3; \
pxor x3, x2; \
pxor x3, x1; \
pand x0, x3; \
pxor x0, x1; \
pxor x2, x0; \
pxor x3, x4;
#define SI1(x0, x1, x2, x3, x4) \
pxor x3, x1; \
movdqa x0, x4; \
pxor x2, x0; \
pxor RNOT, x2; \
por x1, x4; \
pxor x3, x4; \
pand x1, x3; \
pxor x2, x1; \
pand x4, x2; \
pxor x1, x4; \
por x3, x1; \
pxor x0, x3; \
pxor x0, x2; \
por x4, x0; \
pxor x4, x2; \
pxor x0, x1; \
pxor x1, x4;
#define SI2(x0, x1, x2, x3, x4) \
pxor x1, x2; \
movdqa x3, x4; \
pxor RNOT, x3; \
por x2, x3; \
pxor x4, x2; \
pxor x0, x4; \
pxor x1, x3; \
por x2, x1; \
pxor x0, x2; \
pxor x4, x1; \
por x3, x4; \
pxor x3, x2; \
pxor x2, x4; \
pand x1, x2; \
pxor x3, x2; \
pxor x4, x3; \
pxor x0, x4;
#define SI3(x0, x1, x2, x3, x4) \
pxor x1, x2; \
movdqa x1, x4; \
pand x2, x1; \
pxor x0, x1; \
por x4, x0; \
pxor x3, x4; \
pxor x3, x0; \
por x1, x3; \
pxor x2, x1; \
pxor x3, x1; \
pxor x2, x0; \
pxor x3, x2; \
pand x1, x3; \
pxor x0, x1; \
pand x2, x0; \
pxor x3, x4; \
pxor x0, x3; \
pxor x1, x0;
#define SI4(x0, x1, x2, x3, x4) \
pxor x3, x2; \
movdqa x0, x4; \
pand x1, x0; \
pxor x2, x0; \
por x3, x2; \
pxor RNOT, x4; \
pxor x0, x1; \
pxor x2, x0; \
pand x4, x2; \
pxor x0, x2; \
por x4, x0; \
pxor x3, x0; \
pand x2, x3; \
pxor x3, x4; \
pxor x1, x3; \
pand x0, x1; \
pxor x1, x4; \
pxor x3, x0;
#define SI5(x0, x1, x2, x3, x4) \
movdqa x1, x4; \
por x2, x1; \
pxor x4, x2; \
pxor x3, x1; \
pand x4, x3; \
pxor x3, x2; \
por x0, x3; \
pxor RNOT, x0; \
pxor x2, x3; \
por x0, x2; \
pxor x1, x4; \
pxor x4, x2; \
pand x0, x4; \
pxor x1, x0; \
pxor x3, x1; \
pand x2, x0; \
pxor x3, x2; \
pxor x2, x0; \
pxor x4, x2; \
pxor x3, x4;
#define SI6(x0, x1, x2, x3, x4) \
pxor x2, x0; \
movdqa x0, x4; \
pand x3, x0; \
pxor x3, x2; \
pxor x2, x0; \
pxor x1, x3; \
por x4, x2; \
pxor x3, x2; \
pand x0, x3; \
pxor RNOT, x0; \
pxor x1, x3; \
pand x2, x1; \
pxor x0, x4; \
pxor x4, x3; \
pxor x2, x4; \
pxor x1, x0; \
pxor x0, x2;
#define SI7(x0, x1, x2, x3, x4) \
movdqa x3, x4; \
pand x0, x3; \
pxor x2, x0; \
por x4, x2; \
pxor x1, x4; \
pxor RNOT, x0; \
por x3, x1; \
pxor x0, x4; \
pand x2, x0; \
pxor x1, x0; \
pand x2, x1; \
pxor x2, x3; \
pxor x3, x4; \
pand x3, x2; \
por x0, x3; \
pxor x4, x1; \
pxor x4, x3; \
pand x0, x4; \
pxor x2, x4;
#define transpose_4x4(x0, x1, x2, x3, t1, t2, t3) \
movdqa x2, t3; \
movdqa x0, t1; \
unpcklps x3, t3; \
movdqa x0, t2; \
unpcklps x1, t1; \
unpckhps x1, t2; \
movdqa t3, x1; \
unpckhps x3, x2; \
movdqa t1, x0; \
movhlps t1, x1; \
movdqa t2, t1; \
movlhps t3, x0; \
movlhps x2, t1; \
movhlps t2, x2; \
movdqa x2, x3; \
movdqa t1, x2;
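The net effect of this shuffle sequence: after transpose_4x4, register j holds 32-bit word j of all four blocks, so the sbox and linear-transform macros operate on four blocks in lockstep, one block per lane. A plain-C reference model (transpose_4x4_ref is a hypothetical name):

/* in[i][j] is word j of block i; on return, out[j] packs word j of
 * all four blocks, which is the layout the round macros expect. */
static void transpose_4x4_ref(u32 out[4][4], const u32 in[4][4])
{
	int i, j;

	for (i = 0; i < 4; i++)
		for (j = 0; j < 4; j++)
			out[j][i] = in[i][j];
}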
#define read_blocks(in, x0, x1, x2, x3, t0, t1, t2) \
movdqu (0*4*4)(in), x0; \
movdqu (1*4*4)(in), x1; \
movdqu (2*4*4)(in), x2; \
movdqu (3*4*4)(in), x3; \
\
transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
#define write_blocks(out, x0, x1, x2, x3, t0, t1, t2) \
transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
\
movdqu x0, (0*4*4)(out); \
movdqu x1, (1*4*4)(out); \
movdqu x2, (2*4*4)(out); \
movdqu x3, (3*4*4)(out);
#define xor_blocks(out, x0, x1, x2, x3, t0, t1, t2) \
transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
\
movdqu (0*4*4)(out), t0; \
pxor t0, x0; \
movdqu x0, (0*4*4)(out); \
movdqu (1*4*4)(out), t0; \
pxor t0, x1; \
movdqu x1, (1*4*4)(out); \
movdqu (2*4*4)(out), t0; \
pxor t0, x2; \
movdqu x2, (2*4*4)(out); \
movdqu (3*4*4)(out), t0; \
pxor t0, x3; \
movdqu x3, (3*4*4)(out);
.align 8
.global __serpent_enc_blk_4way
.type __serpent_enc_blk_4way,@function;
__serpent_enc_blk_4way:
/* input:
* arg_ctx(%esp): ctx, CTX
* arg_dst(%esp): dst
* arg_src(%esp): src
* arg_xor(%esp): bool, if true: xor output
*/
pcmpeqd RNOT, RNOT;
movl arg_ctx(%esp), CTX;
movl arg_src(%esp), %eax;
read_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
K(RA, RB, RC, RD, RE, 0);
S0(RA, RB, RC, RD, RE); LK(RC, RB, RD, RA, RE, 1);
S1(RC, RB, RD, RA, RE); LK(RE, RD, RA, RC, RB, 2);
S2(RE, RD, RA, RC, RB); LK(RB, RD, RE, RC, RA, 3);
S3(RB, RD, RE, RC, RA); LK(RC, RA, RD, RB, RE, 4);
S4(RC, RA, RD, RB, RE); LK(RA, RD, RB, RE, RC, 5);
S5(RA, RD, RB, RE, RC); LK(RC, RA, RD, RE, RB, 6);
S6(RC, RA, RD, RE, RB); LK(RD, RB, RA, RE, RC, 7);
S7(RD, RB, RA, RE, RC); LK(RC, RA, RE, RD, RB, 8);
S0(RC, RA, RE, RD, RB); LK(RE, RA, RD, RC, RB, 9);
S1(RE, RA, RD, RC, RB); LK(RB, RD, RC, RE, RA, 10);
S2(RB, RD, RC, RE, RA); LK(RA, RD, RB, RE, RC, 11);
S3(RA, RD, RB, RE, RC); LK(RE, RC, RD, RA, RB, 12);
S4(RE, RC, RD, RA, RB); LK(RC, RD, RA, RB, RE, 13);
S5(RC, RD, RA, RB, RE); LK(RE, RC, RD, RB, RA, 14);
S6(RE, RC, RD, RB, RA); LK(RD, RA, RC, RB, RE, 15);
S7(RD, RA, RC, RB, RE); LK(RE, RC, RB, RD, RA, 16);
S0(RE, RC, RB, RD, RA); LK(RB, RC, RD, RE, RA, 17);
S1(RB, RC, RD, RE, RA); LK(RA, RD, RE, RB, RC, 18);
S2(RA, RD, RE, RB, RC); LK(RC, RD, RA, RB, RE, 19);
S3(RC, RD, RA, RB, RE); LK(RB, RE, RD, RC, RA, 20);
S4(RB, RE, RD, RC, RA); LK(RE, RD, RC, RA, RB, 21);
S5(RE, RD, RC, RA, RB); LK(RB, RE, RD, RA, RC, 22);
S6(RB, RE, RD, RA, RC); LK(RD, RC, RE, RA, RB, 23);
S7(RD, RC, RE, RA, RB); LK(RB, RE, RA, RD, RC, 24);
S0(RB, RE, RA, RD, RC); LK(RA, RE, RD, RB, RC, 25);
S1(RA, RE, RD, RB, RC); LK(RC, RD, RB, RA, RE, 26);
S2(RC, RD, RB, RA, RE); LK(RE, RD, RC, RA, RB, 27);
S3(RE, RD, RC, RA, RB); LK(RA, RB, RD, RE, RC, 28);
S4(RA, RB, RD, RE, RC); LK(RB, RD, RE, RC, RA, 29);
S5(RB, RD, RE, RC, RA); LK(RA, RB, RD, RC, RE, 30);
S6(RA, RB, RD, RC, RE); LK(RD, RE, RB, RC, RA, 31);
S7(RD, RE, RB, RC, RA); K(RA, RB, RC, RD, RE, 32);
movl arg_dst(%esp), %eax;
cmpb $0, arg_xor(%esp);
jnz __enc_xor4;
write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
ret;
__enc_xor4:
xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
ret;
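For reference, the C-side prototypes of these two entry points, exactly as declared by the asm/serpent.h header added later in this merge:

asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst,
				       const u8 *src, bool xor);
asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst,
				     const u8 *src);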
.align 8
.global serpent_dec_blk_4way
.type serpent_dec_blk_4way,@function;
serpent_dec_blk_4way:
/* input:
* arg_ctx(%esp): ctx, CTX
* arg_dst(%esp): dst
* arg_src(%esp): src
*/
pcmpeqd RNOT, RNOT;
movl arg_ctx(%esp), CTX;
movl arg_src(%esp), %eax;
read_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
K(RA, RB, RC, RD, RE, 32);
SI7(RA, RB, RC, RD, RE); KL(RB, RD, RA, RE, RC, 31);
SI6(RB, RD, RA, RE, RC); KL(RA, RC, RE, RB, RD, 30);
SI5(RA, RC, RE, RB, RD); KL(RC, RD, RA, RE, RB, 29);
SI4(RC, RD, RA, RE, RB); KL(RC, RA, RB, RE, RD, 28);
SI3(RC, RA, RB, RE, RD); KL(RB, RC, RD, RE, RA, 27);
SI2(RB, RC, RD, RE, RA); KL(RC, RA, RE, RD, RB, 26);
SI1(RC, RA, RE, RD, RB); KL(RB, RA, RE, RD, RC, 25);
SI0(RB, RA, RE, RD, RC); KL(RE, RC, RA, RB, RD, 24);
SI7(RE, RC, RA, RB, RD); KL(RC, RB, RE, RD, RA, 23);
SI6(RC, RB, RE, RD, RA); KL(RE, RA, RD, RC, RB, 22);
SI5(RE, RA, RD, RC, RB); KL(RA, RB, RE, RD, RC, 21);
SI4(RA, RB, RE, RD, RC); KL(RA, RE, RC, RD, RB, 20);
SI3(RA, RE, RC, RD, RB); KL(RC, RA, RB, RD, RE, 19);
SI2(RC, RA, RB, RD, RE); KL(RA, RE, RD, RB, RC, 18);
SI1(RA, RE, RD, RB, RC); KL(RC, RE, RD, RB, RA, 17);
SI0(RC, RE, RD, RB, RA); KL(RD, RA, RE, RC, RB, 16);
SI7(RD, RA, RE, RC, RB); KL(RA, RC, RD, RB, RE, 15);
SI6(RA, RC, RD, RB, RE); KL(RD, RE, RB, RA, RC, 14);
SI5(RD, RE, RB, RA, RC); KL(RE, RC, RD, RB, RA, 13);
SI4(RE, RC, RD, RB, RA); KL(RE, RD, RA, RB, RC, 12);
SI3(RE, RD, RA, RB, RC); KL(RA, RE, RC, RB, RD, 11);
SI2(RA, RE, RC, RB, RD); KL(RE, RD, RB, RC, RA, 10);
SI1(RE, RD, RB, RC, RA); KL(RA, RD, RB, RC, RE, 9);
SI0(RA, RD, RB, RC, RE); KL(RB, RE, RD, RA, RC, 8);
SI7(RB, RE, RD, RA, RC); KL(RE, RA, RB, RC, RD, 7);
SI6(RE, RA, RB, RC, RD); KL(RB, RD, RC, RE, RA, 6);
SI5(RB, RD, RC, RE, RA); KL(RD, RA, RB, RC, RE, 5);
SI4(RD, RA, RB, RC, RE); KL(RD, RB, RE, RC, RA, 4);
SI3(RD, RB, RE, RC, RA); KL(RE, RD, RA, RC, RB, 3);
SI2(RE, RD, RA, RC, RB); KL(RD, RB, RC, RA, RE, 2);
SI1(RD, RB, RC, RA, RE); KL(RE, RB, RC, RA, RD, 1);
SI0(RE, RB, RC, RA, RD); K(RC, RD, RB, RE, RA, 0);
movl arg_dst(%esp), %eax;
write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA);
ret;


@@ -0,0 +1,761 @@
/*
* Serpent Cipher 8-way parallel algorithm (x86_64/SSE2)
*
* Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* Based on crypto/serpent.c by
* Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no>
* 2003 Herbert Valerio Riedel <hvr@gnu.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
.file "serpent-sse2-x86_64-asm_64.S"
.text
#define CTX %rdi
/**********************************************************************
8-way SSE2 serpent
**********************************************************************/
#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3
#define RE1 %xmm4
#define RA2 %xmm5
#define RB2 %xmm6
#define RC2 %xmm7
#define RD2 %xmm8
#define RE2 %xmm9
#define RNOT %xmm10
#define RK0 %xmm11
#define RK1 %xmm12
#define RK2 %xmm13
#define RK3 %xmm14
#define S0_1(x0, x1, x2, x3, x4) \
movdqa x3, x4; \
por x0, x3; \
pxor x4, x0; \
pxor x2, x4; \
pxor RNOT, x4; \
pxor x1, x3; \
pand x0, x1; \
pxor x4, x1; \
pxor x0, x2;
#define S0_2(x0, x1, x2, x3, x4) \
pxor x3, x0; \
por x0, x4; \
pxor x2, x0; \
pand x1, x2; \
pxor x2, x3; \
pxor RNOT, x1; \
pxor x4, x2; \
pxor x2, x1;
#define S1_1(x0, x1, x2, x3, x4) \
movdqa x1, x4; \
pxor x0, x1; \
pxor x3, x0; \
pxor RNOT, x3; \
pand x1, x4; \
por x1, x0; \
pxor x2, x3; \
pxor x3, x0; \
pxor x3, x1;
#define S1_2(x0, x1, x2, x3, x4) \
pxor x4, x3; \
por x4, x1; \
pxor x2, x4; \
pand x0, x2; \
pxor x1, x2; \
por x0, x1; \
pxor RNOT, x0; \
pxor x2, x0; \
pxor x1, x4;
#define S2_1(x0, x1, x2, x3, x4) \
pxor RNOT, x3; \
pxor x0, x1; \
movdqa x0, x4; \
pand x2, x0; \
pxor x3, x0; \
por x4, x3; \
pxor x1, x2; \
pxor x1, x3; \
pand x0, x1;
#define S2_2(x0, x1, x2, x3, x4) \
pxor x2, x0; \
pand x3, x2; \
por x1, x3; \
pxor RNOT, x0; \
pxor x0, x3; \
pxor x0, x4; \
pxor x2, x0; \
por x2, x1;
#define S3_1(x0, x1, x2, x3, x4) \
movdqa x1, x4; \
pxor x3, x1; \
por x0, x3; \
pand x0, x4; \
pxor x2, x0; \
pxor x1, x2; \
pand x3, x1; \
pxor x3, x2; \
por x4, x0; \
pxor x3, x4;
#define S3_2(x0, x1, x2, x3, x4) \
pxor x0, x1; \
pand x3, x0; \
pand x4, x3; \
pxor x2, x3; \
por x1, x4; \
pand x1, x2; \
pxor x3, x4; \
pxor x3, x0; \
pxor x2, x3;
#define S4_1(x0, x1, x2, x3, x4) \
movdqa x3, x4; \
pand x0, x3; \
pxor x4, x0; \
pxor x2, x3; \
por x4, x2; \
pxor x1, x0; \
pxor x3, x4; \
por x0, x2; \
pxor x1, x2;
#define S4_2(x0, x1, x2, x3, x4) \
pand x0, x1; \
pxor x4, x1; \
pand x2, x4; \
pxor x3, x2; \
pxor x0, x4; \
por x1, x3; \
pxor RNOT, x1; \
pxor x0, x3;
#define S5_1(x0, x1, x2, x3, x4) \
movdqa x1, x4; \
por x0, x1; \
pxor x1, x2; \
pxor RNOT, x3; \
pxor x0, x4; \
pxor x2, x0; \
pand x4, x1; \
por x3, x4; \
pxor x0, x4;
#define S5_2(x0, x1, x2, x3, x4) \
pand x3, x0; \
pxor x3, x1; \
pxor x2, x3; \
pxor x1, x0; \
pand x4, x2; \
pxor x2, x1; \
pand x0, x2; \
pxor x2, x3;
#define S6_1(x0, x1, x2, x3, x4) \
movdqa x1, x4; \
pxor x0, x3; \
pxor x2, x1; \
pxor x0, x2; \
pand x3, x0; \
por x3, x1; \
pxor RNOT, x4; \
pxor x1, x0; \
pxor x2, x1;
#define S6_2(x0, x1, x2, x3, x4) \
pxor x4, x3; \
pxor x0, x4; \
pand x0, x2; \
pxor x1, x4; \
pxor x3, x2; \
pand x1, x3; \
pxor x0, x3; \
pxor x2, x1;
#define S7_1(x0, x1, x2, x3, x4) \
pxor RNOT, x1; \
movdqa x1, x4; \
pxor RNOT, x0; \
pand x2, x1; \
pxor x3, x1; \
por x4, x3; \
pxor x2, x4; \
pxor x3, x2; \
pxor x0, x3; \
por x1, x0;
#define S7_2(x0, x1, x2, x3, x4) \
pand x0, x2; \
pxor x4, x0; \
pxor x3, x4; \
pand x0, x3; \
pxor x1, x4; \
pxor x4, x2; \
pxor x1, x3; \
por x0, x4; \
pxor x1, x4;
#define SI0_1(x0, x1, x2, x3, x4) \
movdqa x3, x4; \
pxor x0, x1; \
por x1, x3; \
pxor x1, x4; \
pxor RNOT, x0; \
pxor x3, x2; \
pxor x0, x3; \
pand x1, x0; \
pxor x2, x0;
#define SI0_2(x0, x1, x2, x3, x4) \
pand x3, x2; \
pxor x4, x3; \
pxor x3, x2; \
pxor x3, x1; \
pand x0, x3; \
pxor x0, x1; \
pxor x2, x0; \
pxor x3, x4;
#define SI1_1(x0, x1, x2, x3, x4) \
pxor x3, x1; \
movdqa x0, x4; \
pxor x2, x0; \
pxor RNOT, x2; \
por x1, x4; \
pxor x3, x4; \
pand x1, x3; \
pxor x2, x1; \
pand x4, x2;
#define SI1_2(x0, x1, x2, x3, x4) \
pxor x1, x4; \
por x3, x1; \
pxor x0, x3; \
pxor x0, x2; \
por x4, x0; \
pxor x4, x2; \
pxor x0, x1; \
pxor x1, x4;
#define SI2_1(x0, x1, x2, x3, x4) \
pxor x1, x2; \
movdqa x3, x4; \
pxor RNOT, x3; \
por x2, x3; \
pxor x4, x2; \
pxor x0, x4; \
pxor x1, x3; \
por x2, x1; \
pxor x0, x2;
#define SI2_2(x0, x1, x2, x3, x4) \
pxor x4, x1; \
por x3, x4; \
pxor x3, x2; \
pxor x2, x4; \
pand x1, x2; \
pxor x3, x2; \
pxor x4, x3; \
pxor x0, x4;
#define SI3_1(x0, x1, x2, x3, x4) \
pxor x1, x2; \
movdqa x1, x4; \
pand x2, x1; \
pxor x0, x1; \
por x4, x0; \
pxor x3, x4; \
pxor x3, x0; \
por x1, x3; \
pxor x2, x1;
#define SI3_2(x0, x1, x2, x3, x4) \
pxor x3, x1; \
pxor x2, x0; \
pxor x3, x2; \
pand x1, x3; \
pxor x0, x1; \
pand x2, x0; \
pxor x3, x4; \
pxor x0, x3; \
pxor x1, x0;
#define SI4_1(x0, x1, x2, x3, x4) \
pxor x3, x2; \
movdqa x0, x4; \
pand x1, x0; \
pxor x2, x0; \
por x3, x2; \
pxor RNOT, x4; \
pxor x0, x1; \
pxor x2, x0; \
pand x4, x2;
#define SI4_2(x0, x1, x2, x3, x4) \
pxor x0, x2; \
por x4, x0; \
pxor x3, x0; \
pand x2, x3; \
pxor x3, x4; \
pxor x1, x3; \
pand x0, x1; \
pxor x1, x4; \
pxor x3, x0;
#define SI5_1(x0, x1, x2, x3, x4) \
movdqa x1, x4; \
por x2, x1; \
pxor x4, x2; \
pxor x3, x1; \
pand x4, x3; \
pxor x3, x2; \
por x0, x3; \
pxor RNOT, x0; \
pxor x2, x3; \
por x0, x2;
#define SI5_2(x0, x1, x2, x3, x4) \
pxor x1, x4; \
pxor x4, x2; \
pand x0, x4; \
pxor x1, x0; \
pxor x3, x1; \
pand x2, x0; \
pxor x3, x2; \
pxor x2, x0; \
pxor x4, x2; \
pxor x3, x4;
#define SI6_1(x0, x1, x2, x3, x4) \
pxor x2, x0; \
movdqa x0, x4; \
pand x3, x0; \
pxor x3, x2; \
pxor x2, x0; \
pxor x1, x3; \
por x4, x2; \
pxor x3, x2; \
pand x0, x3;
#define SI6_2(x0, x1, x2, x3, x4) \
pxor RNOT, x0; \
pxor x1, x3; \
pand x2, x1; \
pxor x0, x4; \
pxor x4, x3; \
pxor x2, x4; \
pxor x1, x0; \
pxor x0, x2;
#define SI7_1(x0, x1, x2, x3, x4) \
movdqa x3, x4; \
pand x0, x3; \
pxor x2, x0; \
por x4, x2; \
pxor x1, x4; \
pxor RNOT, x0; \
por x3, x1; \
pxor x0, x4; \
pand x2, x0; \
pxor x1, x0;
#define SI7_2(x0, x1, x2, x3, x4) \
pand x2, x1; \
pxor x2, x3; \
pxor x3, x4; \
pand x3, x2; \
por x0, x3; \
pxor x4, x1; \
pxor x4, x3; \
pand x0, x4; \
pxor x2, x4;
#define get_key(i, j, t) \
movd (4*(i)+(j))*4(CTX), t; \
pshufd $0, t, t;
#define K2(x0, x1, x2, x3, x4, i) \
get_key(i, 0, RK0); \
get_key(i, 1, RK1); \
get_key(i, 2, RK2); \
get_key(i, 3, RK3); \
pxor RK0, x0 ## 1; \
pxor RK1, x1 ## 1; \
pxor RK2, x2 ## 1; \
pxor RK3, x3 ## 1; \
pxor RK0, x0 ## 2; \
pxor RK1, x1 ## 2; \
pxor RK2, x2 ## 2; \
pxor RK3, x3 ## 2;
#define LK2(x0, x1, x2, x3, x4, i) \
movdqa x0 ## 1, x4 ## 1; \
pslld $13, x0 ## 1; \
psrld $(32 - 13), x4 ## 1; \
por x4 ## 1, x0 ## 1; \
pxor x0 ## 1, x1 ## 1; \
movdqa x2 ## 1, x4 ## 1; \
pslld $3, x2 ## 1; \
psrld $(32 - 3), x4 ## 1; \
por x4 ## 1, x2 ## 1; \
pxor x2 ## 1, x1 ## 1; \
movdqa x0 ## 2, x4 ## 2; \
pslld $13, x0 ## 2; \
psrld $(32 - 13), x4 ## 2; \
por x4 ## 2, x0 ## 2; \
pxor x0 ## 2, x1 ## 2; \
movdqa x2 ## 2, x4 ## 2; \
pslld $3, x2 ## 2; \
psrld $(32 - 3), x4 ## 2; \
por x4 ## 2, x2 ## 2; \
pxor x2 ## 2, x1 ## 2; \
movdqa x1 ## 1, x4 ## 1; \
pslld $1, x1 ## 1; \
psrld $(32 - 1), x4 ## 1; \
por x4 ## 1, x1 ## 1; \
movdqa x0 ## 1, x4 ## 1; \
pslld $3, x4 ## 1; \
pxor x2 ## 1, x3 ## 1; \
pxor x4 ## 1, x3 ## 1; \
movdqa x3 ## 1, x4 ## 1; \
get_key(i, 1, RK1); \
movdqa x1 ## 2, x4 ## 2; \
pslld $1, x1 ## 2; \
psrld $(32 - 1), x4 ## 2; \
por x4 ## 2, x1 ## 2; \
movdqa x0 ## 2, x4 ## 2; \
pslld $3, x4 ## 2; \
pxor x2 ## 2, x3 ## 2; \
pxor x4 ## 2, x3 ## 2; \
movdqa x3 ## 2, x4 ## 2; \
get_key(i, 3, RK3); \
pslld $7, x3 ## 1; \
psrld $(32 - 7), x4 ## 1; \
por x4 ## 1, x3 ## 1; \
movdqa x1 ## 1, x4 ## 1; \
pslld $7, x4 ## 1; \
pxor x1 ## 1, x0 ## 1; \
pxor x3 ## 1, x0 ## 1; \
pxor x3 ## 1, x2 ## 1; \
pxor x4 ## 1, x2 ## 1; \
get_key(i, 0, RK0); \
pslld $7, x3 ## 2; \
psrld $(32 - 7), x4 ## 2; \
por x4 ## 2, x3 ## 2; \
movdqa x1 ## 2, x4 ## 2; \
pslld $7, x4 ## 2; \
pxor x1 ## 2, x0 ## 2; \
pxor x3 ## 2, x0 ## 2; \
pxor x3 ## 2, x2 ## 2; \
pxor x4 ## 2, x2 ## 2; \
get_key(i, 2, RK2); \
pxor RK1, x1 ## 1; \
pxor RK3, x3 ## 1; \
movdqa x0 ## 1, x4 ## 1; \
pslld $5, x0 ## 1; \
psrld $(32 - 5), x4 ## 1; \
por x4 ## 1, x0 ## 1; \
movdqa x2 ## 1, x4 ## 1; \
pslld $22, x2 ## 1; \
psrld $(32 - 22), x4 ## 1; \
por x4 ## 1, x2 ## 1; \
pxor RK0, x0 ## 1; \
pxor RK2, x2 ## 1; \
pxor RK1, x1 ## 2; \
pxor RK3, x3 ## 2; \
movdqa x0 ## 2, x4 ## 2; \
pslld $5, x0 ## 2; \
psrld $(32 - 5), x4 ## 2; \
por x4 ## 2, x0 ## 2; \
movdqa x2 ## 2, x4 ## 2; \
pslld $22, x2 ## 2; \
psrld $(32 - 22), x4 ## 2; \
por x4 ## 2, x2 ## 2; \
pxor RK0, x0 ## 2; \
pxor RK2, x2 ## 2;
#define KL2(x0, x1, x2, x3, x4, i) \
pxor RK0, x0 ## 1; \
pxor RK2, x2 ## 1; \
movdqa x0 ## 1, x4 ## 1; \
psrld $5, x0 ## 1; \
pslld $(32 - 5), x4 ## 1; \
por x4 ## 1, x0 ## 1; \
pxor RK3, x3 ## 1; \
pxor RK1, x1 ## 1; \
movdqa x2 ## 1, x4 ## 1; \
psrld $22, x2 ## 1; \
pslld $(32 - 22), x4 ## 1; \
por x4 ## 1, x2 ## 1; \
pxor x3 ## 1, x2 ## 1; \
pxor RK0, x0 ## 2; \
pxor RK2, x2 ## 2; \
movdqa x0 ## 2, x4 ## 2; \
psrld $5, x0 ## 2; \
pslld $(32 - 5), x4 ## 2; \
por x4 ## 2, x0 ## 2; \
pxor RK3, x3 ## 2; \
pxor RK1, x1 ## 2; \
movdqa x2 ## 2, x4 ## 2; \
psrld $22, x2 ## 2; \
pslld $(32 - 22), x4 ## 2; \
por x4 ## 2, x2 ## 2; \
pxor x3 ## 2, x2 ## 2; \
pxor x3 ## 1, x0 ## 1; \
movdqa x1 ## 1, x4 ## 1; \
pslld $7, x4 ## 1; \
pxor x1 ## 1, x0 ## 1; \
pxor x4 ## 1, x2 ## 1; \
movdqa x1 ## 1, x4 ## 1; \
psrld $1, x1 ## 1; \
pslld $(32 - 1), x4 ## 1; \
por x4 ## 1, x1 ## 1; \
pxor x3 ## 2, x0 ## 2; \
movdqa x1 ## 2, x4 ## 2; \
pslld $7, x4 ## 2; \
pxor x1 ## 2, x0 ## 2; \
pxor x4 ## 2, x2 ## 2; \
movdqa x1 ## 2, x4 ## 2; \
psrld $1, x1 ## 2; \
pslld $(32 - 1), x4 ## 2; \
por x4 ## 2, x1 ## 2; \
movdqa x3 ## 1, x4 ## 1; \
psrld $7, x3 ## 1; \
pslld $(32 - 7), x4 ## 1; \
por x4 ## 1, x3 ## 1; \
pxor x0 ## 1, x1 ## 1; \
movdqa x0 ## 1, x4 ## 1; \
pslld $3, x4 ## 1; \
pxor x4 ## 1, x3 ## 1; \
movdqa x0 ## 1, x4 ## 1; \
movdqa x3 ## 2, x4 ## 2; \
psrld $7, x3 ## 2; \
pslld $(32 - 7), x4 ## 2; \
por x4 ## 2, x3 ## 2; \
pxor x0 ## 2, x1 ## 2; \
movdqa x0 ## 2, x4 ## 2; \
pslld $3, x4 ## 2; \
pxor x4 ## 2, x3 ## 2; \
movdqa x0 ## 2, x4 ## 2; \
psrld $13, x0 ## 1; \
pslld $(32 - 13), x4 ## 1; \
por x4 ## 1, x0 ## 1; \
pxor x2 ## 1, x1 ## 1; \
pxor x2 ## 1, x3 ## 1; \
movdqa x2 ## 1, x4 ## 1; \
psrld $3, x2 ## 1; \
pslld $(32 - 3), x4 ## 1; \
por x4 ## 1, x2 ## 1; \
psrld $13, x0 ## 2; \
pslld $(32 - 13), x4 ## 2; \
por x4 ## 2, x0 ## 2; \
pxor x2 ## 2, x1 ## 2; \
pxor x2 ## 2, x3 ## 2; \
movdqa x2 ## 2, x4 ## 2; \
psrld $3, x2 ## 2; \
pslld $(32 - 3), x4 ## 2; \
por x4 ## 2, x2 ## 2;
#define S(SBOX, x0, x1, x2, x3, x4) \
SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \
SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \
SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \
SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2);
#define SP(SBOX, x0, x1, x2, x3, x4, i) \
get_key(i, 0, RK0); \
SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \
get_key(i, 2, RK2); \
SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \
get_key(i, 3, RK3); \
SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \
get_key(i, 1, RK1); \
SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2);
#define transpose_4x4(x0, x1, x2, x3, t1, t2, t3) \
movdqa x2, t3; \
movdqa x0, t1; \
unpcklps x3, t3; \
movdqa x0, t2; \
unpcklps x1, t1; \
unpckhps x1, t2; \
movdqa t3, x1; \
unpckhps x3, x2; \
movdqa t1, x0; \
movhlps t1, x1; \
movdqa t2, t1; \
movlhps t3, x0; \
movlhps x2, t1; \
movhlps t2, x2; \
movdqa x2, x3; \
movdqa t1, x2;
#define read_blocks(in, x0, x1, x2, x3, t0, t1, t2) \
movdqu (0*4*4)(in), x0; \
movdqu (1*4*4)(in), x1; \
movdqu (2*4*4)(in), x2; \
movdqu (3*4*4)(in), x3; \
\
transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
#define write_blocks(out, x0, x1, x2, x3, t0, t1, t2) \
transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
\
movdqu x0, (0*4*4)(out); \
movdqu x1, (1*4*4)(out); \
movdqu x2, (2*4*4)(out); \
movdqu x3, (3*4*4)(out);
#define xor_blocks(out, x0, x1, x2, x3, t0, t1, t2) \
transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
\
movdqu (0*4*4)(out), t0; \
pxor t0, x0; \
movdqu x0, (0*4*4)(out); \
movdqu (1*4*4)(out), t0; \
pxor t0, x1; \
movdqu x1, (1*4*4)(out); \
movdqu (2*4*4)(out), t0; \
pxor t0, x2; \
movdqu x2, (2*4*4)(out); \
movdqu (3*4*4)(out), t0; \
pxor t0, x3; \
movdqu x3, (3*4*4)(out);
.align 8
.global __serpent_enc_blk_8way
.type __serpent_enc_blk_8way,@function;
__serpent_enc_blk_8way:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src
* %rcx: bool, if true: xor output
*/
pcmpeqd RNOT, RNOT;
leaq (4*4*4)(%rdx), %rax;
read_blocks(%rdx, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
read_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
K2(RA, RB, RC, RD, RE, 0);
S(S0, RA, RB, RC, RD, RE); LK2(RC, RB, RD, RA, RE, 1);
S(S1, RC, RB, RD, RA, RE); LK2(RE, RD, RA, RC, RB, 2);
S(S2, RE, RD, RA, RC, RB); LK2(RB, RD, RE, RC, RA, 3);
S(S3, RB, RD, RE, RC, RA); LK2(RC, RA, RD, RB, RE, 4);
S(S4, RC, RA, RD, RB, RE); LK2(RA, RD, RB, RE, RC, 5);
S(S5, RA, RD, RB, RE, RC); LK2(RC, RA, RD, RE, RB, 6);
S(S6, RC, RA, RD, RE, RB); LK2(RD, RB, RA, RE, RC, 7);
S(S7, RD, RB, RA, RE, RC); LK2(RC, RA, RE, RD, RB, 8);
S(S0, RC, RA, RE, RD, RB); LK2(RE, RA, RD, RC, RB, 9);
S(S1, RE, RA, RD, RC, RB); LK2(RB, RD, RC, RE, RA, 10);
S(S2, RB, RD, RC, RE, RA); LK2(RA, RD, RB, RE, RC, 11);
S(S3, RA, RD, RB, RE, RC); LK2(RE, RC, RD, RA, RB, 12);
S(S4, RE, RC, RD, RA, RB); LK2(RC, RD, RA, RB, RE, 13);
S(S5, RC, RD, RA, RB, RE); LK2(RE, RC, RD, RB, RA, 14);
S(S6, RE, RC, RD, RB, RA); LK2(RD, RA, RC, RB, RE, 15);
S(S7, RD, RA, RC, RB, RE); LK2(RE, RC, RB, RD, RA, 16);
S(S0, RE, RC, RB, RD, RA); LK2(RB, RC, RD, RE, RA, 17);
S(S1, RB, RC, RD, RE, RA); LK2(RA, RD, RE, RB, RC, 18);
S(S2, RA, RD, RE, RB, RC); LK2(RC, RD, RA, RB, RE, 19);
S(S3, RC, RD, RA, RB, RE); LK2(RB, RE, RD, RC, RA, 20);
S(S4, RB, RE, RD, RC, RA); LK2(RE, RD, RC, RA, RB, 21);
S(S5, RE, RD, RC, RA, RB); LK2(RB, RE, RD, RA, RC, 22);
S(S6, RB, RE, RD, RA, RC); LK2(RD, RC, RE, RA, RB, 23);
S(S7, RD, RC, RE, RA, RB); LK2(RB, RE, RA, RD, RC, 24);
S(S0, RB, RE, RA, RD, RC); LK2(RA, RE, RD, RB, RC, 25);
S(S1, RA, RE, RD, RB, RC); LK2(RC, RD, RB, RA, RE, 26);
S(S2, RC, RD, RB, RA, RE); LK2(RE, RD, RC, RA, RB, 27);
S(S3, RE, RD, RC, RA, RB); LK2(RA, RB, RD, RE, RC, 28);
S(S4, RA, RB, RD, RE, RC); LK2(RB, RD, RE, RC, RA, 29);
S(S5, RB, RD, RE, RC, RA); LK2(RA, RB, RD, RC, RE, 30);
S(S6, RA, RB, RD, RC, RE); LK2(RD, RE, RB, RC, RA, 31);
S(S7, RD, RE, RB, RC, RA); K2(RA, RB, RC, RD, RE, 32);
leaq (4*4*4)(%rsi), %rax;
testb %cl, %cl;
jnz __enc_xor8;
write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
ret;
__enc_xor8:
xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
ret;
.align 8
.global serpent_dec_blk_8way
.type serpent_dec_blk_8way,@function;
serpent_dec_blk_8way:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src
*/
pcmpeqd RNOT, RNOT;
leaq (4*4*4)(%rdx), %rax;
read_blocks(%rdx, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
read_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
K2(RA, RB, RC, RD, RE, 32);
SP(SI7, RA, RB, RC, RD, RE, 31); KL2(RB, RD, RA, RE, RC, 31);
SP(SI6, RB, RD, RA, RE, RC, 30); KL2(RA, RC, RE, RB, RD, 30);
SP(SI5, RA, RC, RE, RB, RD, 29); KL2(RC, RD, RA, RE, RB, 29);
SP(SI4, RC, RD, RA, RE, RB, 28); KL2(RC, RA, RB, RE, RD, 28);
SP(SI3, RC, RA, RB, RE, RD, 27); KL2(RB, RC, RD, RE, RA, 27);
SP(SI2, RB, RC, RD, RE, RA, 26); KL2(RC, RA, RE, RD, RB, 26);
SP(SI1, RC, RA, RE, RD, RB, 25); KL2(RB, RA, RE, RD, RC, 25);
SP(SI0, RB, RA, RE, RD, RC, 24); KL2(RE, RC, RA, RB, RD, 24);
SP(SI7, RE, RC, RA, RB, RD, 23); KL2(RC, RB, RE, RD, RA, 23);
SP(SI6, RC, RB, RE, RD, RA, 22); KL2(RE, RA, RD, RC, RB, 22);
SP(SI5, RE, RA, RD, RC, RB, 21); KL2(RA, RB, RE, RD, RC, 21);
SP(SI4, RA, RB, RE, RD, RC, 20); KL2(RA, RE, RC, RD, RB, 20);
SP(SI3, RA, RE, RC, RD, RB, 19); KL2(RC, RA, RB, RD, RE, 19);
SP(SI2, RC, RA, RB, RD, RE, 18); KL2(RA, RE, RD, RB, RC, 18);
SP(SI1, RA, RE, RD, RB, RC, 17); KL2(RC, RE, RD, RB, RA, 17);
SP(SI0, RC, RE, RD, RB, RA, 16); KL2(RD, RA, RE, RC, RB, 16);
SP(SI7, RD, RA, RE, RC, RB, 15); KL2(RA, RC, RD, RB, RE, 15);
SP(SI6, RA, RC, RD, RB, RE, 14); KL2(RD, RE, RB, RA, RC, 14);
SP(SI5, RD, RE, RB, RA, RC, 13); KL2(RE, RC, RD, RB, RA, 13);
SP(SI4, RE, RC, RD, RB, RA, 12); KL2(RE, RD, RA, RB, RC, 12);
SP(SI3, RE, RD, RA, RB, RC, 11); KL2(RA, RE, RC, RB, RD, 11);
SP(SI2, RA, RE, RC, RB, RD, 10); KL2(RE, RD, RB, RC, RA, 10);
SP(SI1, RE, RD, RB, RC, RA, 9); KL2(RA, RD, RB, RC, RE, 9);
SP(SI0, RA, RD, RB, RC, RE, 8); KL2(RB, RE, RD, RA, RC, 8);
SP(SI7, RB, RE, RD, RA, RC, 7); KL2(RE, RA, RB, RC, RD, 7);
SP(SI6, RE, RA, RB, RC, RD, 6); KL2(RB, RD, RC, RE, RA, 6);
SP(SI5, RB, RD, RC, RE, RA, 5); KL2(RD, RA, RB, RC, RE, 5);
SP(SI4, RD, RA, RB, RC, RE, 4); KL2(RD, RB, RE, RC, RA, 4);
SP(SI3, RD, RB, RE, RC, RA, 3); KL2(RE, RD, RA, RC, RB, 3);
SP(SI2, RE, RD, RA, RC, RB, 2); KL2(RD, RB, RC, RA, RE, 2);
SP(SI1, RD, RB, RC, RA, RE, 1); KL2(RE, RB, RC, RA, RD, 1);
S(SI0, RE, RB, RC, RA, RD); K2(RC, RD, RB, RE, RA, 0);
leaq (4*4*4)(%rsi), %rax;
write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
ret;

The diff for this file is not shown because of its large size.


@@ -32,6 +32,8 @@
#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
/* regular block cipher functions from twofish_x86_64 module */
asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
@@ -432,6 +434,209 @@ static struct crypto_alg blk_ctr_alg = {
},
};
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
const unsigned int bsize = TF_BLOCK_SIZE;
struct twofish_ctx *ctx = priv;
int i;
if (nbytes == 3 * bsize) {
twofish_enc_blk_3way(ctx, srcdst, srcdst);
return;
}
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
twofish_enc_blk(ctx, srcdst, srcdst);
}
static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
const unsigned int bsize = TF_BLOCK_SIZE;
struct twofish_ctx *ctx = priv;
int i;
if (nbytes == 3 * bsize) {
twofish_dec_blk_3way(ctx, srcdst, srcdst);
return;
}
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
twofish_dec_blk(ctx, srcdst, srcdst);
}
struct twofish_lrw_ctx {
struct lrw_table_ctx lrw_table;
struct twofish_ctx twofish_ctx;
};
static int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
int err;
err = __twofish_setkey(&ctx->twofish_ctx, key, keylen - TF_BLOCK_SIZE,
&tfm->crt_flags);
if (err)
return err;
return lrw_init_table(&ctx->lrw_table, key + keylen - TF_BLOCK_SIZE);
}
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[3];
struct lrw_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
.table_ctx = &ctx->lrw_table,
.crypt_ctx = &ctx->twofish_ctx,
.crypt_fn = encrypt_callback,
};
return lrw_crypt(desc, dst, src, nbytes, &req);
}
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[3];
struct lrw_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
.table_ctx = &ctx->lrw_table,
.crypt_ctx = &ctx->twofish_ctx,
.crypt_fn = decrypt_callback,
};
return lrw_crypt(desc, dst, src, nbytes, &req);
}
static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
lrw_free_table(&ctx->lrw_table);
}
static struct crypto_alg blk_lrw_alg = {
.cra_name = "lrw(twofish)",
.cra_driver_name = "lrw-twofish-3way",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_lrw_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(blk_lrw_alg.cra_list),
.cra_exit = lrw_exit_tfm,
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE + TF_BLOCK_SIZE,
.max_keysize = TF_MAX_KEY_SIZE + TF_BLOCK_SIZE,
.ivsize = TF_BLOCK_SIZE,
.setkey = lrw_twofish_setkey,
.encrypt = lrw_encrypt,
.decrypt = lrw_decrypt,
},
},
};
struct twofish_xts_ctx {
struct twofish_ctx tweak_ctx;
struct twofish_ctx crypt_ctx;
};
static int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct twofish_xts_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
int err;
/* key consists of keys of equal size concatenated, therefore
* the length must be even
*/
if (keylen % 2) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
/* first half of xts-key is for crypt */
err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
if (err)
return err;
/* second half of xts-key is for tweak */
return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
flags);
}
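In other words, an xts(twofish) key is simply two equal-size Twofish keys back to back: a 64-byte key, for example, supplies a 256-bit data key followed by a 256-bit tweak key. That is why blk_xts_alg below advertises keysizes from TF_MIN_KEY_SIZE * 2 to TF_MAX_KEY_SIZE * 2.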
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[3];
struct xts_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
.tweak_ctx = &ctx->tweak_ctx,
.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
.crypt_ctx = &ctx->crypt_ctx,
.crypt_fn = encrypt_callback,
};
return xts_crypt(desc, dst, src, nbytes, &req);
}
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[3];
struct xts_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
.tweak_ctx = &ctx->tweak_ctx,
.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
.crypt_ctx = &ctx->crypt_ctx,
.crypt_fn = decrypt_callback,
};
return xts_crypt(desc, dst, src, nbytes, &req);
}
static struct crypto_alg blk_xts_alg = {
.cra_name = "xts(twofish)",
.cra_driver_name = "xts-twofish-3way",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_xts_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(blk_xts_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE * 2,
.max_keysize = TF_MAX_KEY_SIZE * 2,
.ivsize = TF_BLOCK_SIZE,
.setkey = xts_twofish_setkey,
.encrypt = xts_encrypt,
.decrypt = xts_decrypt,
},
},
};
int __init init(void)
{
int err;
@@ -445,9 +650,20 @@ int __init init(void)
err = crypto_register_alg(&blk_ctr_alg);
if (err)
goto ctr_err;
err = crypto_register_alg(&blk_lrw_alg);
if (err)
goto blk_lrw_err;
err = crypto_register_alg(&blk_xts_alg);
if (err)
goto blk_xts_err;
return 0;
crypto_unregister_alg(&blk_xts_alg);
blk_xts_err:
crypto_unregister_alg(&blk_lrw_alg);
blk_lrw_err:
crypto_unregister_alg(&blk_ctr_alg);
ctr_err:
crypto_unregister_alg(&blk_cbc_alg);
cbc_err:
@@ -458,6 +674,8 @@ ecb_err:
void __exit fini(void)
{
crypto_unregister_alg(&blk_xts_alg);
crypto_unregister_alg(&blk_lrw_alg);
crypto_unregister_alg(&blk_ctr_alg);
crypto_unregister_alg(&blk_cbc_alg);
crypto_unregister_alg(&blk_ecb_alg);


@@ -0,0 +1,63 @@
#ifndef ASM_X86_SERPENT_H
#define ASM_X86_SERPENT_H
#include <linux/crypto.h>
#include <crypto/serpent.h>
#ifdef CONFIG_X86_32
#define SERPENT_PARALLEL_BLOCKS 4
asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst,
const u8 *src);
static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
const u8 *src)
{
__serpent_enc_blk_4way(ctx, dst, src, false);
}
static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
const u8 *src)
{
__serpent_enc_blk_4way(ctx, dst, src, true);
}
static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
const u8 *src)
{
serpent_dec_blk_4way(ctx, dst, src);
}
#else
#define SERPENT_PARALLEL_BLOCKS 8
asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
asmlinkage void serpent_dec_blk_8way(struct serpent_ctx *ctx, u8 *dst,
const u8 *src);
static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
const u8 *src)
{
__serpent_enc_blk_8way(ctx, dst, src, false);
}
static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
const u8 *src)
{
__serpent_enc_blk_8way(ctx, dst, src, true);
}
static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
const u8 *src)
{
serpent_dec_blk_8way(ctx, dst, src);
}
#endif
#endif
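The serpent_sse2_glue.c diff that consumes these helpers was omitted above for size. A hedged sketch of the usage pattern, modeled directly on the twofish encrypt_callback shown earlier (serpent_crypt_chunk is a hypothetical name; __serpent_encrypt is the one-block routine exported by serpent_generic.c):

/* Bulk-process SERPENT_PARALLEL_BLOCKS at a time, falling back to
 * the generic one-block routine for any tail blocks. */
static void serpent_crypt_chunk(struct serpent_ctx *ctx, u8 *srcdst,
				unsigned int nbytes)
{
	const unsigned int bsize = SERPENT_BLOCK_SIZE;
	unsigned int i;

	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
		serpent_enc_blk_xway(ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		__serpent_encrypt(ctx, srcdst, srcdst);
}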


@@ -105,7 +105,7 @@ config CRYPTO_USER
depends on NET
select CRYPTO_MANAGER
help
Userapace configuration for cryptographic instantiations such as
Userspace configuration for cryptographic instantiations such as
cbc(aes).
config CRYPTO_MANAGER_DISABLE_TESTS
@@ -117,7 +117,7 @@ config CRYPTO_MANAGER_DISABLE_TESTS
algorithm registration.
config CRYPTO_GF128MUL
tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
tristate "GF(2^128) multiplication functions"
help
Efficient table driven implementation of multiplications in the
field GF(2^128). This is needed by some cypher modes. This
@@ -241,8 +241,7 @@
the input block by block.
config CRYPTO_LRW
tristate "LRW support (EXPERIMENTAL)"
depends on EXPERIMENTAL
tristate "LRW support"
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER
select CRYPTO_GF128MUL
@@ -262,8 +261,7 @@
This block cipher algorithm is required for RxRPC.
config CRYPTO_XTS
tristate "XTS support (EXPERIMENTAL)"
depends on EXPERIMENTAL
tristate "XTS support"
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER
select CRYPTO_GF128MUL
@@ -764,6 +762,46 @@ config CRYPTO_SERPENT
See also:
<http://www.cl.cam.ac.uk/~rja14/serpent.html>
config CRYPTO_SERPENT_SSE2_X86_64
tristate "Serpent cipher algorithm (x86_64/SSE2)"
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_SERPENT
select CRYPTO_LRW
select CRYPTO_XTS
help
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
Keys are allowed to be from 0 to 256 bits in length, in steps
of 8 bits.
This module provides the Serpent cipher algorithm, processing eight
blocks in parallel using the SSE2 instruction set.
See also:
<http://www.cl.cam.ac.uk/~rja14/serpent.html>
config CRYPTO_SERPENT_SSE2_586
tristate "Serpent cipher algorithm (i586/SSE2)"
depends on X86 && !64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_SERPENT
select CRYPTO_LRW
select CRYPTO_XTS
help
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
Keys are allowed to be from 0 to 256 bits in length, in steps
of 8 bits.
This module provides the Serpent cipher algorithm, processing four
blocks in parallel using the SSE2 instruction set.
See also:
<http://www.cl.cam.ac.uk/~rja14/serpent.html>
config CRYPTO_TEA
tristate "TEA, XTEA and XETA cipher algorithms"
select CRYPTO_ALGAPI
@@ -840,6 +878,8 @@ config CRYPTO_TWOFISH_X86_64_3WAY
select CRYPTO_ALGAPI
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
select CRYPTO_LRW
select CRYPTO_XTS
help
Twofish cipher algorithm (x86_64, 3-way parallel).


@@ -65,7 +65,7 @@ obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o
obj-$(CONFIG_CRYPTO_BLOWFISH_COMMON) += blowfish_common.o
obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia.o
obj-$(CONFIG_CRYPTO_CAST5) += cast5.o


@@ -518,6 +518,35 @@ err:
}
EXPORT_SYMBOL_GPL(crypto_register_instance);
int crypto_unregister_instance(struct crypto_alg *alg)
{
int err;
struct crypto_instance *inst = (void *)alg;
struct crypto_template *tmpl = inst->tmpl;
LIST_HEAD(users);
if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
return -EINVAL;
BUG_ON(atomic_read(&alg->cra_refcnt) != 1);
down_write(&crypto_alg_sem);
hlist_del_init(&inst->list);
err = crypto_remove_alg(alg, &users);
up_write(&crypto_alg_sem);
if (err)
return err;
tmpl->free(inst);
crypto_remove_final(&users);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_unregister_instance);
int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst, u32 mask)
{


@@ -414,10 +414,18 @@ static int fips_cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
static int fips_cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
{
u8 rdata[DEFAULT_BLK_SZ];
u8 *key = seed + DEFAULT_BLK_SZ;
int rc;
struct prng_context *prng = crypto_rng_ctx(tfm);
if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
return -EINVAL;
/* fips strictly requires seed != key */
if (!memcmp(seed, key, DEFAULT_PRNG_KSZ))
return -EINVAL;
rc = cprng_reset(tfm, seed, slen);
if (!rc)


@@ -298,7 +298,7 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
if (atomic_read(&alg->cra_refcnt) != 1)
return -EBUSY;
return crypto_unregister_alg(alg);
return crypto_unregister_instance(alg);
}
static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,


@@ -3,7 +3,7 @@
*
* Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
*
* Based om ecb.c
* Based on ecb.c
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
@@ -16,6 +16,7 @@
* http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
*
* The test vectors are included in the testing module tcrypt.[ch] */
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -26,21 +27,11 @@
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/lrw.h>
struct priv {
struct crypto_cipher *child;
/* optimizes multiplying a random (non incrementing, as at the
* start of a new sector) value with key2, we could also have
* used 4k optimization tables or no optimization at all. In the
* latter case we would have to store key2 here */
struct gf128mul_64k *table;
/* stores:
* key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
* key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
* key2*{ 0,0,...1,1,1,1,1 }, etc
* needed for optimized multiplication of incrementing values
* with key2 */
be128 mulinc[128];
struct lrw_table_ctx table;
};
static inline void setbit128_bbe(void *b, int bit)
@@ -54,28 +45,16 @@ static inline void setbit128_bbe(void *b, int bit)
), b);
}
static int setkey(struct crypto_tfm *parent, const u8 *key,
unsigned int keylen)
int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
{
struct priv *ctx = crypto_tfm_ctx(parent);
struct crypto_cipher *child = ctx->child;
int err, i;
be128 tmp = { 0 };
int bsize = crypto_cipher_blocksize(child);
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
if ((err = crypto_cipher_setkey(child, key, keylen - bsize)))
return err;
crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
int i;
if (ctx->table)
gf128mul_free_64k(ctx->table);
/* initialize multiplication table for Key2 */
ctx->table = gf128mul_init_64k_bbe((be128 *)(key + keylen - bsize));
ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
if (!ctx->table)
return -ENOMEM;
@@ -88,6 +67,34 @@ static int setkey(struct crypto_tfm *parent, const u8 *key,
return 0;
}
EXPORT_SYMBOL_GPL(lrw_init_table);
void lrw_free_table(struct lrw_table_ctx *ctx)
{
if (ctx->table)
gf128mul_free_64k(ctx->table);
}
EXPORT_SYMBOL_GPL(lrw_free_table);
static int setkey(struct crypto_tfm *parent, const u8 *key,
unsigned int keylen)
{
struct priv *ctx = crypto_tfm_ctx(parent);
struct crypto_cipher *child = ctx->child;
int err, bsize = LRW_BLOCK_SIZE;
const u8 *tweak = key + keylen - bsize;
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(child, key, keylen - bsize);
if (err)
return err;
crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return lrw_init_table(&ctx->table, tweak);
}
struct sinfo {
be128 t;
@@ -134,7 +141,7 @@ static int crypt(struct blkcipher_desc *d,
{
int err;
unsigned int avail;
const int bs = crypto_cipher_blocksize(ctx->child);
const int bs = LRW_BLOCK_SIZE;
struct sinfo s = {
.tfm = crypto_cipher_tfm(ctx->child),
.fn = fn
@@ -155,7 +162,7 @@
s.t = *iv;
/* T <- I*Key2 */
gf128mul_64k_bbe(&s.t, ctx->table);
gf128mul_64k_bbe(&s.t, ctx->table.table);
goto first;
@@ -163,7 +170,8 @@
do {
/* T <- I*Key2, using the optimization
* discussed in the specification */
be128_xor(&s.t, &s.t, &ctx->mulinc[get_index128(iv)]);
be128_xor(&s.t, &s.t,
&ctx->table.mulinc[get_index128(iv)]);
inc(iv);
first:
@@ -206,6 +214,85 @@ static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
crypto_cipher_alg(ctx->child)->cia_decrypt);
}
int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
struct scatterlist *ssrc, unsigned int nbytes,
struct lrw_crypt_req *req)
{
const unsigned int bsize = LRW_BLOCK_SIZE;
const unsigned int max_blks = req->tbuflen / bsize;
struct lrw_table_ctx *ctx = req->table_ctx;
struct blkcipher_walk walk;
unsigned int nblocks;
be128 *iv, *src, *dst, *t;
be128 *t_buf = req->tbuf;
int err, i;
BUG_ON(max_blks < 1);
blkcipher_walk_init(&walk, sdst, ssrc, nbytes);
err = blkcipher_walk_virt(desc, &walk);
nbytes = walk.nbytes;
if (!nbytes)
return err;
nblocks = min(walk.nbytes / bsize, max_blks);
src = (be128 *)walk.src.virt.addr;
dst = (be128 *)walk.dst.virt.addr;
/* calculate first value of T */
iv = (be128 *)walk.iv;
t_buf[0] = *iv;
/* T <- I*Key2 */
gf128mul_64k_bbe(&t_buf[0], ctx->table);
i = 0;
goto first;
for (;;) {
do {
for (i = 0; i < nblocks; i++) {
/* T <- I*Key2, using the optimization
* discussed in the specification */
be128_xor(&t_buf[i], t,
&ctx->mulinc[get_index128(iv)]);
inc(iv);
first:
t = &t_buf[i];
/* PP <- T xor P */
be128_xor(dst + i, t, src + i);
}
/* CC <- E(Key2,PP) */
req->crypt_fn(req->crypt_ctx, (u8 *)dst,
nblocks * bsize);
/* C <- T xor CC */
for (i = 0; i < nblocks; i++)
be128_xor(dst + i, dst + i, &t_buf[i]);
src += nblocks;
dst += nblocks;
nbytes -= nblocks * bsize;
nblocks = min(nbytes / bsize, max_blks);
} while (nblocks > 0);
err = blkcipher_walk_done(desc, &walk, nbytes);
nbytes = walk.nbytes;
if (!nbytes)
break;
nblocks = min(nbytes / bsize, max_blks);
src = (be128 *)walk.src.virt.addr;
dst = (be128 *)walk.dst.virt.addr;
}
return err;
}
EXPORT_SYMBOL_GPL(lrw_crypt);
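Both crypt() above and lrw_crypt() rely on the incremental-tweak optimization that the mulinc[] table (moved by this patch into struct lrw_table_ctx) exists for. The tweak for block index I is its GF(2^128) product with the second key, and consecutive indices differ only in a run of low-order bits, so one table lookup plus one XOR replaces a full multiplication:

    T_I = K_2 \otimes I, \qquad
    T_{I+1} = T_I \oplus \bigl(K_2 \otimes (I \oplus (I+1))\bigr)
            = T_I \oplus \mathrm{mulinc}[j], \qquad
    I \oplus (I+1) = 2^{j+1} - 1

Here j is the number of trailing one-bits of I, which is exactly what get_index128(iv) returns before inc(iv) advances the index, and mulinc[j] caches K_2 \otimes (2^{j+1} - 1).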
static int init_tfm(struct crypto_tfm *tfm)
{
struct crypto_cipher *cipher;
@@ -218,8 +305,9 @@ static int init_tfm(struct crypto_tfm *tfm)
if (IS_ERR(cipher))
return PTR_ERR(cipher);
if (crypto_cipher_blocksize(cipher) != 16) {
if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) {
*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
crypto_free_cipher(cipher);
return -EINVAL;
}
@@ -230,8 +318,8 @@ static void exit_tfm(struct crypto_tfm *tfm)
static void exit_tfm(struct crypto_tfm *tfm)
{
struct priv *ctx = crypto_tfm_ctx(tfm);
if (ctx->table)
gf128mul_free_64k(ctx->table);
lrw_free_table(&ctx->table);
crypto_free_cipher(ctx->child);
}


@@ -1,587 +0,0 @@
/*
* Cryptographic API.
*
* Serpent Cipher Algorithm.
*
* Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no>
* 2003 Herbert Valerio Riedel <hvr@gnu.org>
*
* Added tnepres support: Ruben Jesus Garcia Hernandez <ruben@ugr.es>, 18.10.2004
* Based on code by hvr
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/types.h>
/* Key is padded to the maximum of 256 bits before round key generation.
* Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
*/
#define SERPENT_MIN_KEY_SIZE 0
#define SERPENT_MAX_KEY_SIZE 32
#define SERPENT_EXPKEY_WORDS 132
#define SERPENT_BLOCK_SIZE 16
#define PHI 0x9e3779b9UL
#define keyiter(a,b,c,d,i,j) \
b ^= d; b ^= c; b ^= a; b ^= PHI ^ i; b = rol32(b,11); k[j] = b;
#define loadkeys(x0,x1,x2,x3,i) \
x0=k[i]; x1=k[i+1]; x2=k[i+2]; x3=k[i+3];
#define storekeys(x0,x1,x2,x3,i) \
k[i]=x0; k[i+1]=x1; k[i+2]=x2; k[i+3]=x3;
#define K(x0,x1,x2,x3,i) \
x3 ^= k[4*(i)+3]; x2 ^= k[4*(i)+2]; \
x1 ^= k[4*(i)+1]; x0 ^= k[4*(i)+0];
#define LK(x0,x1,x2,x3,x4,i) \
x0=rol32(x0,13);\
x2=rol32(x2,3); x1 ^= x0; x4 = x0 << 3; \
x3 ^= x2; x1 ^= x2; \
x1=rol32(x1,1); x3 ^= x4; \
x3=rol32(x3,7); x4 = x1; \
x0 ^= x1; x4 <<= 7; x2 ^= x3; \
x0 ^= x3; x2 ^= x4; x3 ^= k[4*i+3]; \
x1 ^= k[4*i+1]; x0=rol32(x0,5); x2=rol32(x2,22);\
x0 ^= k[4*i+0]; x2 ^= k[4*i+2];
#define KL(x0,x1,x2,x3,x4,i) \
x0 ^= k[4*i+0]; x1 ^= k[4*i+1]; x2 ^= k[4*i+2]; \
x3 ^= k[4*i+3]; x0=ror32(x0,5); x2=ror32(x2,22);\
x4 = x1; x2 ^= x3; x0 ^= x3; \
x4 <<= 7; x0 ^= x1; x1=ror32(x1,1); \
x2 ^= x4; x3=ror32(x3,7); x4 = x0 << 3; \
x1 ^= x0; x3 ^= x4; x0=ror32(x0,13);\
x1 ^= x2; x3 ^= x2; x2=ror32(x2,3);
#define S0(x0,x1,x2,x3,x4) \
x4 = x3; \
x3 |= x0; x0 ^= x4; x4 ^= x2; \
x4 =~ x4; x3 ^= x1; x1 &= x0; \
x1 ^= x4; x2 ^= x0; x0 ^= x3; \
x4 |= x0; x0 ^= x2; x2 &= x1; \
x3 ^= x2; x1 =~ x1; x2 ^= x4; \
x1 ^= x2;
#define S1(x0,x1,x2,x3,x4) \
x4 = x1; \
x1 ^= x0; x0 ^= x3; x3 =~ x3; \
x4 &= x1; x0 |= x1; x3 ^= x2; \
x0 ^= x3; x1 ^= x3; x3 ^= x4; \
x1 |= x4; x4 ^= x2; x2 &= x0; \
x2 ^= x1; x1 |= x0; x0 =~ x0; \
x0 ^= x2; x4 ^= x1;
#define S2(x0,x1,x2,x3,x4) \
x3 =~ x3; \
x1 ^= x0; x4 = x0; x0 &= x2; \
x0 ^= x3; x3 |= x4; x2 ^= x1; \
x3 ^= x1; x1 &= x0; x0 ^= x2; \
x2 &= x3; x3 |= x1; x0 =~ x0; \
x3 ^= x0; x4 ^= x0; x0 ^= x2; \
x1 |= x2;
#define S3(x0,x1,x2,x3,x4) \
x4 = x1; \
x1 ^= x3; x3 |= x0; x4 &= x0; \
x0 ^= x2; x2 ^= x1; x1 &= x3; \
x2 ^= x3; x0 |= x4; x4 ^= x3; \
x1 ^= x0; x0 &= x3; x3 &= x4; \
x3 ^= x2; x4 |= x1; x2 &= x1; \
x4 ^= x3; x0 ^= x3; x3 ^= x2;
#define S4(x0,x1,x2,x3,x4) \
x4 = x3; \
x3 &= x0; x0 ^= x4; \
x3 ^= x2; x2 |= x4; x0 ^= x1; \
x4 ^= x3; x2 |= x0; \
x2 ^= x1; x1 &= x0; \
x1 ^= x4; x4 &= x2; x2 ^= x3; \
x4 ^= x0; x3 |= x1; x1 =~ x1; \
x3 ^= x0;
#define S5(x0,x1,x2,x3,x4) \
x4 = x1; x1 |= x0; \
x2 ^= x1; x3 =~ x3; x4 ^= x0; \
x0 ^= x2; x1 &= x4; x4 |= x3; \
x4 ^= x0; x0 &= x3; x1 ^= x3; \
x3 ^= x2; x0 ^= x1; x2 &= x4; \
x1 ^= x2; x2 &= x0; \
x3 ^= x2;
#define S6(x0,x1,x2,x3,x4) \
x4 = x1; \
x3 ^= x0; x1 ^= x2; x2 ^= x0; \
x0 &= x3; x1 |= x3; x4 =~ x4; \
x0 ^= x1; x1 ^= x2; \
x3 ^= x4; x4 ^= x0; x2 &= x0; \
x4 ^= x1; x2 ^= x3; x3 &= x1; \
x3 ^= x0; x1 ^= x2;
#define S7(x0,x1,x2,x3,x4) \
x1 =~ x1; \
x4 = x1; x0 =~ x0; x1 &= x2; \
x1 ^= x3; x3 |= x4; x4 ^= x2; \
x2 ^= x3; x3 ^= x0; x0 |= x1; \
x2 &= x0; x0 ^= x4; x4 ^= x3; \
x3 &= x0; x4 ^= x1; \
x2 ^= x4; x3 ^= x1; x4 |= x0; \
x4 ^= x1;
#define SI0(x0,x1,x2,x3,x4) \
x4 = x3; x1 ^= x0; \
x3 |= x1; x4 ^= x1; x0 =~ x0; \
x2 ^= x3; x3 ^= x0; x0 &= x1; \
x0 ^= x2; x2 &= x3; x3 ^= x4; \
x2 ^= x3; x1 ^= x3; x3 &= x0; \
x1 ^= x0; x0 ^= x2; x4 ^= x3;
#define SI1(x0,x1,x2,x3,x4) \
x1 ^= x3; x4 = x0; \
x0 ^= x2; x2 =~ x2; x4 |= x1; \
x4 ^= x3; x3 &= x1; x1 ^= x2; \
x2 &= x4; x4 ^= x1; x1 |= x3; \
x3 ^= x0; x2 ^= x0; x0 |= x4; \
x2 ^= x4; x1 ^= x0; \
x4 ^= x1;
#define SI2(x0,x1,x2,x3,x4) \
x2 ^= x1; x4 = x3; x3 =~ x3; \
x3 |= x2; x2 ^= x4; x4 ^= x0; \
x3 ^= x1; x1 |= x2; x2 ^= x0; \
x1 ^= x4; x4 |= x3; x2 ^= x3; \
x4 ^= x2; x2 &= x1; \
x2 ^= x3; x3 ^= x4; x4 ^= x0;
#define SI3(x0,x1,x2,x3,x4) \
x2 ^= x1; \
x4 = x1; x1 &= x2; \
x1 ^= x0; x0 |= x4; x4 ^= x3; \
x0 ^= x3; x3 |= x1; x1 ^= x2; \
x1 ^= x3; x0 ^= x2; x2 ^= x3; \
x3 &= x1; x1 ^= x0; x0 &= x2; \
x4 ^= x3; x3 ^= x0; x0 ^= x1;
#define SI4(x0,x1,x2,x3,x4) \
x2 ^= x3; x4 = x0; x0 &= x1; \
x0 ^= x2; x2 |= x3; x4 =~ x4; \
x1 ^= x0; x0 ^= x2; x2 &= x4; \
x2 ^= x0; x0 |= x4; \
x0 ^= x3; x3 &= x2; \
x4 ^= x3; x3 ^= x1; x1 &= x0; \
x4 ^= x1; x0 ^= x3;
#define SI5(x0,x1,x2,x3,x4) \
x4 = x1; x1 |= x2; \
x2 ^= x4; x1 ^= x3; x3 &= x4; \
x2 ^= x3; x3 |= x0; x0 =~ x0; \
x3 ^= x2; x2 |= x0; x4 ^= x1; \
x2 ^= x4; x4 &= x0; x0 ^= x1; \
x1 ^= x3; x0 &= x2; x2 ^= x3; \
x0 ^= x2; x2 ^= x4; x4 ^= x3;
#define SI6(x0,x1,x2,x3,x4) \
x0 ^= x2; \
x4 = x0; x0 &= x3; x2 ^= x3; \
x0 ^= x2; x3 ^= x1; x2 |= x4; \
x2 ^= x3; x3 &= x0; x0 =~ x0; \
x3 ^= x1; x1 &= x2; x4 ^= x0; \
x3 ^= x4; x4 ^= x2; x0 ^= x1; \
x2 ^= x0;
#define SI7(x0,x1,x2,x3,x4) \
x4 = x3; x3 &= x0; x0 ^= x2; \
x2 |= x4; x4 ^= x1; x0 =~ x0; \
x1 |= x3; x4 ^= x0; x0 &= x2; \
x0 ^= x1; x1 &= x2; x3 ^= x2; \
x4 ^= x3; x2 &= x3; x3 |= x0; \
x1 ^= x4; x3 ^= x4; x4 &= x0; \
x4 ^= x2;
struct serpent_ctx {
u32 expkey[SERPENT_EXPKEY_WORDS];
};
static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *k = ctx->expkey;
u8 *k8 = (u8 *)k;
u32 r0,r1,r2,r3,r4;
int i;
/* Copy key, add padding */
for (i = 0; i < keylen; ++i)
k8[i] = key[i];
if (i < SERPENT_MAX_KEY_SIZE)
k8[i++] = 1;
while (i < SERPENT_MAX_KEY_SIZE)
k8[i++] = 0;
/* Expand key using polynomial */
r0 = le32_to_cpu(k[3]);
r1 = le32_to_cpu(k[4]);
r2 = le32_to_cpu(k[5]);
r3 = le32_to_cpu(k[6]);
r4 = le32_to_cpu(k[7]);
keyiter(le32_to_cpu(k[0]),r0,r4,r2,0,0);
keyiter(le32_to_cpu(k[1]),r1,r0,r3,1,1);
keyiter(le32_to_cpu(k[2]),r2,r1,r4,2,2);
keyiter(le32_to_cpu(k[3]),r3,r2,r0,3,3);
keyiter(le32_to_cpu(k[4]),r4,r3,r1,4,4);
keyiter(le32_to_cpu(k[5]),r0,r4,r2,5,5);
keyiter(le32_to_cpu(k[6]),r1,r0,r3,6,6);
keyiter(le32_to_cpu(k[7]),r2,r1,r4,7,7);
keyiter(k[ 0],r3,r2,r0, 8, 8); keyiter(k[ 1],r4,r3,r1, 9, 9);
keyiter(k[ 2],r0,r4,r2, 10, 10); keyiter(k[ 3],r1,r0,r3, 11, 11);
keyiter(k[ 4],r2,r1,r4, 12, 12); keyiter(k[ 5],r3,r2,r0, 13, 13);
keyiter(k[ 6],r4,r3,r1, 14, 14); keyiter(k[ 7],r0,r4,r2, 15, 15);
keyiter(k[ 8],r1,r0,r3, 16, 16); keyiter(k[ 9],r2,r1,r4, 17, 17);
keyiter(k[ 10],r3,r2,r0, 18, 18); keyiter(k[ 11],r4,r3,r1, 19, 19);
keyiter(k[ 12],r0,r4,r2, 20, 20); keyiter(k[ 13],r1,r0,r3, 21, 21);
keyiter(k[ 14],r2,r1,r4, 22, 22); keyiter(k[ 15],r3,r2,r0, 23, 23);
keyiter(k[ 16],r4,r3,r1, 24, 24); keyiter(k[ 17],r0,r4,r2, 25, 25);
keyiter(k[ 18],r1,r0,r3, 26, 26); keyiter(k[ 19],r2,r1,r4, 27, 27);
keyiter(k[ 20],r3,r2,r0, 28, 28); keyiter(k[ 21],r4,r3,r1, 29, 29);
keyiter(k[ 22],r0,r4,r2, 30, 30); keyiter(k[ 23],r1,r0,r3, 31, 31);
k += 50;
keyiter(k[-26],r2,r1,r4, 32,-18); keyiter(k[-25],r3,r2,r0, 33,-17);
keyiter(k[-24],r4,r3,r1, 34,-16); keyiter(k[-23],r0,r4,r2, 35,-15);
keyiter(k[-22],r1,r0,r3, 36,-14); keyiter(k[-21],r2,r1,r4, 37,-13);
keyiter(k[-20],r3,r2,r0, 38,-12); keyiter(k[-19],r4,r3,r1, 39,-11);
keyiter(k[-18],r0,r4,r2, 40,-10); keyiter(k[-17],r1,r0,r3, 41, -9);
keyiter(k[-16],r2,r1,r4, 42, -8); keyiter(k[-15],r3,r2,r0, 43, -7);
keyiter(k[-14],r4,r3,r1, 44, -6); keyiter(k[-13],r0,r4,r2, 45, -5);
keyiter(k[-12],r1,r0,r3, 46, -4); keyiter(k[-11],r2,r1,r4, 47, -3);
keyiter(k[-10],r3,r2,r0, 48, -2); keyiter(k[ -9],r4,r3,r1, 49, -1);
keyiter(k[ -8],r0,r4,r2, 50, 0); keyiter(k[ -7],r1,r0,r3, 51, 1);
keyiter(k[ -6],r2,r1,r4, 52, 2); keyiter(k[ -5],r3,r2,r0, 53, 3);
keyiter(k[ -4],r4,r3,r1, 54, 4); keyiter(k[ -3],r0,r4,r2, 55, 5);
keyiter(k[ -2],r1,r0,r3, 56, 6); keyiter(k[ -1],r2,r1,r4, 57, 7);
keyiter(k[ 0],r3,r2,r0, 58, 8); keyiter(k[ 1],r4,r3,r1, 59, 9);
keyiter(k[ 2],r0,r4,r2, 60, 10); keyiter(k[ 3],r1,r0,r3, 61, 11);
keyiter(k[ 4],r2,r1,r4, 62, 12); keyiter(k[ 5],r3,r2,r0, 63, 13);
keyiter(k[ 6],r4,r3,r1, 64, 14); keyiter(k[ 7],r0,r4,r2, 65, 15);
keyiter(k[ 8],r1,r0,r3, 66, 16); keyiter(k[ 9],r2,r1,r4, 67, 17);
keyiter(k[ 10],r3,r2,r0, 68, 18); keyiter(k[ 11],r4,r3,r1, 69, 19);
keyiter(k[ 12],r0,r4,r2, 70, 20); keyiter(k[ 13],r1,r0,r3, 71, 21);
keyiter(k[ 14],r2,r1,r4, 72, 22); keyiter(k[ 15],r3,r2,r0, 73, 23);
keyiter(k[ 16],r4,r3,r1, 74, 24); keyiter(k[ 17],r0,r4,r2, 75, 25);
keyiter(k[ 18],r1,r0,r3, 76, 26); keyiter(k[ 19],r2,r1,r4, 77, 27);
keyiter(k[ 20],r3,r2,r0, 78, 28); keyiter(k[ 21],r4,r3,r1, 79, 29);
keyiter(k[ 22],r0,r4,r2, 80, 30); keyiter(k[ 23],r1,r0,r3, 81, 31);
k += 50;
keyiter(k[-26],r2,r1,r4, 82,-18); keyiter(k[-25],r3,r2,r0, 83,-17);
keyiter(k[-24],r4,r3,r1, 84,-16); keyiter(k[-23],r0,r4,r2, 85,-15);
keyiter(k[-22],r1,r0,r3, 86,-14); keyiter(k[-21],r2,r1,r4, 87,-13);
keyiter(k[-20],r3,r2,r0, 88,-12); keyiter(k[-19],r4,r3,r1, 89,-11);
keyiter(k[-18],r0,r4,r2, 90,-10); keyiter(k[-17],r1,r0,r3, 91, -9);
keyiter(k[-16],r2,r1,r4, 92, -8); keyiter(k[-15],r3,r2,r0, 93, -7);
keyiter(k[-14],r4,r3,r1, 94, -6); keyiter(k[-13],r0,r4,r2, 95, -5);
keyiter(k[-12],r1,r0,r3, 96, -4); keyiter(k[-11],r2,r1,r4, 97, -3);
keyiter(k[-10],r3,r2,r0, 98, -2); keyiter(k[ -9],r4,r3,r1, 99, -1);
keyiter(k[ -8],r0,r4,r2,100, 0); keyiter(k[ -7],r1,r0,r3,101, 1);
keyiter(k[ -6],r2,r1,r4,102, 2); keyiter(k[ -5],r3,r2,r0,103, 3);
keyiter(k[ -4],r4,r3,r1,104, 4); keyiter(k[ -3],r0,r4,r2,105, 5);
keyiter(k[ -2],r1,r0,r3,106, 6); keyiter(k[ -1],r2,r1,r4,107, 7);
keyiter(k[ 0],r3,r2,r0,108, 8); keyiter(k[ 1],r4,r3,r1,109, 9);
keyiter(k[ 2],r0,r4,r2,110, 10); keyiter(k[ 3],r1,r0,r3,111, 11);
keyiter(k[ 4],r2,r1,r4,112, 12); keyiter(k[ 5],r3,r2,r0,113, 13);
keyiter(k[ 6],r4,r3,r1,114, 14); keyiter(k[ 7],r0,r4,r2,115, 15);
keyiter(k[ 8],r1,r0,r3,116, 16); keyiter(k[ 9],r2,r1,r4,117, 17);
keyiter(k[ 10],r3,r2,r0,118, 18); keyiter(k[ 11],r4,r3,r1,119, 19);
keyiter(k[ 12],r0,r4,r2,120, 20); keyiter(k[ 13],r1,r0,r3,121, 21);
keyiter(k[ 14],r2,r1,r4,122, 22); keyiter(k[ 15],r3,r2,r0,123, 23);
keyiter(k[ 16],r4,r3,r1,124, 24); keyiter(k[ 17],r0,r4,r2,125, 25);
keyiter(k[ 18],r1,r0,r3,126, 26); keyiter(k[ 19],r2,r1,r4,127, 27);
keyiter(k[ 20],r3,r2,r0,128, 28); keyiter(k[ 21],r4,r3,r1,129, 29);
keyiter(k[ 22],r0,r4,r2,130, 30); keyiter(k[ 23],r1,r0,r3,131, 31);
/* Apply S-boxes */
S3(r3,r4,r0,r1,r2); storekeys(r1,r2,r4,r3, 28); loadkeys(r1,r2,r4,r3, 24);
S4(r1,r2,r4,r3,r0); storekeys(r2,r4,r3,r0, 24); loadkeys(r2,r4,r3,r0, 20);
S5(r2,r4,r3,r0,r1); storekeys(r1,r2,r4,r0, 20); loadkeys(r1,r2,r4,r0, 16);
S6(r1,r2,r4,r0,r3); storekeys(r4,r3,r2,r0, 16); loadkeys(r4,r3,r2,r0, 12);
S7(r4,r3,r2,r0,r1); storekeys(r1,r2,r0,r4, 12); loadkeys(r1,r2,r0,r4, 8);
S0(r1,r2,r0,r4,r3); storekeys(r0,r2,r4,r1, 8); loadkeys(r0,r2,r4,r1, 4);
S1(r0,r2,r4,r1,r3); storekeys(r3,r4,r1,r0, 4); loadkeys(r3,r4,r1,r0, 0);
S2(r3,r4,r1,r0,r2); storekeys(r2,r4,r3,r0, 0); loadkeys(r2,r4,r3,r0, -4);
S3(r2,r4,r3,r0,r1); storekeys(r0,r1,r4,r2, -4); loadkeys(r0,r1,r4,r2, -8);
S4(r0,r1,r4,r2,r3); storekeys(r1,r4,r2,r3, -8); loadkeys(r1,r4,r2,r3,-12);
S5(r1,r4,r2,r3,r0); storekeys(r0,r1,r4,r3,-12); loadkeys(r0,r1,r4,r3,-16);
S6(r0,r1,r4,r3,r2); storekeys(r4,r2,r1,r3,-16); loadkeys(r4,r2,r1,r3,-20);
S7(r4,r2,r1,r3,r0); storekeys(r0,r1,r3,r4,-20); loadkeys(r0,r1,r3,r4,-24);
S0(r0,r1,r3,r4,r2); storekeys(r3,r1,r4,r0,-24); loadkeys(r3,r1,r4,r0,-28);
k -= 50;
S1(r3,r1,r4,r0,r2); storekeys(r2,r4,r0,r3, 22); loadkeys(r2,r4,r0,r3, 18);
S2(r2,r4,r0,r3,r1); storekeys(r1,r4,r2,r3, 18); loadkeys(r1,r4,r2,r3, 14);
S3(r1,r4,r2,r3,r0); storekeys(r3,r0,r4,r1, 14); loadkeys(r3,r0,r4,r1, 10);
S4(r3,r0,r4,r1,r2); storekeys(r0,r4,r1,r2, 10); loadkeys(r0,r4,r1,r2, 6);
S5(r0,r4,r1,r2,r3); storekeys(r3,r0,r4,r2, 6); loadkeys(r3,r0,r4,r2, 2);
S6(r3,r0,r4,r2,r1); storekeys(r4,r1,r0,r2, 2); loadkeys(r4,r1,r0,r2, -2);
S7(r4,r1,r0,r2,r3); storekeys(r3,r0,r2,r4, -2); loadkeys(r3,r0,r2,r4, -6);
S0(r3,r0,r2,r4,r1); storekeys(r2,r0,r4,r3, -6); loadkeys(r2,r0,r4,r3,-10);
S1(r2,r0,r4,r3,r1); storekeys(r1,r4,r3,r2,-10); loadkeys(r1,r4,r3,r2,-14);
S2(r1,r4,r3,r2,r0); storekeys(r0,r4,r1,r2,-14); loadkeys(r0,r4,r1,r2,-18);
S3(r0,r4,r1,r2,r3); storekeys(r2,r3,r4,r0,-18); loadkeys(r2,r3,r4,r0,-22);
k -= 50;
S4(r2,r3,r4,r0,r1); storekeys(r3,r4,r0,r1, 28); loadkeys(r3,r4,r0,r1, 24);
S5(r3,r4,r0,r1,r2); storekeys(r2,r3,r4,r1, 24); loadkeys(r2,r3,r4,r1, 20);
S6(r2,r3,r4,r1,r0); storekeys(r4,r0,r3,r1, 20); loadkeys(r4,r0,r3,r1, 16);
S7(r4,r0,r3,r1,r2); storekeys(r2,r3,r1,r4, 16); loadkeys(r2,r3,r1,r4, 12);
S0(r2,r3,r1,r4,r0); storekeys(r1,r3,r4,r2, 12); loadkeys(r1,r3,r4,r2, 8);
S1(r1,r3,r4,r2,r0); storekeys(r0,r4,r2,r1, 8); loadkeys(r0,r4,r2,r1, 4);
S2(r0,r4,r2,r1,r3); storekeys(r3,r4,r0,r1, 4); loadkeys(r3,r4,r0,r1, 0);
S3(r3,r4,r0,r1,r2); storekeys(r1,r2,r4,r3, 0);
return 0;
}
static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
const u32
*k = ctx->expkey;
const __le32 *s = (const __le32 *)src;
__le32 *d = (__le32 *)dst;
u32 r0, r1, r2, r3, r4;
/*
* Note: The conversions between u8* and u32* might cause trouble
* on architectures with stricter alignment rules than x86
*/
r0 = le32_to_cpu(s[0]);
r1 = le32_to_cpu(s[1]);
r2 = le32_to_cpu(s[2]);
r3 = le32_to_cpu(s[3]);
K(r0,r1,r2,r3,0);
S0(r0,r1,r2,r3,r4); LK(r2,r1,r3,r0,r4,1);
S1(r2,r1,r3,r0,r4); LK(r4,r3,r0,r2,r1,2);
S2(r4,r3,r0,r2,r1); LK(r1,r3,r4,r2,r0,3);
S3(r1,r3,r4,r2,r0); LK(r2,r0,r3,r1,r4,4);
S4(r2,r0,r3,r1,r4); LK(r0,r3,r1,r4,r2,5);
S5(r0,r3,r1,r4,r2); LK(r2,r0,r3,r4,r1,6);
S6(r2,r0,r3,r4,r1); LK(r3,r1,r0,r4,r2,7);
S7(r3,r1,r0,r4,r2); LK(r2,r0,r4,r3,r1,8);
S0(r2,r0,r4,r3,r1); LK(r4,r0,r3,r2,r1,9);
S1(r4,r0,r3,r2,r1); LK(r1,r3,r2,r4,r0,10);
S2(r1,r3,r2,r4,r0); LK(r0,r3,r1,r4,r2,11);
S3(r0,r3,r1,r4,r2); LK(r4,r2,r3,r0,r1,12);
S4(r4,r2,r3,r0,r1); LK(r2,r3,r0,r1,r4,13);
S5(r2,r3,r0,r1,r4); LK(r4,r2,r3,r1,r0,14);
S6(r4,r2,r3,r1,r0); LK(r3,r0,r2,r1,r4,15);
S7(r3,r0,r2,r1,r4); LK(r4,r2,r1,r3,r0,16);
S0(r4,r2,r1,r3,r0); LK(r1,r2,r3,r4,r0,17);
S1(r1,r2,r3,r4,r0); LK(r0,r3,r4,r1,r2,18);
S2(r0,r3,r4,r1,r2); LK(r2,r3,r0,r1,r4,19);
S3(r2,r3,r0,r1,r4); LK(r1,r4,r3,r2,r0,20);
S4(r1,r4,r3,r2,r0); LK(r4,r3,r2,r0,r1,21);
S5(r4,r3,r2,r0,r1); LK(r1,r4,r3,r0,r2,22);
S6(r1,r4,r3,r0,r2); LK(r3,r2,r4,r0,r1,23);
S7(r3,r2,r4,r0,r1); LK(r1,r4,r0,r3,r2,24);
S0(r1,r4,r0,r3,r2); LK(r0,r4,r3,r1,r2,25);
S1(r0,r4,r3,r1,r2); LK(r2,r3,r1,r0,r4,26);
S2(r2,r3,r1,r0,r4); LK(r4,r3,r2,r0,r1,27);
S3(r4,r3,r2,r0,r1); LK(r0,r1,r3,r4,r2,28);
S4(r0,r1,r3,r4,r2); LK(r1,r3,r4,r2,r0,29);
S5(r1,r3,r4,r2,r0); LK(r0,r1,r3,r2,r4,30);
S6(r0,r1,r3,r2,r4); LK(r3,r4,r1,r2,r0,31);
S7(r3,r4,r1,r2,r0); K(r0,r1,r2,r3,32);
d[0] = cpu_to_le32(r0);
d[1] = cpu_to_le32(r1);
d[2] = cpu_to_le32(r2);
d[3] = cpu_to_le32(r3);
}
static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
const u32
*k = ((struct serpent_ctx *)ctx)->expkey;
const __le32 *s = (const __le32 *)src;
__le32 *d = (__le32 *)dst;
u32 r0, r1, r2, r3, r4;
r0 = le32_to_cpu(s[0]);
r1 = le32_to_cpu(s[1]);
r2 = le32_to_cpu(s[2]);
r3 = le32_to_cpu(s[3]);
K(r0,r1,r2,r3,32);
SI7(r0,r1,r2,r3,r4); KL(r1,r3,r0,r4,r2,31);
SI6(r1,r3,r0,r4,r2); KL(r0,r2,r4,r1,r3,30);
SI5(r0,r2,r4,r1,r3); KL(r2,r3,r0,r4,r1,29);
SI4(r2,r3,r0,r4,r1); KL(r2,r0,r1,r4,r3,28);
SI3(r2,r0,r1,r4,r3); KL(r1,r2,r3,r4,r0,27);
SI2(r1,r2,r3,r4,r0); KL(r2,r0,r4,r3,r1,26);
SI1(r2,r0,r4,r3,r1); KL(r1,r0,r4,r3,r2,25);
SI0(r1,r0,r4,r3,r2); KL(r4,r2,r0,r1,r3,24);
SI7(r4,r2,r0,r1,r3); KL(r2,r1,r4,r3,r0,23);
SI6(r2,r1,r4,r3,r0); KL(r4,r0,r3,r2,r1,22);
SI5(r4,r0,r3,r2,r1); KL(r0,r1,r4,r3,r2,21);
SI4(r0,r1,r4,r3,r2); KL(r0,r4,r2,r3,r1,20);
SI3(r0,r4,r2,r3,r1); KL(r2,r0,r1,r3,r4,19);
SI2(r2,r0,r1,r3,r4); KL(r0,r4,r3,r1,r2,18);
SI1(r0,r4,r3,r1,r2); KL(r2,r4,r3,r1,r0,17);
SI0(r2,r4,r3,r1,r0); KL(r3,r0,r4,r2,r1,16);
SI7(r3,r0,r4,r2,r1); KL(r0,r2,r3,r1,r4,15);
SI6(r0,r2,r3,r1,r4); KL(r3,r4,r1,r0,r2,14);
SI5(r3,r4,r1,r0,r2); KL(r4,r2,r3,r1,r0,13);
SI4(r4,r2,r3,r1,r0); KL(r4,r3,r0,r1,r2,12);
SI3(r4,r3,r0,r1,r2); KL(r0,r4,r2,r1,r3,11);
SI2(r0,r4,r2,r1,r3); KL(r4,r3,r1,r2,r0,10);
SI1(r4,r3,r1,r2,r0); KL(r0,r3,r1,r2,r4,9);
SI0(r0,r3,r1,r2,r4); KL(r1,r4,r3,r0,r2,8);
SI7(r1,r4,r3,r0,r2); KL(r4,r0,r1,r2,r3,7);
SI6(r4,r0,r1,r2,r3); KL(r1,r3,r2,r4,r0,6);
SI5(r1,r3,r2,r4,r0); KL(r3,r0,r1,r2,r4,5);
SI4(r3,r0,r1,r2,r4); KL(r3,r1,r4,r2,r0,4);
SI3(r3,r1,r4,r2,r0); KL(r4,r3,r0,r2,r1,3);
SI2(r4,r3,r0,r2,r1); KL(r3,r1,r2,r0,r4,2);
SI1(r3,r1,r2,r0,r4); KL(r4,r1,r2,r0,r3,1);
SI0(r4,r1,r2,r0,r3); K(r2,r3,r1,r4,0);
d[0] = cpu_to_le32(r2);
d[1] = cpu_to_le32(r3);
d[2] = cpu_to_le32(r1);
d[3] = cpu_to_le32(r4);
}
static struct crypto_alg serpent_alg = {
.cra_name = "serpent",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = SERPENT_MIN_KEY_SIZE,
.cia_max_keysize = SERPENT_MAX_KEY_SIZE,
.cia_setkey = serpent_setkey,
.cia_encrypt = serpent_encrypt,
.cia_decrypt = serpent_decrypt } }
};
static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
u8 rev_key[SERPENT_MAX_KEY_SIZE];
int i;
for (i = 0; i < keylen; ++i)
rev_key[keylen - i - 1] = key[i];
return serpent_setkey(tfm, rev_key, keylen);
}
static void tnepres_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const u32 * const s = (const u32 * const)src;
u32 * const d = (u32 * const)dst;
u32 rs[4], rd[4];
rs[0] = swab32(s[3]);
rs[1] = swab32(s[2]);
rs[2] = swab32(s[1]);
rs[3] = swab32(s[0]);
serpent_encrypt(tfm, (u8 *)rd, (u8 *)rs);
d[0] = swab32(rd[3]);
d[1] = swab32(rd[2]);
d[2] = swab32(rd[1]);
d[3] = swab32(rd[0]);
}
static void tnepres_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const u32 * const s = (const u32 * const)src;
u32 * const d = (u32 * const)dst;
u32 rs[4], rd[4];
rs[0] = swab32(s[3]);
rs[1] = swab32(s[2]);
rs[2] = swab32(s[1]);
rs[3] = swab32(s[0]);
serpent_decrypt(tfm, (u8 *)rd, (u8 *)rs);
d[0] = swab32(rd[3]);
d[1] = swab32(rd[2]);
d[2] = swab32(rd[1]);
d[3] = swab32(rd[0]);
}
static struct crypto_alg tnepres_alg = {
.cra_name = "tnepres",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = SERPENT_MIN_KEY_SIZE,
.cia_max_keysize = SERPENT_MAX_KEY_SIZE,
.cia_setkey = tnepres_setkey,
.cia_encrypt = tnepres_encrypt,
.cia_decrypt = tnepres_decrypt } }
};
static int __init serpent_mod_init(void)
{
int ret = crypto_register_alg(&serpent_alg);
if (ret)
return ret;
ret = crypto_register_alg(&tnepres_alg);
if (ret)
crypto_unregister_alg(&serpent_alg);
return ret;
}
static void __exit serpent_mod_fini(void)
{
crypto_unregister_alg(&tnepres_alg);
crypto_unregister_alg(&serpent_alg);
}
module_init(serpent_mod_init);
module_exit(serpent_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
MODULE_ALIAS("tnepres");
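Both registrations above expose plain single-block ciphers, so a consumer keys a transform and encrypts one 16-byte block at a time. A minimal, hypothetical sketch against the classic cipher API (the crypto_* calls are the standard kernel interface; the smoke-test routine itself is illustrative):

#include <linux/err.h>
#include <linux/crypto.h>

static int serpent_smoke_test(void)
{
	struct crypto_cipher *tfm;
	u8 key[32] = { 0 };	/* any key length <= 32 bytes is accepted */
	u8 in[16] = { 0 }, out[16];
	int err;

	tfm = crypto_alloc_cipher("serpent", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, sizeof(key));
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);

	crypto_free_cipher(tfm);
	return err;
}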

crypto/serpent_generic.c (new file, 684 lines added)

@@ -0,0 +1,684 @@
/*
* Cryptographic API.
*
* Serpent Cipher Algorithm.
*
* Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no>
* 2003 Herbert Valerio Riedel <hvr@gnu.org>
*
* Added tnepres support:
* Ruben Jesus Garcia Hernandez <ruben@ugr.es>, 18.10.2004
* Based on code by hvr
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <crypto/serpent.h>
/* Key is padded to the maximum of 256 bits before round key generation.
* Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
*/
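/*
 * Example: a hypothetical 128-bit key (keylen = 16) expands from
 * k8[0..15] = key bytes, k8[16] = 0x01, k8[17..31] = 0x00, i.e. the
 * "append 1, then zero-fill" padding performed at the top of
 * __serpent_setkey() below.
 */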
#define PHI 0x9e3779b9UL
#define keyiter(a, b, c, d, i, j) \
({ b ^= d; b ^= c; b ^= a; b ^= PHI ^ i; b = rol32(b, 11); k[j] = b; })
#define loadkeys(x0, x1, x2, x3, i) \
({ x0 = k[i]; x1 = k[i+1]; x2 = k[i+2]; x3 = k[i+3]; })
#define storekeys(x0, x1, x2, x3, i) \
({ k[i] = x0; k[i+1] = x1; k[i+2] = x2; k[i+3] = x3; })
#define store_and_load_keys(x0, x1, x2, x3, s, l) \
({ storekeys(x0, x1, x2, x3, s); loadkeys(x0, x1, x2, x3, l); })
#define K(x0, x1, x2, x3, i) ({ \
x3 ^= k[4*(i)+3]; x2 ^= k[4*(i)+2]; \
x1 ^= k[4*(i)+1]; x0 ^= k[4*(i)+0]; \
})
#define LK(x0, x1, x2, x3, x4, i) ({ \
x0 = rol32(x0, 13);\
x2 = rol32(x2, 3); x1 ^= x0; x4 = x0 << 3; \
x3 ^= x2; x1 ^= x2; \
x1 = rol32(x1, 1); x3 ^= x4; \
x3 = rol32(x3, 7); x4 = x1; \
x0 ^= x1; x4 <<= 7; x2 ^= x3; \
x0 ^= x3; x2 ^= x4; x3 ^= k[4*i+3]; \
x1 ^= k[4*i+1]; x0 = rol32(x0, 5); x2 = rol32(x2, 22);\
x0 ^= k[4*i+0]; x2 ^= k[4*i+2]; \
})
#define KL(x0, x1, x2, x3, x4, i) ({ \
x0 ^= k[4*i+0]; x1 ^= k[4*i+1]; x2 ^= k[4*i+2]; \
x3 ^= k[4*i+3]; x0 = ror32(x0, 5); x2 = ror32(x2, 22);\
x4 = x1; x2 ^= x3; x0 ^= x3; \
x4 <<= 7; x0 ^= x1; x1 = ror32(x1, 1); \
x2 ^= x4; x3 = ror32(x3, 7); x4 = x0 << 3; \
x1 ^= x0; x3 ^= x4; x0 = ror32(x0, 13);\
x1 ^= x2; x3 ^= x2; x2 = ror32(x2, 3); \
})
#define S0(x0, x1, x2, x3, x4) ({ \
x4 = x3; \
x3 |= x0; x0 ^= x4; x4 ^= x2; \
x4 = ~x4; x3 ^= x1; x1 &= x0; \
x1 ^= x4; x2 ^= x0; x0 ^= x3; \
x4 |= x0; x0 ^= x2; x2 &= x1; \
x3 ^= x2; x1 = ~x1; x2 ^= x4; \
x1 ^= x2; \
})
#define S1(x0, x1, x2, x3, x4) ({ \
x4 = x1; \
x1 ^= x0; x0 ^= x3; x3 = ~x3; \
x4 &= x1; x0 |= x1; x3 ^= x2; \
x0 ^= x3; x1 ^= x3; x3 ^= x4; \
x1 |= x4; x4 ^= x2; x2 &= x0; \
x2 ^= x1; x1 |= x0; x0 = ~x0; \
x0 ^= x2; x4 ^= x1; \
})
#define S2(x0, x1, x2, x3, x4) ({ \
x3 = ~x3; \
x1 ^= x0; x4 = x0; x0 &= x2; \
x0 ^= x3; x3 |= x4; x2 ^= x1; \
x3 ^= x1; x1 &= x0; x0 ^= x2; \
x2 &= x3; x3 |= x1; x0 = ~x0; \
x3 ^= x0; x4 ^= x0; x0 ^= x2; \
x1 |= x2; \
})
#define S3(x0, x1, x2, x3, x4) ({ \
x4 = x1; \
x1 ^= x3; x3 |= x0; x4 &= x0; \
x0 ^= x2; x2 ^= x1; x1 &= x3; \
x2 ^= x3; x0 |= x4; x4 ^= x3; \
x1 ^= x0; x0 &= x3; x3 &= x4; \
x3 ^= x2; x4 |= x1; x2 &= x1; \
x4 ^= x3; x0 ^= x3; x3 ^= x2; \
})
#define S4(x0, x1, x2, x3, x4) ({ \
x4 = x3; \
x3 &= x0; x0 ^= x4; \
x3 ^= x2; x2 |= x4; x0 ^= x1; \
x4 ^= x3; x2 |= x0; \
x2 ^= x1; x1 &= x0; \
x1 ^= x4; x4 &= x2; x2 ^= x3; \
x4 ^= x0; x3 |= x1; x1 = ~x1; \
x3 ^= x0; \
})
#define S5(x0, x1, x2, x3, x4) ({ \
x4 = x1; x1 |= x0; \
x2 ^= x1; x3 = ~x3; x4 ^= x0; \
x0 ^= x2; x1 &= x4; x4 |= x3; \
x4 ^= x0; x0 &= x3; x1 ^= x3; \
x3 ^= x2; x0 ^= x1; x2 &= x4; \
x1 ^= x2; x2 &= x0; \
x3 ^= x2; \
})
#define S6(x0, x1, x2, x3, x4) ({ \
x4 = x1; \
x3 ^= x0; x1 ^= x2; x2 ^= x0; \
x0 &= x3; x1 |= x3; x4 = ~x4; \
x0 ^= x1; x1 ^= x2; \
x3 ^= x4; x4 ^= x0; x2 &= x0; \
x4 ^= x1; x2 ^= x3; x3 &= x1; \
x3 ^= x0; x1 ^= x2; \
})
#define S7(x0, x1, x2, x3, x4) ({ \
x1 = ~x1; \
x4 = x1; x0 = ~x0; x1 &= x2; \
x1 ^= x3; x3 |= x4; x4 ^= x2; \
x2 ^= x3; x3 ^= x0; x0 |= x1; \
x2 &= x0; x0 ^= x4; x4 ^= x3; \
x3 &= x0; x4 ^= x1; \
x2 ^= x4; x3 ^= x1; x4 |= x0; \
x4 ^= x1; \
})
#define SI0(x0, x1, x2, x3, x4) ({ \
x4 = x3; x1 ^= x0; \
x3 |= x1; x4 ^= x1; x0 = ~x0; \
x2 ^= x3; x3 ^= x0; x0 &= x1; \
x0 ^= x2; x2 &= x3; x3 ^= x4; \
x2 ^= x3; x1 ^= x3; x3 &= x0; \
x1 ^= x0; x0 ^= x2; x4 ^= x3; \
})
#define SI1(x0, x1, x2, x3, x4) ({ \
x1 ^= x3; x4 = x0; \
x0 ^= x2; x2 = ~x2; x4 |= x1; \
x4 ^= x3; x3 &= x1; x1 ^= x2; \
x2 &= x4; x4 ^= x1; x1 |= x3; \
x3 ^= x0; x2 ^= x0; x0 |= x4; \
x2 ^= x4; x1 ^= x0; \
x4 ^= x1; \
})
#define SI2(x0, x1, x2, x3, x4) ({ \
x2 ^= x1; x4 = x3; x3 = ~x3; \
x3 |= x2; x2 ^= x4; x4 ^= x0; \
x3 ^= x1; x1 |= x2; x2 ^= x0; \
x1 ^= x4; x4 |= x3; x2 ^= x3; \
x4 ^= x2; x2 &= x1; \
x2 ^= x3; x3 ^= x4; x4 ^= x0; \
})
#define SI3(x0, x1, x2, x3, x4) ({ \
x2 ^= x1; \
x4 = x1; x1 &= x2; \
x1 ^= x0; x0 |= x4; x4 ^= x3; \
x0 ^= x3; x3 |= x1; x1 ^= x2; \
x1 ^= x3; x0 ^= x2; x2 ^= x3; \
x3 &= x1; x1 ^= x0; x0 &= x2; \
x4 ^= x3; x3 ^= x0; x0 ^= x1; \
})
#define SI4(x0, x1, x2, x3, x4) ({ \
x2 ^= x3; x4 = x0; x0 &= x1; \
x0 ^= x2; x2 |= x3; x4 = ~x4; \
x1 ^= x0; x0 ^= x2; x2 &= x4; \
x2 ^= x0; x0 |= x4; \
x0 ^= x3; x3 &= x2; \
x4 ^= x3; x3 ^= x1; x1 &= x0; \
x4 ^= x1; x0 ^= x3; \
})
#define SI5(x0, x1, x2, x3, x4) ({ \
x4 = x1; x1 |= x2; \
x2 ^= x4; x1 ^= x3; x3 &= x4; \
x2 ^= x3; x3 |= x0; x0 = ~x0; \
x3 ^= x2; x2 |= x0; x4 ^= x1; \
x2 ^= x4; x4 &= x0; x0 ^= x1; \
x1 ^= x3; x0 &= x2; x2 ^= x3; \
x0 ^= x2; x2 ^= x4; x4 ^= x3; \
})
#define SI6(x0, x1, x2, x3, x4) ({ \
x0 ^= x2; \
x4 = x0; x0 &= x3; x2 ^= x3; \
x0 ^= x2; x3 ^= x1; x2 |= x4; \
x2 ^= x3; x3 &= x0; x0 = ~x0; \
x3 ^= x1; x1 &= x2; x4 ^= x0; \
x3 ^= x4; x4 ^= x2; x0 ^= x1; \
x2 ^= x0; \
})
#define SI7(x0, x1, x2, x3, x4) ({ \
x4 = x3; x3 &= x0; x0 ^= x2; \
x2 |= x4; x4 ^= x1; x0 = ~x0; \
x1 |= x3; x4 ^= x0; x0 &= x2; \
x0 ^= x1; x1 &= x2; x3 ^= x2; \
x4 ^= x3; x2 &= x3; x3 |= x0; \
x1 ^= x4; x3 ^= x4; x4 &= x0; \
x4 ^= x2; \
})
int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
unsigned int keylen)
{
u32 *k = ctx->expkey;
u8 *k8 = (u8 *)k;
u32 r0, r1, r2, r3, r4;
int i;
/* Copy key, add padding */
for (i = 0; i < keylen; ++i)
k8[i] = key[i];
if (i < SERPENT_MAX_KEY_SIZE)
k8[i++] = 1;
while (i < SERPENT_MAX_KEY_SIZE)
k8[i++] = 0;
/* Expand key using polynomial */
r0 = le32_to_cpu(k[3]);
r1 = le32_to_cpu(k[4]);
r2 = le32_to_cpu(k[5]);
r3 = le32_to_cpu(k[6]);
r4 = le32_to_cpu(k[7]);
keyiter(le32_to_cpu(k[0]), r0, r4, r2, 0, 0);
keyiter(le32_to_cpu(k[1]), r1, r0, r3, 1, 1);
keyiter(le32_to_cpu(k[2]), r2, r1, r4, 2, 2);
keyiter(le32_to_cpu(k[3]), r3, r2, r0, 3, 3);
keyiter(le32_to_cpu(k[4]), r4, r3, r1, 4, 4);
keyiter(le32_to_cpu(k[5]), r0, r4, r2, 5, 5);
keyiter(le32_to_cpu(k[6]), r1, r0, r3, 6, 6);
keyiter(le32_to_cpu(k[7]), r2, r1, r4, 7, 7);
keyiter(k[0], r3, r2, r0, 8, 8);
keyiter(k[1], r4, r3, r1, 9, 9);
keyiter(k[2], r0, r4, r2, 10, 10);
keyiter(k[3], r1, r0, r3, 11, 11);
keyiter(k[4], r2, r1, r4, 12, 12);
keyiter(k[5], r3, r2, r0, 13, 13);
keyiter(k[6], r4, r3, r1, 14, 14);
keyiter(k[7], r0, r4, r2, 15, 15);
keyiter(k[8], r1, r0, r3, 16, 16);
keyiter(k[9], r2, r1, r4, 17, 17);
keyiter(k[10], r3, r2, r0, 18, 18);
keyiter(k[11], r4, r3, r1, 19, 19);
keyiter(k[12], r0, r4, r2, 20, 20);
keyiter(k[13], r1, r0, r3, 21, 21);
keyiter(k[14], r2, r1, r4, 22, 22);
keyiter(k[15], r3, r2, r0, 23, 23);
keyiter(k[16], r4, r3, r1, 24, 24);
keyiter(k[17], r0, r4, r2, 25, 25);
keyiter(k[18], r1, r0, r3, 26, 26);
keyiter(k[19], r2, r1, r4, 27, 27);
keyiter(k[20], r3, r2, r0, 28, 28);
keyiter(k[21], r4, r3, r1, 29, 29);
keyiter(k[22], r0, r4, r2, 30, 30);
keyiter(k[23], r1, r0, r3, 31, 31);
k += 50;
keyiter(k[-26], r2, r1, r4, 32, -18);
keyiter(k[-25], r3, r2, r0, 33, -17);
keyiter(k[-24], r4, r3, r1, 34, -16);
keyiter(k[-23], r0, r4, r2, 35, -15);
keyiter(k[-22], r1, r0, r3, 36, -14);
keyiter(k[-21], r2, r1, r4, 37, -13);
keyiter(k[-20], r3, r2, r0, 38, -12);
keyiter(k[-19], r4, r3, r1, 39, -11);
keyiter(k[-18], r0, r4, r2, 40, -10);
keyiter(k[-17], r1, r0, r3, 41, -9);
keyiter(k[-16], r2, r1, r4, 42, -8);
keyiter(k[-15], r3, r2, r0, 43, -7);
keyiter(k[-14], r4, r3, r1, 44, -6);
keyiter(k[-13], r0, r4, r2, 45, -5);
keyiter(k[-12], r1, r0, r3, 46, -4);
keyiter(k[-11], r2, r1, r4, 47, -3);
keyiter(k[-10], r3, r2, r0, 48, -2);
keyiter(k[-9], r4, r3, r1, 49, -1);
keyiter(k[-8], r0, r4, r2, 50, 0);
keyiter(k[-7], r1, r0, r3, 51, 1);
keyiter(k[-6], r2, r1, r4, 52, 2);
keyiter(k[-5], r3, r2, r0, 53, 3);
keyiter(k[-4], r4, r3, r1, 54, 4);
keyiter(k[-3], r0, r4, r2, 55, 5);
keyiter(k[-2], r1, r0, r3, 56, 6);
keyiter(k[-1], r2, r1, r4, 57, 7);
keyiter(k[0], r3, r2, r0, 58, 8);
keyiter(k[1], r4, r3, r1, 59, 9);
keyiter(k[2], r0, r4, r2, 60, 10);
keyiter(k[3], r1, r0, r3, 61, 11);
keyiter(k[4], r2, r1, r4, 62, 12);
keyiter(k[5], r3, r2, r0, 63, 13);
keyiter(k[6], r4, r3, r1, 64, 14);
keyiter(k[7], r0, r4, r2, 65, 15);
keyiter(k[8], r1, r0, r3, 66, 16);
keyiter(k[9], r2, r1, r4, 67, 17);
keyiter(k[10], r3, r2, r0, 68, 18);
keyiter(k[11], r4, r3, r1, 69, 19);
keyiter(k[12], r0, r4, r2, 70, 20);
keyiter(k[13], r1, r0, r3, 71, 21);
keyiter(k[14], r2, r1, r4, 72, 22);
keyiter(k[15], r3, r2, r0, 73, 23);
keyiter(k[16], r4, r3, r1, 74, 24);
keyiter(k[17], r0, r4, r2, 75, 25);
keyiter(k[18], r1, r0, r3, 76, 26);
keyiter(k[19], r2, r1, r4, 77, 27);
keyiter(k[20], r3, r2, r0, 78, 28);
keyiter(k[21], r4, r3, r1, 79, 29);
keyiter(k[22], r0, r4, r2, 80, 30);
keyiter(k[23], r1, r0, r3, 81, 31);
k += 50;
keyiter(k[-26], r2, r1, r4, 82, -18);
keyiter(k[-25], r3, r2, r0, 83, -17);
keyiter(k[-24], r4, r3, r1, 84, -16);
keyiter(k[-23], r0, r4, r2, 85, -15);
keyiter(k[-22], r1, r0, r3, 86, -14);
keyiter(k[-21], r2, r1, r4, 87, -13);
keyiter(k[-20], r3, r2, r0, 88, -12);
keyiter(k[-19], r4, r3, r1, 89, -11);
keyiter(k[-18], r0, r4, r2, 90, -10);
keyiter(k[-17], r1, r0, r3, 91, -9);
keyiter(k[-16], r2, r1, r4, 92, -8);
keyiter(k[-15], r3, r2, r0, 93, -7);
keyiter(k[-14], r4, r3, r1, 94, -6);
keyiter(k[-13], r0, r4, r2, 95, -5);
keyiter(k[-12], r1, r0, r3, 96, -4);
keyiter(k[-11], r2, r1, r4, 97, -3);
keyiter(k[-10], r3, r2, r0, 98, -2);
keyiter(k[-9], r4, r3, r1, 99, -1);
keyiter(k[-8], r0, r4, r2, 100, 0);
keyiter(k[-7], r1, r0, r3, 101, 1);
keyiter(k[-6], r2, r1, r4, 102, 2);
keyiter(k[-5], r3, r2, r0, 103, 3);
keyiter(k[-4], r4, r3, r1, 104, 4);
keyiter(k[-3], r0, r4, r2, 105, 5);
keyiter(k[-2], r1, r0, r3, 106, 6);
keyiter(k[-1], r2, r1, r4, 107, 7);
keyiter(k[0], r3, r2, r0, 108, 8);
keyiter(k[1], r4, r3, r1, 109, 9);
keyiter(k[2], r0, r4, r2, 110, 10);
keyiter(k[3], r1, r0, r3, 111, 11);
keyiter(k[4], r2, r1, r4, 112, 12);
keyiter(k[5], r3, r2, r0, 113, 13);
keyiter(k[6], r4, r3, r1, 114, 14);
keyiter(k[7], r0, r4, r2, 115, 15);
keyiter(k[8], r1, r0, r3, 116, 16);
keyiter(k[9], r2, r1, r4, 117, 17);
keyiter(k[10], r3, r2, r0, 118, 18);
keyiter(k[11], r4, r3, r1, 119, 19);
keyiter(k[12], r0, r4, r2, 120, 20);
keyiter(k[13], r1, r0, r3, 121, 21);
keyiter(k[14], r2, r1, r4, 122, 22);
keyiter(k[15], r3, r2, r0, 123, 23);
keyiter(k[16], r4, r3, r1, 124, 24);
keyiter(k[17], r0, r4, r2, 125, 25);
keyiter(k[18], r1, r0, r3, 126, 26);
keyiter(k[19], r2, r1, r4, 127, 27);
keyiter(k[20], r3, r2, r0, 128, 28);
keyiter(k[21], r4, r3, r1, 129, 29);
keyiter(k[22], r0, r4, r2, 130, 30);
keyiter(k[23], r1, r0, r3, 131, 31);
/* Apply S-boxes */
S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24);
S4(r1, r2, r4, r3, r0); store_and_load_keys(r2, r4, r3, r0, 24, 20);
S5(r2, r4, r3, r0, r1); store_and_load_keys(r1, r2, r4, r0, 20, 16);
S6(r1, r2, r4, r0, r3); store_and_load_keys(r4, r3, r2, r0, 16, 12);
S7(r4, r3, r2, r0, r1); store_and_load_keys(r1, r2, r0, r4, 12, 8);
S0(r1, r2, r0, r4, r3); store_and_load_keys(r0, r2, r4, r1, 8, 4);
S1(r0, r2, r4, r1, r3); store_and_load_keys(r3, r4, r1, r0, 4, 0);
S2(r3, r4, r1, r0, r2); store_and_load_keys(r2, r4, r3, r0, 0, -4);
S3(r2, r4, r3, r0, r1); store_and_load_keys(r0, r1, r4, r2, -4, -8);
S4(r0, r1, r4, r2, r3); store_and_load_keys(r1, r4, r2, r3, -8, -12);
S5(r1, r4, r2, r3, r0); store_and_load_keys(r0, r1, r4, r3, -12, -16);
S6(r0, r1, r4, r3, r2); store_and_load_keys(r4, r2, r1, r3, -16, -20);
S7(r4, r2, r1, r3, r0); store_and_load_keys(r0, r1, r3, r4, -20, -24);
S0(r0, r1, r3, r4, r2); store_and_load_keys(r3, r1, r4, r0, -24, -28);
k -= 50;
S1(r3, r1, r4, r0, r2); store_and_load_keys(r2, r4, r0, r3, 22, 18);
S2(r2, r4, r0, r3, r1); store_and_load_keys(r1, r4, r2, r3, 18, 14);
S3(r1, r4, r2, r3, r0); store_and_load_keys(r3, r0, r4, r1, 14, 10);
S4(r3, r0, r4, r1, r2); store_and_load_keys(r0, r4, r1, r2, 10, 6);
S5(r0, r4, r1, r2, r3); store_and_load_keys(r3, r0, r4, r2, 6, 2);
S6(r3, r0, r4, r2, r1); store_and_load_keys(r4, r1, r0, r2, 2, -2);
S7(r4, r1, r0, r2, r3); store_and_load_keys(r3, r0, r2, r4, -2, -6);
S0(r3, r0, r2, r4, r1); store_and_load_keys(r2, r0, r4, r3, -6, -10);
S1(r2, r0, r4, r3, r1); store_and_load_keys(r1, r4, r3, r2, -10, -14);
S2(r1, r4, r3, r2, r0); store_and_load_keys(r0, r4, r1, r2, -14, -18);
S3(r0, r4, r1, r2, r3); store_and_load_keys(r2, r3, r4, r0, -18, -22);
k -= 50;
S4(r2, r3, r4, r0, r1); store_and_load_keys(r3, r4, r0, r1, 28, 24);
S5(r3, r4, r0, r1, r2); store_and_load_keys(r2, r3, r4, r1, 24, 20);
S6(r2, r3, r4, r1, r0); store_and_load_keys(r4, r0, r3, r1, 20, 16);
S7(r4, r0, r3, r1, r2); store_and_load_keys(r2, r3, r1, r4, 16, 12);
S0(r2, r3, r1, r4, r0); store_and_load_keys(r1, r3, r4, r2, 12, 8);
S1(r1, r3, r4, r2, r0); store_and_load_keys(r0, r4, r2, r1, 8, 4);
S2(r0, r4, r2, r1, r3); store_and_load_keys(r3, r4, r0, r1, 4, 0);
S3(r3, r4, r0, r1, r2); storekeys(r1, r2, r4, r3, 0);
return 0;
}
EXPORT_SYMBOL_GPL(__serpent_setkey);
int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
return __serpent_setkey(crypto_tfm_ctx(tfm), key, keylen);
}
EXPORT_SYMBOL_GPL(serpent_setkey);
void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src)
{
const u32 *k = ctx->expkey;
const __le32 *s = (const __le32 *)src;
__le32 *d = (__le32 *)dst;
u32 r0, r1, r2, r3, r4;
/*
* Note: The conversions between u8* and u32* might cause trouble
* on architectures with stricter alignment rules than x86
*/
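/*
 * In practice the crypto layer honours .cra_alignmask = 3 from the
 * serpent_alg definition below, so src and dst arrive at least
 * 4-byte aligned and the __le32 loads here are safe.
 */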
r0 = le32_to_cpu(s[0]);
r1 = le32_to_cpu(s[1]);
r2 = le32_to_cpu(s[2]);
r3 = le32_to_cpu(s[3]);
K(r0, r1, r2, r3, 0);
S0(r0, r1, r2, r3, r4); LK(r2, r1, r3, r0, r4, 1);
S1(r2, r1, r3, r0, r4); LK(r4, r3, r0, r2, r1, 2);
S2(r4, r3, r0, r2, r1); LK(r1, r3, r4, r2, r0, 3);
S3(r1, r3, r4, r2, r0); LK(r2, r0, r3, r1, r4, 4);
S4(r2, r0, r3, r1, r4); LK(r0, r3, r1, r4, r2, 5);
S5(r0, r3, r1, r4, r2); LK(r2, r0, r3, r4, r1, 6);
S6(r2, r0, r3, r4, r1); LK(r3, r1, r0, r4, r2, 7);
S7(r3, r1, r0, r4, r2); LK(r2, r0, r4, r3, r1, 8);
S0(r2, r0, r4, r3, r1); LK(r4, r0, r3, r2, r1, 9);
S1(r4, r0, r3, r2, r1); LK(r1, r3, r2, r4, r0, 10);
S2(r1, r3, r2, r4, r0); LK(r0, r3, r1, r4, r2, 11);
S3(r0, r3, r1, r4, r2); LK(r4, r2, r3, r0, r1, 12);
S4(r4, r2, r3, r0, r1); LK(r2, r3, r0, r1, r4, 13);
S5(r2, r3, r0, r1, r4); LK(r4, r2, r3, r1, r0, 14);
S6(r4, r2, r3, r1, r0); LK(r3, r0, r2, r1, r4, 15);
S7(r3, r0, r2, r1, r4); LK(r4, r2, r1, r3, r0, 16);
S0(r4, r2, r1, r3, r0); LK(r1, r2, r3, r4, r0, 17);
S1(r1, r2, r3, r4, r0); LK(r0, r3, r4, r1, r2, 18);
S2(r0, r3, r4, r1, r2); LK(r2, r3, r0, r1, r4, 19);
S3(r2, r3, r0, r1, r4); LK(r1, r4, r3, r2, r0, 20);
S4(r1, r4, r3, r2, r0); LK(r4, r3, r2, r0, r1, 21);
S5(r4, r3, r2, r0, r1); LK(r1, r4, r3, r0, r2, 22);
S6(r1, r4, r3, r0, r2); LK(r3, r2, r4, r0, r1, 23);
S7(r3, r2, r4, r0, r1); LK(r1, r4, r0, r3, r2, 24);
S0(r1, r4, r0, r3, r2); LK(r0, r4, r3, r1, r2, 25);
S1(r0, r4, r3, r1, r2); LK(r2, r3, r1, r0, r4, 26);
S2(r2, r3, r1, r0, r4); LK(r4, r3, r2, r0, r1, 27);
S3(r4, r3, r2, r0, r1); LK(r0, r1, r3, r4, r2, 28);
S4(r0, r1, r3, r4, r2); LK(r1, r3, r4, r2, r0, 29);
S5(r1, r3, r4, r2, r0); LK(r0, r1, r3, r2, r4, 30);
S6(r0, r1, r3, r2, r4); LK(r3, r4, r1, r2, r0, 31);
S7(r3, r4, r1, r2, r0); K(r0, r1, r2, r3, 32);
d[0] = cpu_to_le32(r0);
d[1] = cpu_to_le32(r1);
d[2] = cpu_to_le32(r2);
d[3] = cpu_to_le32(r3);
}
EXPORT_SYMBOL_GPL(__serpent_encrypt);
static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
__serpent_encrypt(ctx, dst, src);
}
void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src)
{
const u32 *k = ctx->expkey;
const __le32 *s = (const __le32 *)src;
__le32 *d = (__le32 *)dst;
u32 r0, r1, r2, r3, r4;
r0 = le32_to_cpu(s[0]);
r1 = le32_to_cpu(s[1]);
r2 = le32_to_cpu(s[2]);
r3 = le32_to_cpu(s[3]);
K(r0, r1, r2, r3, 32);
SI7(r0, r1, r2, r3, r4); KL(r1, r3, r0, r4, r2, 31);
SI6(r1, r3, r0, r4, r2); KL(r0, r2, r4, r1, r3, 30);
SI5(r0, r2, r4, r1, r3); KL(r2, r3, r0, r4, r1, 29);
SI4(r2, r3, r0, r4, r1); KL(r2, r0, r1, r4, r3, 28);
SI3(r2, r0, r1, r4, r3); KL(r1, r2, r3, r4, r0, 27);
SI2(r1, r2, r3, r4, r0); KL(r2, r0, r4, r3, r1, 26);
SI1(r2, r0, r4, r3, r1); KL(r1, r0, r4, r3, r2, 25);
SI0(r1, r0, r4, r3, r2); KL(r4, r2, r0, r1, r3, 24);
SI7(r4, r2, r0, r1, r3); KL(r2, r1, r4, r3, r0, 23);
SI6(r2, r1, r4, r3, r0); KL(r4, r0, r3, r2, r1, 22);
SI5(r4, r0, r3, r2, r1); KL(r0, r1, r4, r3, r2, 21);
SI4(r0, r1, r4, r3, r2); KL(r0, r4, r2, r3, r1, 20);
SI3(r0, r4, r2, r3, r1); KL(r2, r0, r1, r3, r4, 19);
SI2(r2, r0, r1, r3, r4); KL(r0, r4, r3, r1, r2, 18);
SI1(r0, r4, r3, r1, r2); KL(r2, r4, r3, r1, r0, 17);
SI0(r2, r4, r3, r1, r0); KL(r3, r0, r4, r2, r1, 16);
SI7(r3, r0, r4, r2, r1); KL(r0, r2, r3, r1, r4, 15);
SI6(r0, r2, r3, r1, r4); KL(r3, r4, r1, r0, r2, 14);
SI5(r3, r4, r1, r0, r2); KL(r4, r2, r3, r1, r0, 13);
SI4(r4, r2, r3, r1, r0); KL(r4, r3, r0, r1, r2, 12);
SI3(r4, r3, r0, r1, r2); KL(r0, r4, r2, r1, r3, 11);
SI2(r0, r4, r2, r1, r3); KL(r4, r3, r1, r2, r0, 10);
SI1(r4, r3, r1, r2, r0); KL(r0, r3, r1, r2, r4, 9);
SI0(r0, r3, r1, r2, r4); KL(r1, r4, r3, r0, r2, 8);
SI7(r1, r4, r3, r0, r2); KL(r4, r0, r1, r2, r3, 7);
SI6(r4, r0, r1, r2, r3); KL(r1, r3, r2, r4, r0, 6);
SI5(r1, r3, r2, r4, r0); KL(r3, r0, r1, r2, r4, 5);
SI4(r3, r0, r1, r2, r4); KL(r3, r1, r4, r2, r0, 4);
SI3(r3, r1, r4, r2, r0); KL(r4, r3, r0, r2, r1, 3);
SI2(r4, r3, r0, r2, r1); KL(r3, r1, r2, r0, r4, 2);
SI1(r3, r1, r2, r0, r4); KL(r4, r1, r2, r0, r3, 1);
SI0(r4, r1, r2, r0, r3); K(r2, r3, r1, r4, 0);
d[0] = cpu_to_le32(r2);
d[1] = cpu_to_le32(r3);
d[2] = cpu_to_le32(r1);
d[3] = cpu_to_le32(r4);
}
EXPORT_SYMBOL_GPL(__serpent_decrypt);
static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
__serpent_decrypt(ctx, dst, src);
}
static struct crypto_alg serpent_alg = {
.cra_name = "serpent",
.cra_driver_name = "serpent-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = SERPENT_MIN_KEY_SIZE,
.cia_max_keysize = SERPENT_MAX_KEY_SIZE,
.cia_setkey = serpent_setkey,
.cia_encrypt = serpent_encrypt,
.cia_decrypt = serpent_decrypt } }
};
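/*
 * The modest priority (100) is deliberate: accelerated providers such
 * as the new serpent-sse2 glue register the same "serpent" name with a
 * higher cra_priority, so they win algorithm lookups while this generic
 * version remains the fallback.
 */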
static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
u8 rev_key[SERPENT_MAX_KEY_SIZE];
int i;
for (i = 0; i < keylen; ++i)
rev_key[keylen - i - 1] = key[i];
return serpent_setkey(tfm, rev_key, keylen);
}
static void tnepres_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const u32 * const s = (const u32 * const)src;
u32 * const d = (u32 * const)dst;
u32 rs[4], rd[4];
rs[0] = swab32(s[3]);
rs[1] = swab32(s[2]);
rs[2] = swab32(s[1]);
rs[3] = swab32(s[0]);
serpent_encrypt(tfm, (u8 *)rd, (u8 *)rs);
d[0] = swab32(rd[3]);
d[1] = swab32(rd[2]);
d[2] = swab32(rd[1]);
d[3] = swab32(rd[0]);
}
static void tnepres_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const u32 * const s = (const u32 * const)src;
u32 * const d = (u32 * const)dst;
u32 rs[4], rd[4];
rs[0] = swab32(s[3]);
rs[1] = swab32(s[2]);
rs[2] = swab32(s[1]);
rs[3] = swab32(s[0]);
serpent_decrypt(tfm, (u8 *)rd, (u8 *)rs);
d[0] = swab32(rd[3]);
d[1] = swab32(rd[2]);
d[2] = swab32(rd[1]);
d[3] = swab32(rd[0]);
}
static struct crypto_alg tnepres_alg = {
.cra_name = "tnepres",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = SERPENT_MIN_KEY_SIZE,
.cia_max_keysize = SERPENT_MAX_KEY_SIZE,
.cia_setkey = tnepres_setkey,
.cia_encrypt = tnepres_encrypt,
.cia_decrypt = tnepres_decrypt } }
};
static int __init serpent_mod_init(void)
{
int ret = crypto_register_alg(&serpent_alg);
if (ret)
return ret;
ret = crypto_register_alg(&tnepres_alg);
if (ret)
crypto_unregister_alg(&serpent_alg);
return ret;
}
static void __exit serpent_mod_fini(void)
{
crypto_unregister_alg(&tnepres_alg);
crypto_unregister_alg(&serpent_alg);
}
module_init(serpent_mod_init);
module_exit(serpent_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
MODULE_ALIAS("tnepres");
MODULE_ALIAS("serpent");
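The __serpent_setkey()/__serpent_encrypt()/__serpent_decrypt() exports are what let glue modules, such as the new SSE2 driver, drive the generic implementation on a bare struct serpent_ctx with no crypto_tfm involved. A hypothetical consumer (sketch only; the helper name is illustrative):

#include <crypto/serpent.h>

static int one_block_fallback(const u8 *key, unsigned int keylen,
			      u8 *dst, const u8 *src)
{
	struct serpent_ctx ctx;
	int err;

	err = __serpent_setkey(&ctx, key, keylen);
	if (err)
		return err;

	__serpent_encrypt(&ctx, dst, src);
	return 0;
}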


@@ -719,6 +719,207 @@ out:
crypto_free_ahash(tfm);
}
static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
{
if (ret == -EINPROGRESS || ret == -EBUSY) {
struct tcrypt_result *tr = req->base.data;
ret = wait_for_completion_interruptible(&tr->completion);
if (!ret)
ret = tr->err;
INIT_COMPLETION(tr->completion);
}
return ret;
}
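do_one_acipher_op() only waits when the driver went asynchronous; the completion it blocks on is signalled by the callback installed below via ablkcipher_request_set_callback(). tcrypt's callback behaves along these lines (a sketch reconstructed from the wait logic above, not quoted from the patch):

static void tcrypt_complete(struct crypto_async_request *req, int err)
{
	struct tcrypt_result *res = req->data;

	if (err == -EINPROGRESS)	/* intermediate notification, keep waiting */
		return;

	res->err = err;
	complete(&res->completion);
}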
static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
int blen, int sec)
{
unsigned long start, end;
int bcount;
int ret;
for (start = jiffies, end = start + sec * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
if (enc)
ret = do_one_acipher_op(req,
crypto_ablkcipher_encrypt(req));
else
ret = do_one_acipher_op(req,
crypto_ablkcipher_decrypt(req));
if (ret)
return ret;
}
pr_cont("%d operations in %d seconds (%ld bytes)\n",
bcount, sec, (long)bcount * blen);
return 0;
}
static int test_acipher_cycles(struct ablkcipher_request *req, int enc,
int blen)
{
unsigned long cycles = 0;
int ret = 0;
int i;
/* Warm-up run. */
for (i = 0; i < 4; i++) {
if (enc)
ret = do_one_acipher_op(req,
crypto_ablkcipher_encrypt(req));
else
ret = do_one_acipher_op(req,
crypto_ablkcipher_decrypt(req));
if (ret)
goto out;
}
/* The real thing. */
for (i = 0; i < 8; i++) {
cycles_t start, end;
start = get_cycles();
if (enc)
ret = do_one_acipher_op(req,
crypto_ablkcipher_encrypt(req));
else
ret = do_one_acipher_op(req,
crypto_ablkcipher_decrypt(req));
end = get_cycles();
if (ret)
goto out;
cycles += end - start;
}
out:
if (ret == 0)
pr_cont("1 operation in %lu cycles (%d bytes)\n",
(cycles + 4) / 8, blen);
return ret;
}
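The figure printed is the mean of the eight timed runs; adding 4 before dividing by 8 rounds the cycle count to the nearest integer instead of truncating.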
static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
struct cipher_speed_template *template,
unsigned int tcount, u8 *keysize)
{
unsigned int ret, i, j, iv_len;
struct tcrypt_result tresult;
const char *key;
char iv[128];
struct ablkcipher_request *req;
struct crypto_ablkcipher *tfm;
const char *e;
u32 *b_size;
if (enc == ENCRYPT)
e = "encryption";
else
e = "decryption";
pr_info("\ntesting speed of async %s %s\n", algo, e);
init_completion(&tresult.completion);
tfm = crypto_alloc_ablkcipher(algo, 0, 0);
if (IS_ERR(tfm)) {
pr_err("failed to load transform for %s: %ld\n", algo,
PTR_ERR(tfm));
return;
}
req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
pr_err("tcrypt: skcipher: Failed to allocate request for %s\n",
algo);
goto out;
}
ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tcrypt_complete, &tresult);
i = 0;
do {
b_size = block_sizes;
do {
struct scatterlist sg[TVMEMSIZE];
if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for "
"tvmem (%lu)\n", *keysize + *b_size,
TVMEMSIZE * PAGE_SIZE);
goto out_free_req;
}
pr_info("test %u (%d bit key, %d byte blocks): ", i,
*keysize * 8, *b_size);
memset(tvmem[0], 0xff, PAGE_SIZE);
/* set key, plain text and IV */
key = tvmem[0];
for (j = 0; j < tcount; j++) {
if (template[j].klen == *keysize) {
key = template[j].key;
break;
}
}
crypto_ablkcipher_clear_flags(tfm, ~0);
ret = crypto_ablkcipher_setkey(tfm, key, *keysize);
if (ret) {
pr_err("setkey() failed flags=%x\n",
crypto_ablkcipher_get_flags(tfm));
goto out_free_req;
}
sg_init_table(sg, TVMEMSIZE);
sg_set_buf(sg, tvmem[0] + *keysize,
PAGE_SIZE - *keysize);
for (j = 1; j < TVMEMSIZE; j++) {
sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
memset(tvmem[j], 0xff, PAGE_SIZE);
}
iv_len = crypto_ablkcipher_ivsize(tfm);
if (iv_len)
memset(&iv, 0xff, iv_len);
ablkcipher_request_set_crypt(req, sg, sg, *b_size, iv);
if (sec)
ret = test_acipher_jiffies(req, enc,
*b_size, sec);
else
ret = test_acipher_cycles(req, enc,
*b_size);
if (ret) {
pr_err("%s() failed flags=%x\n", e,
crypto_ablkcipher_get_flags(tfm));
break;
}
b_size++;
i++;
} while (*b_size);
keysize++;
} while (*keysize);
out_free_req:
ablkcipher_request_free(req);
out:
crypto_free_ablkcipher(tfm);
}
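As with the synchronous helpers, these routines are reached by loading tcrypt with a mode number; the new case 503 below, for example, runs this async path over all the serpent templates (e.g. modprobe tcrypt mode=503 sec=1).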
static void test_available(void)
{
char **name = check;
@@ -789,10 +990,16 @@ static int do_test(int m)
ret += tcrypt_test("ecb(twofish)");
ret += tcrypt_test("cbc(twofish)");
ret += tcrypt_test("ctr(twofish)");
ret += tcrypt_test("lrw(twofish)");
ret += tcrypt_test("xts(twofish)");
break;
case 9:
ret += tcrypt_test("ecb(serpent)");
ret += tcrypt_test("cbc(serpent)");
ret += tcrypt_test("ctr(serpent)");
ret += tcrypt_test("lrw(serpent)");
ret += tcrypt_test("xts(serpent)");
break;
case 10:
@@ -1045,6 +1252,14 @@ static int do_test(int m)
speed_template_16_24_32);
test_cipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_cipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
break;
case 203:
@@ -1089,6 +1304,29 @@ static int do_test(int m)
speed_template_16_32);
break;
case 207:
test_cipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_cipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_cipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_cipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 300:
/* fall through */
@@ -1241,6 +1479,78 @@ static int do_test(int m)
case 499:
break;
case 500:
test_acipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_acipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
break;
case 501:
test_acipher_speed("ecb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("ecb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("cbc(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("cbc(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
break;
case 502:
test_acipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("ecb(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
break;
case 503:
test_acipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_acipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 1000:
test_available();
break;


@@ -51,7 +51,9 @@ static u8 speed_template_8_32[] = {8, 32, 0};
static u8 speed_template_16_32[] = {16, 32, 0};
static u8 speed_template_16_24_32[] = {16, 24, 32, 0};
static u8 speed_template_32_40_48[] = {32, 40, 48, 0};
static u8 speed_template_32_48[] = {32, 48, 0};
static u8 speed_template_32_48_64[] = {32, 48, 64, 0};
static u8 speed_template_32_64[] = {32, 64, 0};
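/*
 * Each template is a zero-terminated list of key sizes in bytes. The
 * new LRW sizes are cipher key plus a 16-byte tweak key (32 = 16 + 16,
 * 48 = 32 + 16); the XTS sizes are two full cipher keys (32 = 2 * 16,
 * 64 = 2 * 32).
 */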
/*
* Digest speed tests


@@ -1534,6 +1534,21 @@ static int alg_test_null(const struct alg_test_desc *desc,
/* Please keep this list sorted by algorithm name. */
static const struct alg_test_desc alg_test_descs[] = {
{
.alg = "__cbc-serpent-sse2",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "__driver-cbc-aes-aesni",
.test = alg_test_null,
.suite = {
@@ -1548,6 +1563,21 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "__driver-cbc-serpent-sse2",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "__driver-ecb-aes-aesni",
.test = alg_test_null,
@@ -1563,6 +1593,21 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "__driver-ecb-serpent-sse2",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "__ghash-pclmulqdqni",
.test = alg_test_null,
@@ -1674,6 +1719,21 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "cbc(serpent)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = serpent_cbc_enc_tv_template,
.count = SERPENT_CBC_ENC_TEST_VECTORS
},
.dec = {
.vecs = serpent_cbc_dec_tv_template,
.count = SERPENT_CBC_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "cbc(twofish)",
.test = alg_test_skcipher,
@@ -1730,6 +1790,21 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "cryptd(__driver-ecb-serpent-sse2)",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "cryptd(__ghash-pclmulqdqni)",
.test = alg_test_null,
@@ -1770,6 +1845,21 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "ctr(serpent)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = serpent_ctr_enc_tv_template,
.count = SERPENT_CTR_ENC_TEST_VECTORS
},
.dec = {
.vecs = serpent_ctr_dec_tv_template,
.count = SERPENT_CTR_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ctr(twofish)",
.test = alg_test_skcipher,
@@ -2206,6 +2296,36 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "lrw(serpent)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = serpent_lrw_enc_tv_template,
.count = SERPENT_LRW_ENC_TEST_VECTORS
},
.dec = {
.vecs = serpent_lrw_dec_tv_template,
.count = SERPENT_LRW_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "lrw(twofish)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = tf_lrw_enc_tv_template,
.count = TF_LRW_ENC_TEST_VECTORS
},
.dec = {
.vecs = tf_lrw_dec_tv_template,
.count = TF_LRW_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "lzo",
.test = alg_test_comp,
@@ -2513,6 +2633,36 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "xts(serpent)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = serpent_xts_enc_tv_template,
.count = SERPENT_XTS_ENC_TEST_VECTORS
},
.dec = {
.vecs = serpent_xts_dec_tv_template,
.count = SERPENT_XTS_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "xts(twofish)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = tf_xts_enc_tv_template,
.count = TF_XTS_ENC_TEST_VECTORS
},
.dec = {
.vecs = tf_xts_dec_tv_template,
.count = TF_XTS_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "zlib",
.test = alg_test_pcomp,

The diff for this file is not shown because of its large size.


@@ -580,12 +580,9 @@ static const u8 calc_sb_tbl[512] = {
ctx->a[(j) + 1] = rol32(y, 9)
/* Perform the key setup. */
int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key,
unsigned int key_len, u32 *flags)
{
struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
int i, j, k;
/* Temporaries for CALC_K. */
@@ -701,7 +698,13 @@ int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
return 0;
}
EXPORT_SYMBOL_GPL(__twofish_setkey);
int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
{
return __twofish_setkey(crypto_tfm_ctx(tfm), key, key_len,
&tfm->crt_flags);
}
EXPORT_SYMBOL_GPL(twofish_setkey);
MODULE_LICENSE("GPL");
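The split mirrors the serpent change above: glue code that owns a plain struct twofish_ctx can now key it directly, passing its own flags word instead of a crypto_tfm. A hypothetical caller (sketch only):

static int key_private_ctx(struct twofish_ctx *ctx,
			   const u8 *key, unsigned int keylen)
{
	u32 flags = 0;

	return __twofish_setkey(ctx, key, keylen, &flags);
}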


@@ -21,6 +21,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
@@ -96,7 +97,7 @@ static int crypt(struct blkcipher_desc *d,
{
int err;
unsigned int avail;
const int bs = crypto_cipher_blocksize(ctx->child);
const int bs = XTS_BLOCK_SIZE;
struct sinfo s = {
.tfm = crypto_cipher_tfm(ctx->child),
.fn = fn
@@ -165,6 +166,78 @@ static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
crypto_cipher_alg(ctx->child)->cia_decrypt);
}
int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
struct scatterlist *ssrc, unsigned int nbytes,
struct xts_crypt_req *req)
{
const unsigned int bsize = XTS_BLOCK_SIZE;
const unsigned int max_blks = req->tbuflen / bsize;
struct blkcipher_walk walk;
unsigned int nblocks;
be128 *src, *dst, *t;
be128 *t_buf = req->tbuf;
int err, i;
BUG_ON(max_blks < 1);
blkcipher_walk_init(&walk, sdst, ssrc, nbytes);
err = blkcipher_walk_virt(desc, &walk);
nbytes = walk.nbytes;
if (!nbytes)
return err;
nblocks = min(nbytes / bsize, max_blks);
src = (be128 *)walk.src.virt.addr;
dst = (be128 *)walk.dst.virt.addr;
/* calculate first value of T */
req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);
i = 0;
goto first;
for (;;) {
do {
for (i = 0; i < nblocks; i++) {
gf128mul_x_ble(&t_buf[i], t);
first:
t = &t_buf[i];
/* PP <- T xor P */
be128_xor(dst + i, t, src + i);
}
/* CC <- E(Key2,PP) */
req->crypt_fn(req->crypt_ctx, (u8 *)dst,
nblocks * bsize);
/* C <- T xor CC */
for (i = 0; i < nblocks; i++)
be128_xor(dst + i, dst + i, &t_buf[i]);
src += nblocks;
dst += nblocks;
nbytes -= nblocks * bsize;
nblocks = min(nbytes / bsize, max_blks);
} while (nblocks > 0);
*(be128 *)walk.iv = *t;
err = blkcipher_walk_done(desc, &walk, nbytes);
nbytes = walk.nbytes;
if (!nbytes)
break;
nblocks = min(nbytes / bsize, max_blks);
src = (be128 *)walk.src.virt.addr;
dst = (be128 *)walk.dst.virt.addr;
}
return err;
}
EXPORT_SYMBOL_GPL(xts_crypt);
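A caller hands xts_crypt() a struct xts_crypt_req naming a tweak cipher context and function (keyed with the second XTS key), a bulk crypt function (first key), and a bounce buffer for precomputed tweak values. A sketch modelled on the new SSE2 glue; the my_* names and context layout are illustrative, not from this patch:

static int my_xts_encrypt(struct blkcipher_desc *desc,
			  struct scatterlist *dst, struct scatterlist *src,
			  unsigned int nbytes)
{
	struct my_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];				/* room for 8 tweak values */
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.tweak_ctx = &ctx->tweak_ctx,	/* keyed with key2 */
		.tweak_fn = my_tweak_encrypt,	/* computes T0 = E(key2, IV) */
		.crypt_ctx = &ctx->crypt_ctx,	/* keyed with key1 */
		.crypt_fn = my_bulk_encrypt,	/* CC <- E(key1, PP) */
	};

	return xts_crypt(desc, dst, src, nbytes, &req);
}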
static int init_tfm(struct crypto_tfm *tfm)
{
struct crypto_cipher *cipher;
@@ -177,7 +250,7 @@ static int init_tfm(struct crypto_tfm *tfm)
if (IS_ERR(cipher))
return PTR_ERR(cipher);
if (crypto_cipher_blocksize(cipher) != 16) {
if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) {
*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
crypto_free_cipher(cipher);
return -EINVAL;
@@ -192,7 +265,7 @@ static int init_tfm(struct crypto_tfm *tfm)
}
/* this check isn't really needed, leave it here just in case */
if (crypto_cipher_blocksize(cipher) != 16) {
if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) {
crypto_free_cipher(cipher);
crypto_free_cipher(ctx->child);
*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;


@@ -141,17 +141,7 @@ static struct platform_driver atmel_trng_driver = {
},
};
static int __init atmel_trng_init(void)
{
return platform_driver_register(&atmel_trng_driver);
}
module_init(atmel_trng_init);
static void __exit atmel_trng_exit(void)
{
platform_driver_unregister(&atmel_trng_driver);
}
module_exit(atmel_trng_exit);
module_platform_driver(atmel_trng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
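All of the hw_random and crypto driver conversions in this series replace the same init/exit boilerplate with one macro. module_platform_driver() (linux/platform_device.h) was added for exactly this pattern and expands to roughly the code deleted above:

#define module_platform_driver(__platform_driver) \
static int __init __platform_driver##_init(void) \
{ \
	return platform_driver_register(&(__platform_driver)); \
} \
module_init(__platform_driver##_init); \
static void __exit __platform_driver##_exit(void) \
{ \
	platform_driver_unregister(&(__platform_driver)); \
} \
module_exit(__platform_driver##_exit);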


@@ -770,15 +770,4 @@ static struct platform_driver n2rng_driver = {
.remove = __devexit_p(n2rng_remove),
};
static int __init n2rng_init(void)
{
return platform_driver_register(&n2rng_driver);
}
static void __exit n2rng_exit(void)
{
platform_driver_unregister(&n2rng_driver);
}
module_init(n2rng_init);
module_exit(n2rng_exit);
module_platform_driver(n2rng_driver);


@@ -131,18 +131,7 @@ static struct platform_driver octeon_rng_driver = {
.remove = __exit_p(octeon_rng_remove),
};
static int __init octeon_rng_mod_init(void)
{
return platform_driver_register(&octeon_rng_driver);
}
static void __exit octeon_rng_mod_exit(void)
{
platform_driver_unregister(&octeon_rng_driver);
}
module_init(octeon_rng_mod_init);
module_exit(octeon_rng_mod_exit);
module_platform_driver(octeon_rng_driver);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");


@@ -148,17 +148,7 @@ static struct platform_driver rng_driver = {
.remove = rng_remove,
};
static int __init rng_init(void)
{
return platform_driver_register(&rng_driver);
}
module_init(rng_init);
static void __exit rng_exit(void)
{
platform_driver_unregister(&rng_driver);
}
module_exit(rng_exit);
module_platform_driver(rng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");


@@ -191,17 +191,7 @@ static struct platform_driver picoxcell_trng_driver = {
},
};
static int __init picoxcell_trng_init(void)
{
return platform_driver_register(&picoxcell_trng_driver);
}
module_init(picoxcell_trng_init);
static void __exit picoxcell_trng_exit(void)
{
platform_driver_unregister(&picoxcell_trng_driver);
}
module_exit(picoxcell_trng_exit);
module_platform_driver(picoxcell_trng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");


@@ -139,17 +139,7 @@ static struct platform_driver ppc4xx_rng_driver = {
.remove = ppc4xx_rng_remove,
};
static int __init ppc4xx_rng_init(void)
{
return platform_driver_register(&ppc4xx_rng_driver);
}
module_init(ppc4xx_rng_init);
static void __exit ppc4xx_rng_exit(void)
{
platform_driver_unregister(&ppc4xx_rng_driver);
}
module_exit(ppc4xx_rng_exit);
module_platform_driver(ppc4xx_rng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Josh Boyer <jwboyer@linux.vnet.ibm.com>");


@@ -149,18 +149,7 @@ static struct platform_driver timeriomem_rng_driver = {
.remove = __devexit_p(timeriomem_rng_remove),
};
static int __init timeriomem_rng_init(void)
{
return platform_driver_register(&timeriomem_rng_driver);
}
static void __exit timeriomem_rng_exit(void)
{
platform_driver_unregister(&timeriomem_rng_driver);
}
module_init(timeriomem_rng_init);
module_exit(timeriomem_rng_exit);
module_platform_driver(timeriomem_rng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");


@@ -1292,18 +1292,7 @@ static struct platform_driver crypto4xx_driver = {
.remove = crypto4xx_remove,
};
static int __init crypto4xx_init(void)
{
return platform_driver_register(&crypto4xx_driver);
}
static void __exit crypto4xx_exit(void)
{
platform_driver_unregister(&crypto4xx_driver);
}
module_init(crypto4xx_init);
module_exit(crypto4xx_exit);
module_platform_driver(crypto4xx_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");


@@ -113,7 +113,7 @@ static inline void append_dec_shr_done(u32 *desc)
jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
set_jump_tgt_here(desc, jump_cmd);
append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
/*
@@ -213,7 +213,7 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
set_jump_tgt_here(desc, key_jump_cmd);
/* Propagate errors from shared to job descriptor */
append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -310,7 +310,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* Only propagate error immediately if shared */
jump_cmd = append_jump(desc, JUMP_TEST_ALL);
set_jump_tgt_here(desc, key_jump_cmd);
append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
set_jump_tgt_here(desc, jump_cmd);
/* Class 2 operation */
@@ -683,7 +683,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
set_jump_tgt_here(desc, key_jump_cmd);
/* Propagate errors from shared to job descriptor */
append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
/* Load iv */
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
@@ -724,7 +724,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* For aead, only propagate error immediately if shared */
jump_cmd = append_jump(desc, JUMP_TEST_ALL);
set_jump_tgt_here(desc, key_jump_cmd);
append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
set_jump_tgt_here(desc, jump_cmd);
/* load IV */
@ -1805,6 +1805,25 @@ struct caam_alg_template {
static struct caam_alg_template driver_algs[] = {
/* single-pass ipsec_esp descriptor */
{
.name = "authenc(hmac(md5),cbc(aes))",
.driver_name = "authenc-hmac-md5-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
.givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
{
.name = "authenc(hmac(sha1),cbc(aes))",
.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
@ -1864,6 +1883,25 @@ static struct caam_alg_template driver_algs[] = {
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
{
.name = "authenc(hmac(md5),cbc(des3_ede))",
.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
.givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
{
.name = "authenc(hmac(sha1),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
@ -1923,6 +1961,25 @@ static struct caam_alg_template driver_algs[] = {
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
{
.name = "authenc(hmac(md5),cbc(des))",
.driver_name = "authenc-hmac-md5-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
.givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
{
.name = "authenc(hmac(sha1),cbc(des))",
.driver_name = "authenc-hmac-sha1-cbc-des-caam",

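The three new CAAM templates above wire hmac(md5) authentication into the existing cbc(aes), cbc(des3_ede) and cbc(des) single-pass descriptors. Users reach them through the usual crypto API names; a minimal sketch (error handling elided, template name taken from the table above):

        struct crypto_aead *tfm;

        tfm = crypto_alloc_aead("authenc(hmac(md5),cbc(aes))", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
        /* ... setkey/setauthsize, build and queue aead requests ... */
        crypto_free_aead(tfm);

The "-caam" driver_name variant is selected automatically whenever the hardware implementation registers with the highest priority.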

@ -28,6 +28,7 @@
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>


@ -52,8 +52,6 @@ static int caam_probe(struct platform_device *pdev)
struct caam_ctrl __iomem *ctrl;
struct caam_full __iomem *topregs;
struct caam_drv_private *ctrlpriv;
struct caam_deco **deco;
u32 deconum;
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
#endif
@ -92,17 +90,6 @@ static int caam_probe(struct platform_device *pdev)
if (sizeof(dma_addr_t) == sizeof(u64))
dma_set_mask(dev, DMA_BIT_MASK(36));
/* Find out how many DECOs are present */
deconum = (rd_reg64(&topregs->ctrl.perfmon.cha_num) &
CHA_NUM_DECONUM_MASK) >> CHA_NUM_DECONUM_SHIFT;
ctrlpriv->deco = kmalloc(deconum * sizeof(struct caam_deco *),
GFP_KERNEL);
deco = (struct caam_deco __force **)&topregs->deco;
for (d = 0; d < deconum; d++)
ctrlpriv->deco[d] = deco[d];
/*
* Detect and enable JobRs
* First, find out how many ring spec'ed, allocate references
@ -253,18 +240,7 @@ static struct platform_driver caam_driver = {
.remove = __devexit_p(caam_remove),
};
static int __init caam_base_init(void)
{
return platform_driver_register(&caam_driver);
}
static void __exit caam_base_exit(void)
{
return platform_driver_unregister(&caam_driver);
}
module_init(caam_base_init);
module_exit(caam_base_exit);
module_platform_driver(caam_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");

(file diff not shown because of its large size)


@ -18,9 +18,10 @@
#define PRINT_POS
#endif
#define SET_OK_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
LDST_SRCDST_WORD_DECOCTRL | \
(LDOFF_CHG_SHARE_OK_PROP << LDST_OFFSET_SHIFT))
#define SET_OK_NO_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
LDST_SRCDST_WORD_DECOCTRL | \
(LDOFF_CHG_SHARE_OK_NO_PROP << \
LDST_OFFSET_SHIFT))
#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
LDST_SRCDST_WORD_DECOCTRL | \
(LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))


@ -657,7 +657,6 @@ struct caam_full {
u64 rsvd[512];
struct caam_assurance assure;
struct caam_queue_if qi;
struct caam_deco *deco;
};
#endif /* REGS_H */


@ -1128,17 +1128,7 @@ static struct platform_driver marvell_crypto = {
};
MODULE_ALIAS("platform:mv_crypto");
static int __init mv_crypto_init(void)
{
return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);
static void __exit mv_crypto_exit(void)
{
platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);
module_platform_driver(marvell_crypto);
MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");


@ -873,7 +873,7 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
* request for any other size (192 bits) then we need to do a software
* fallback.
*/
if ((len != AES_KEYSIZE_128 || len != AES_KEYSIZE_256) &&
if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
ctx->sw_cipher) {
/*
* Set the fallback transform to use the same request flags as
@ -886,7 +886,7 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
if (err)
goto sw_setkey_failed;
} else if ((len != AES_KEYSIZE_128 || len != AES_KEYSIZE_256) &&
} else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
!ctx->sw_cipher)
err = -EINVAL;
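The picoxcell fix above is the classic and/or inversion: a value can never equal two different constants at once, so the old (len != AES_KEYSIZE_128 || len != AES_KEYSIZE_256) test is true for every len, valid key sizes included. A standalone illustration:

#include <stdio.h>

#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_256 32

int main(void)
{
        unsigned int len = AES_KEYSIZE_128;     /* a perfectly valid key size */

        /* old, buggy test: always true, even for 16- and 32-byte keys */
        if (len != AES_KEYSIZE_128 || len != AES_KEYSIZE_256)
                printf("old test wrongly matched a valid 16-byte key\n");

        /* fixed test: true only for unsupported sizes such as 24 bytes */
        if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256)
                printf("fixed test: never reached for len=16 or len=32\n");

        return 0;
}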
@ -1854,17 +1854,7 @@ static struct platform_driver spacc_driver = {
.id_table = spacc_id_table,
};
static int __init spacc_init(void)
{
return platform_driver_register(&spacc_driver);
}
module_init(spacc_init);
static void __exit spacc_exit(void)
{
platform_driver_unregister(&spacc_driver);
}
module_exit(spacc_exit);
module_platform_driver(spacc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");


@ -683,18 +683,7 @@ static struct platform_driver s5p_aes_crypto = {
},
};
static int __init s5p_aes_mod_init(void)
{
return platform_driver_register(&s5p_aes_crypto);
}
static void __exit s5p_aes_mod_exit(void)
{
platform_driver_unregister(&s5p_aes_crypto);
}
module_init(s5p_aes_mod_init);
module_exit(s5p_aes_mod_exit);
module_platform_driver(s5p_aes_crypto);
MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");


@ -99,6 +99,8 @@ struct talitos_request {
/* per-channel fifo management */
struct talitos_channel {
void __iomem *reg;
/* request fifo */
struct talitos_request *fifo;
@ -120,7 +122,7 @@ struct talitos_private {
struct device *dev;
struct platform_device *ofdev;
void __iomem *reg;
int irq;
int irq[2];
/* SEC version geometry (from device tree node) */
unsigned int num_channels;
@ -144,7 +146,7 @@ struct talitos_private {
atomic_t last_chan ____cacheline_aligned;
/* request callback tasklet */
struct tasklet_struct done_task;
struct tasklet_struct done_task[2];
/* list of registered algorithms */
struct list_head alg_list;
@ -157,6 +159,7 @@ struct talitos_private {
#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
#define TALITOS_FTR_SHA224_HWINIT 0x00000004
#define TALITOS_FTR_HMAC_OK 0x00000008
static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
{
@ -196,9 +199,9 @@ static int reset_channel(struct device *dev, int ch)
struct talitos_private *priv = dev_get_drvdata(dev);
unsigned int timeout = TALITOS_TIMEOUT;
setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);
setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);
while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
&& --timeout)
cpu_relax();
@ -208,12 +211,12 @@ static int reset_channel(struct device *dev, int ch)
}
/* set 36-bit addressing, done writeback enable and done IRQ enable */
setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
/* and ICCR writeback, if available */
if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
setbits32(priv->reg + TALITOS_CCCR_LO(ch),
setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
TALITOS_CCCR_LO_IWSE);
return 0;
@ -223,13 +226,19 @@ static int reset_device(struct device *dev)
{
struct talitos_private *priv = dev_get_drvdata(dev);
unsigned int timeout = TALITOS_TIMEOUT;
u32 mcr = TALITOS_MCR_SWR;
setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);
setbits32(priv->reg + TALITOS_MCR, mcr);
while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
&& --timeout)
cpu_relax();
if (priv->irq[1]) {
mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
setbits32(priv->reg + TALITOS_MCR, mcr);
}
if (timeout == 0) {
dev_err(dev, "failed to reset device\n");
return -EIO;
@ -327,8 +336,9 @@ static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
/* GO! */
wmb();
out_be32(priv->reg + TALITOS_FF(ch), upper_32_bits(request->dma_desc));
out_be32(priv->reg + TALITOS_FF_LO(ch),
out_be32(priv->chan[ch].reg + TALITOS_FF,
upper_32_bits(request->dma_desc));
out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
lower_32_bits(request->dma_desc));
spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
@ -397,21 +407,32 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
/*
* process completed requests for channels that have done status
*/
static void talitos_done(unsigned long data)
{
struct device *dev = (struct device *)data;
struct talitos_private *priv = dev_get_drvdata(dev);
int ch;
for (ch = 0; ch < priv->num_channels; ch++)
flush_channel(dev, ch, 0, 0);
/* At this point, all completed channels have been processed.
* Unmask done interrupts for channels completed later on.
*/
setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
#define DEF_TALITOS_DONE(name, ch_done_mask) \
static void talitos_done_##name(unsigned long data) \
{ \
struct device *dev = (struct device *)data; \
struct talitos_private *priv = dev_get_drvdata(dev); \
\
if (ch_done_mask & 1) \
flush_channel(dev, 0, 0, 0); \
if (priv->num_channels == 1) \
goto out; \
if (ch_done_mask & (1 << 2)) \
flush_channel(dev, 1, 0, 0); \
if (ch_done_mask & (1 << 4)) \
flush_channel(dev, 2, 0, 0); \
if (ch_done_mask & (1 << 6)) \
flush_channel(dev, 3, 0, 0); \
\
out: \
/* At this point, all completed channels have been processed */ \
/* Unmask done interrupts for channels completed later on. */ \
setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \
}
DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
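The two tasklet flavours key off the SEC interrupt layout: channel n reports done on ISR bit 2n and error on bit 2n+1, which is exactly where the 0x55/0xaa (all four channels) and 0x11/0x22, 0x44/0x88 (even/odd channel pairs) masks come from. A kernel-style sketch of the mapping; these helpers are illustrative, not part of the patch:

static inline u32 talitos_ch_done_bit(int ch)
{
        return 1U << (2 * ch);          /* ch0 0x01, ch1 0x04, ch2 0x10, ch3 0x40 */
}

static inline u32 talitos_ch_err_bit(int ch)
{
        return 1U << (2 * ch + 1);      /* ch0 0x02, ch1 0x08, ch2 0x20, ch3 0x80 */
}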
/*
* locate current (offending) descriptor
@ -422,7 +443,7 @@ static u32 current_desc_hdr(struct device *dev, int ch)
int tail = priv->chan[ch].tail;
dma_addr_t cur_desc;
cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
cur_desc = in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
tail = (tail + 1) & (priv->fifo_len - 1);
@ -444,7 +465,7 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
int i;
if (!desc_hdr)
desc_hdr = in_be32(priv->reg + TALITOS_DESCBUF(ch));
desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
switch (desc_hdr & DESC_HDR_SEL0_MASK) {
case DESC_HDR_SEL0_AFEU:
@ -506,16 +527,15 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
for (i = 0; i < 8; i++)
dev_err(dev, "DESCBUF 0x%08x_%08x\n",
in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
/*
* recover from error interrupts
*/
static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
struct device *dev = (struct device *)data;
struct talitos_private *priv = dev_get_drvdata(dev);
unsigned int timeout = TALITOS_TIMEOUT;
int ch, error, reset_dev = 0, reset_ch = 0;
@ -528,8 +548,8 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
error = -EINVAL;
v = in_be32(priv->reg + TALITOS_CCPSR(ch));
v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));
v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
if (v_lo & TALITOS_CCPSR_LO_DOF) {
dev_err(dev, "double fetch fifo overflow error\n");
@ -567,10 +587,10 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
if (reset_ch) {
reset_channel(dev, ch);
} else {
setbits32(priv->reg + TALITOS_CCCR(ch),
setbits32(priv->chan[ch].reg + TALITOS_CCCR,
TALITOS_CCCR_CONT);
setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
TALITOS_CCCR_CONT) && --timeout)
cpu_relax();
if (timeout == 0) {
@ -580,7 +600,7 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
}
}
}
if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
dev_err(dev, "done overflow, internal time out, or rngu error: "
"ISR 0x%08x_%08x\n", isr, isr_lo);
@ -593,30 +613,35 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
}
}
static irqreturn_t talitos_interrupt(int irq, void *data)
{
struct device *dev = data;
struct talitos_private *priv = dev_get_drvdata(dev);
u32 isr, isr_lo;
isr = in_be32(priv->reg + TALITOS_ISR);
isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
/* Acknowledge interrupt */
out_be32(priv->reg + TALITOS_ICR, isr);
out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);
if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
talitos_error((unsigned long)data, isr, isr_lo);
else
if (likely(isr & TALITOS_ISR_CHDONE)) {
/* mask further done interrupts. */
clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE);
/* done_task will unmask done interrupts at exit */
tasklet_schedule(&priv->done_task);
}
return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
static irqreturn_t talitos_interrupt_##name(int irq, void *data) \
{ \
struct device *dev = data; \
struct talitos_private *priv = dev_get_drvdata(dev); \
u32 isr, isr_lo; \
\
isr = in_be32(priv->reg + TALITOS_ISR); \
isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
/* Acknowledge interrupt */ \
out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
\
if (unlikely((isr & ~TALITOS_ISR_4CHDONE) & ch_err_mask || isr_lo)) \
talitos_error(dev, isr, isr_lo); \
else \
if (likely(isr & ch_done_mask)) { \
/* mask further done interrupts. */ \
clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
/* done_task will unmask done interrupts at exit */ \
tasklet_schedule(&priv->done_task[tlet]); \
} \
\
return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
IRQ_NONE; \
}
DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
/*
* hwrng
@ -1874,6 +1899,97 @@ static int ahash_digest(struct ahash_request *areq)
return ahash_process_req(areq, areq->nbytes);
}
struct keyhash_result {
struct completion completion;
int err;
};
static void keyhash_complete(struct crypto_async_request *req, int err)
{
struct keyhash_result *res = req->data;
if (err == -EINPROGRESS)
return;
res->err = err;
complete(&res->completion);
}
static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
u8 *hash)
{
struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct scatterlist sg[1];
struct ahash_request *req;
struct keyhash_result hresult;
int ret;
init_completion(&hresult.completion);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req)
return -ENOMEM;
/* Keep tfm keylen == 0 during hash of the long key */
ctx->keylen = 0;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
keyhash_complete, &hresult);
sg_init_one(&sg[0], key, keylen);
ahash_request_set_crypt(req, sg, hash, keylen);
ret = crypto_ahash_digest(req);
switch (ret) {
case 0:
break;
case -EINPROGRESS:
case -EBUSY:
ret = wait_for_completion_interruptible(
&hresult.completion);
if (!ret)
ret = hresult.err;
break;
default:
break;
}
ahash_request_free(req);
return ret;
}
static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
unsigned int blocksize =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int digestsize = crypto_ahash_digestsize(tfm);
unsigned int keysize = keylen;
u8 hash[SHA512_DIGEST_SIZE];
int ret;
if (keylen <= blocksize)
memcpy(ctx->key, key, keysize);
else {
/* Must get the hash of the long key */
ret = keyhash(tfm, key, keylen, hash);
if (ret) {
crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
keysize = digestsize;
memcpy(ctx->key, hash, digestsize);
}
ctx->keylen = keysize;
return 0;
}
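ahash_setkey() above is the standard RFC 2104 key preparation: a key that fits in one hash block is used verbatim, while a longer key is first digested down to digestsize bytes. In short:

        K' = K          if keylen <= blocksize
        K' = H(K)       otherwise (keysize becomes digestsize)

keyhash() computes that digest through the driver's own asynchronous hash path, temporarily forcing ctx->keylen to 0 so the key itself is hashed unkeyed, and blocks on a completion until the request finishes.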
struct talitos_alg_template {
u32 type;
union {
@ -2217,6 +2333,138 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_SEL0_MDEUB |
DESC_HDR_MODE0_MDEUB_SHA512,
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.setkey = ahash_setkey,
.halg.digestsize = MD5_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(md5)",
.cra_driver_name = "hmac-md5-talitos",
.cra_blocksize = MD5_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_MDEUA |
DESC_HDR_MODE0_MDEU_MD5,
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.setkey = ahash_setkey,
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "hmac-sha1-talitos",
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_MDEUA |
DESC_HDR_MODE0_MDEU_SHA1,
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.setkey = ahash_setkey,
.halg.digestsize = SHA224_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "hmac-sha224-talitos",
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_MDEUA |
DESC_HDR_MODE0_MDEU_SHA224,
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.setkey = ahash_setkey,
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "hmac-sha256-talitos",
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_MDEUA |
DESC_HDR_MODE0_MDEU_SHA256,
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.setkey = ahash_setkey,
.halg.digestsize = SHA384_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "hmac-sha384-talitos",
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_MDEUB |
DESC_HDR_MODE0_MDEUB_SHA384,
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.setkey = ahash_setkey,
.halg.digestsize = SHA512_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "hmac-sha512-talitos",
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_MDEUB |
DESC_HDR_MODE0_MDEUB_SHA512,
}
};
struct talitos_crypto_alg {
@ -2331,12 +2579,15 @@ static int talitos_remove(struct platform_device *ofdev)
kfree(priv->chan);
if (priv->irq != NO_IRQ) {
free_irq(priv->irq, dev);
irq_dispose_mapping(priv->irq);
}
for (i = 0; i < 2; i++)
if (priv->irq[i]) {
free_irq(priv->irq[i], dev);
irq_dispose_mapping(priv->irq[i]);
}
tasklet_kill(&priv->done_task);
tasklet_kill(&priv->done_task[0]);
if (priv->irq[1])
tasklet_kill(&priv->done_task[1]);
iounmap(priv->reg);
@ -2373,8 +2624,14 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
case CRYPTO_ALG_TYPE_AHASH:
alg = &t_alg->algt.alg.hash.halg.base;
alg->cra_init = talitos_cra_init_ahash;
if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
!strncmp(alg->cra_name, "hmac", 4)) {
kfree(t_alg);
return ERR_PTR(-ENOTSUPP);
}
if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
!strcmp(alg->cra_name, "sha224")) {
(!strcmp(alg->cra_name, "sha224") ||
!strcmp(alg->cra_name, "hmac(sha224)"))) {
t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
t_alg->algt.desc_hdr_template =
DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@ -2397,6 +2654,54 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
return t_alg;
}
static int talitos_probe_irq(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct talitos_private *priv = dev_get_drvdata(dev);
int err;
priv->irq[0] = irq_of_parse_and_map(np, 0);
if (!priv->irq[0]) {
dev_err(dev, "failed to map irq\n");
return -EINVAL;
}
priv->irq[1] = irq_of_parse_and_map(np, 1);
/* get the primary irq line */
if (!priv->irq[1]) {
err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
dev_driver_string(dev), dev);
goto primary_out;
}
err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
dev_driver_string(dev), dev);
if (err)
goto primary_out;
/* get the secondary irq line */
err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
dev_driver_string(dev), dev);
if (err) {
dev_err(dev, "failed to request secondary irq\n");
irq_dispose_mapping(priv->irq[1]);
priv->irq[1] = 0;
}
return err;
primary_out:
if (err) {
dev_err(dev, "failed to request primary irq\n");
irq_dispose_mapping(priv->irq[0]);
priv->irq[0] = 0;
}
return err;
}
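Note that talitos_probe_irq() tests the cookie from irq_of_parse_and_map() against 0 rather than NO_IRQ: the function returns 0 when no translation exists, on every architecture, whereas NO_IRQ is 0 on powerpc but has historically been -1 elsewhere. A minimal sketch, assuming a device-tree node np:

        int virq = irq_of_parse_and_map(np, 0);

        if (!virq)      /* 0 means "no mapping"; never compare with NO_IRQ */
                return -EINVAL;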
static int talitos_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
@ -2413,28 +2718,22 @@ static int talitos_probe(struct platform_device *ofdev)
priv->ofdev = ofdev;
tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
err = talitos_probe_irq(ofdev);
if (err)
goto err_out;
if (!priv->irq[1]) {
tasklet_init(&priv->done_task[0], talitos_done_4ch,
(unsigned long)dev);
} else {
tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
(unsigned long)dev);
tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
(unsigned long)dev);
}
INIT_LIST_HEAD(&priv->alg_list);
priv->irq = irq_of_parse_and_map(np, 0);
if (priv->irq == NO_IRQ) {
dev_err(dev, "failed to map irq\n");
err = -EINVAL;
goto err_out;
}
/* get the irq line */
err = request_irq(priv->irq, talitos_interrupt, 0,
dev_driver_string(dev), dev);
if (err) {
dev_err(dev, "failed to request irq %d\n", priv->irq);
irq_dispose_mapping(priv->irq);
priv->irq = NO_IRQ;
goto err_out;
}
priv->reg = of_iomap(np, 0);
if (!priv->reg) {
dev_err(dev, "failed to of_iomap\n");
@ -2471,7 +2770,8 @@ static int talitos_probe(struct platform_device *ofdev)
if (of_device_is_compatible(np, "fsl,sec2.1"))
priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
TALITOS_FTR_SHA224_HWINIT;
TALITOS_FTR_SHA224_HWINIT |
TALITOS_FTR_HMAC_OK;
priv->chan = kzalloc(sizeof(struct talitos_channel) *
priv->num_channels, GFP_KERNEL);
@ -2481,6 +2781,12 @@ static int talitos_probe(struct platform_device *ofdev)
goto err_out;
}
for (i = 0; i < priv->num_channels; i++) {
priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
if (!priv->irq[1] || !(i & 1))
priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
}
for (i = 0; i < priv->num_channels; i++) {
spin_lock_init(&priv->chan[i].head_lock);
spin_lock_init(&priv->chan[i].tail_lock);
@ -2530,6 +2836,8 @@ static int talitos_probe(struct platform_device *ofdev)
t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
if (err == -ENOTSUPP)
continue;
goto err_out;
}
@ -2551,12 +2859,13 @@ static int talitos_probe(struct platform_device *ofdev)
dev_err(dev, "%s alg registration failed\n",
name);
kfree(t_alg);
} else {
} else
list_add_tail(&t_alg->entry, &priv->alg_list);
dev_info(dev, "%s\n", name);
}
}
}
if (!list_empty(&priv->alg_list))
dev_info(dev, "%s algorithms registered in /proc/crypto\n",
(char *)of_get_property(np, "compatible", NULL));
return 0;
@ -2584,17 +2893,7 @@ static struct platform_driver talitos_driver = {
.remove = talitos_remove,
};
static int __init talitos_init(void)
{
return platform_driver_register(&talitos_driver);
}
module_init(talitos_init);
static void __exit talitos_exit(void)
{
platform_driver_unregister(&talitos_driver);
}
module_exit(talitos_exit);
module_platform_driver(talitos_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");


@ -1,7 +1,7 @@
/*
* Freescale SEC (talitos) device register and descriptor header defines
*
* Copyright (c) 2006-2010 Freescale Semiconductor, Inc.
* Copyright (c) 2006-2011 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -34,28 +34,37 @@
/* global register offset addresses */
#define TALITOS_MCR 0x1030 /* master control register */
#define TALITOS_MCR_LO 0x1038
#define TALITOS_MCR_RCA0 (1 << 15) /* remap channel 0 */
#define TALITOS_MCR_RCA1 (1 << 14) /* remap channel 1 */
#define TALITOS_MCR_RCA2 (1 << 13) /* remap channel 2 */
#define TALITOS_MCR_RCA3 (1 << 12) /* remap channel 3 */
#define TALITOS_MCR_SWR 0x1 /* s/w reset */
#define TALITOS_MCR_LO 0x1034
#define TALITOS_IMR 0x1008 /* interrupt mask register */
#define TALITOS_IMR_INIT 0x100ff /* enable channel IRQs */
#define TALITOS_IMR_DONE 0x00055 /* done IRQs */
#define TALITOS_IMR_LO 0x100C
#define TALITOS_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */
#define TALITOS_ISR 0x1010 /* interrupt status register */
#define TALITOS_ISR_CHERR 0xaa /* channel errors mask */
#define TALITOS_ISR_CHDONE 0x55 /* channel done mask */
#define TALITOS_ISR_4CHERR 0xaa /* 4 channel errors mask */
#define TALITOS_ISR_4CHDONE 0x55 /* 4 channel done mask */
#define TALITOS_ISR_CH_0_2_ERR 0x22 /* channels 0, 2 errors mask */
#define TALITOS_ISR_CH_0_2_DONE 0x11 /* channels 0, 2 done mask */
#define TALITOS_ISR_CH_1_3_ERR 0x88 /* channels 1, 3 errors mask */
#define TALITOS_ISR_CH_1_3_DONE 0x44 /* channels 1, 3 done mask */
#define TALITOS_ISR_LO 0x1014
#define TALITOS_ICR 0x1018 /* interrupt clear register */
#define TALITOS_ICR_LO 0x101C
/* channel register address stride */
#define TALITOS_CH_BASE_OFFSET 0x1000 /* default channel map base */
#define TALITOS_CH_STRIDE 0x100
/* channel configuration register */
#define TALITOS_CCCR(ch) (ch * TALITOS_CH_STRIDE + 0x1108)
#define TALITOS_CCCR 0x8
#define TALITOS_CCCR_CONT 0x2 /* channel continue */
#define TALITOS_CCCR_RESET 0x1 /* channel reset */
#define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c)
#define TALITOS_CCCR_LO 0xc
#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */
#define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */
#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
@ -63,8 +72,8 @@
#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */
/* CCPSR: channel pointer status register */
#define TALITOS_CCPSR(ch) (ch * TALITOS_CH_STRIDE + 0x1110)
#define TALITOS_CCPSR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1114)
#define TALITOS_CCPSR 0x10
#define TALITOS_CCPSR_LO 0x14
#define TALITOS_CCPSR_LO_DOF 0x8000 /* double FF write oflow error */
#define TALITOS_CCPSR_LO_SOF 0x4000 /* single FF write oflow error */
#define TALITOS_CCPSR_LO_MDTE 0x2000 /* master data transfer error */
@ -79,24 +88,24 @@
#define TALITOS_CCPSR_LO_SRL 0x0010 /* scatter return/length error */
/* channel fetch fifo register */
#define TALITOS_FF(ch) (ch * TALITOS_CH_STRIDE + 0x1148)
#define TALITOS_FF_LO(ch) (ch * TALITOS_CH_STRIDE + 0x114c)
#define TALITOS_FF 0x48
#define TALITOS_FF_LO 0x4c
/* current descriptor pointer register */
#define TALITOS_CDPR(ch) (ch * TALITOS_CH_STRIDE + 0x1140)
#define TALITOS_CDPR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1144)
#define TALITOS_CDPR 0x40
#define TALITOS_CDPR_LO 0x44
/* descriptor buffer register */
#define TALITOS_DESCBUF(ch) (ch * TALITOS_CH_STRIDE + 0x1180)
#define TALITOS_DESCBUF_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1184)
#define TALITOS_DESCBUF 0x80
#define TALITOS_DESCBUF_LO 0x84
/* gather link table */
#define TALITOS_GATHER(ch) (ch * TALITOS_CH_STRIDE + 0x11c0)
#define TALITOS_GATHER_LO(ch) (ch * TALITOS_CH_STRIDE + 0x11c4)
#define TALITOS_GATHER 0xc0
#define TALITOS_GATHER_LO 0xc4
/* scatter link table */
#define TALITOS_SCATTER(ch) (ch * TALITOS_CH_STRIDE + 0x11e0)
#define TALITOS_SCATTER_LO(ch) (ch * TALITOS_CH_STRIDE + 0x11e4)
#define TALITOS_SCATTER 0xe0
#define TALITOS_SCATTER_LO 0xe4
/* execution unit interrupt status registers */
#define TALITOS_DEUISR 0x2030 /* DES unit */

Просмотреть файл
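This register map change is what makes the dual-IRQ remapping workable: instead of global "(ch * stride + absolute offset)" macros, each channel now carries a base pointer and every register is a small fixed offset from it. A standalone sketch of the address arithmetic, matching the probe code above:

#include <stdio.h>

#define TALITOS_CH_BASE_OFFSET  0x1000
#define TALITOS_CH_STRIDE       0x100
#define TALITOS_CCCR            0x8

int main(void)
{
        int two_irqs = 1;       /* set to 0 for the single-interrupt layout */
        int i;

        for (i = 0; i < 4; i++) {
                unsigned long off = TALITOS_CH_STRIDE * (i + 1);

                /* odd channels are remapped away from the default base
                 * only when the second IRQ line is in use (RCA1/RCA3) */
                if (!two_irqs || !(i & 1))
                        off += TALITOS_CH_BASE_OFFSET;
                printf("ch%d CCCR at reg + 0x%lx\n", i, off + TALITOS_CCCR);
        }
        return 0;
}

For channel 0 this prints reg + 0x1108, the same address the old TALITOS_CCCR(ch) macro produced.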

@ -134,6 +134,7 @@ struct crypto_template *crypto_lookup_template(const char *name);
int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_instance *inst);
int crypto_unregister_instance(struct crypto_alg *alg);
int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst, u32 mask);

include/crypto/lrw.h (new file, 43 lines)

@ -0,0 +1,43 @@
#ifndef _CRYPTO_LRW_H
#define _CRYPTO_LRW_H
#include <crypto/b128ops.h>
struct scatterlist;
struct gf128mul_64k;
struct blkcipher_desc;
#define LRW_BLOCK_SIZE 16
struct lrw_table_ctx {
/* optimizes multiplying a random (non incrementing, as at the
* start of a new sector) value with key2, we could also have
* used 4k optimization tables or no optimization at all. In the
* latter case we would have to store key2 here */
struct gf128mul_64k *table;
/* stores:
* key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
* key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
* key2*{ 0,0,...1,1,1,1,1 }, etc
* needed for optimized multiplication of incrementing values
* with key2 */
be128 mulinc[128];
};
int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak);
void lrw_free_table(struct lrw_table_ctx *ctx);
struct lrw_crypt_req {
be128 *tbuf;
unsigned int tbuflen;
struct lrw_table_ctx *table_ctx;
void *crypt_ctx;
void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes);
};
int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes,
struct lrw_crypt_req *req);
#endif /* _CRYPTO_LRW_H */
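A glue module uses this header by keeping one struct lrw_table_ctx per tfm and handing lrw_crypt() a request that points at its parallel ECB helper. A minimal sketch in the style of the serpent-sse2 glue code (buf, ctx, crypt_ctx and encrypt_callback are illustrative names):

        struct lrw_crypt_req req = {
                .tbuf = buf,                    /* scratch space for tweak blocks */
                .tbuflen = sizeof(buf),
                .table_ctx = &ctx->lrw_table,   /* filled once by lrw_init_table() */
                .crypt_ctx = &crypt_ctx,
                .crypt_fn = encrypt_callback,   /* processes nbytes of blocks */
        };

        ret = lrw_crypt(desc, dst, src, nbytes, &req);

lrw_free_table() releases the gf128mul table when the tfm goes away.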

include/crypto/serpent.h (new file, 27 lines)

@ -0,0 +1,27 @@
/*
* Common values for serpent algorithms
*/
#ifndef _CRYPTO_SERPENT_H
#define _CRYPTO_SERPENT_H
#include <linux/types.h>
#include <linux/crypto.h>
#define SERPENT_MIN_KEY_SIZE 0
#define SERPENT_MAX_KEY_SIZE 32
#define SERPENT_EXPKEY_WORDS 132
#define SERPENT_BLOCK_SIZE 16
struct serpent_ctx {
u32 expkey[SERPENT_EXPKEY_WORDS];
};
int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
unsigned int keylen);
int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
#endif
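Exporting __serpent_setkey()/__serpent_encrypt() is what lets the SSE2 glue reuse the generic key schedule and fall back to scalar code for leftover blocks. A minimal sketch, assuming a caller that owns its own context (error checking elided):

        #include <crypto/serpent.h>

        static void one_block(const u8 *key, unsigned int keylen,
                              const u8 *in, u8 *out)
        {
                struct serpent_ctx ctx;

                __serpent_setkey(&ctx, key, keylen);    /* keylen <= SERPENT_MAX_KEY_SIZE */
                __serpent_encrypt(&ctx, out, in);       /* one 16-byte block */
        }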


@ -17,6 +17,8 @@ struct twofish_ctx {
u32 s[4][256], w[8], k[32];
};
int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key,
unsigned int key_len, u32 *flags);
int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len);
#endif

include/crypto/xts.h (new file, 27 lines)

@ -0,0 +1,27 @@
#ifndef _CRYPTO_XTS_H
#define _CRYPTO_XTS_H
#include <crypto/b128ops.h>
struct scatterlist;
struct blkcipher_desc;
#define XTS_BLOCK_SIZE 16
struct xts_crypt_req {
be128 *tbuf;
unsigned int tbuflen;
void *tweak_ctx;
void (*tweak_fn)(void *ctx, u8* dst, const u8* src);
void *crypt_ctx;
void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes);
};
#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x))
int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes,
struct xts_crypt_req *req);
#endif /* _CRYPTO_XTS_H */
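xts_crypt() follows the same shape as lrw_crypt(), with the extra tweak_fn hook that encrypts the initial tweak under the second key; XTS_TWEAK_CAST() exists so a single-block cipher routine such as __serpent_encrypt can be passed without an open-coded cast at every call site. A minimal sketch (illustrative names again):

        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),
                .tweak_ctx = &ctx->tweak_ctx,
                .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
                .crypt_ctx = &crypt_ctx,
                .crypt_fn = encrypt_callback,
        };

        ret = xts_crypt(desc, dst, src, nbytes, &req);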