Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "Here is the crypto update for 3.12:

   - Added MODULE_SOFTDEP to allow pre-loading of modules.
   - Reinstated crct10dif driver using the module softdep feature.
   - Allow via rng driver to be auto-loaded.
   - Split large input data when necessary in nx.
   - Handle zero length messages correctly for GCM/XCBC in nx.
   - Handle SHA-2 chunks bigger than block size properly in nx.
   - Handle unaligned lengths in omap-aes.
   - Added SHA384/SHA512 to omap-sham.
   - Added OMAP5/AM43XX SHAM support.
   - Added OMAP4 TRNG support.
   - Misc fixes"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (66 commits)
  Reinstate "crypto: crct10dif - Wrap crc_t10dif function all to use crypto transform framework"
  hwrng: via - Add MODULE_DEVICE_TABLE
  crypto: fcrypt - Fix bitoperation for compilation with clang
  crypto: nx - fix SHA-2 for chunks bigger than block size
  crypto: nx - fix GCM for zero length messages
  crypto: nx - fix XCBC for zero length messages
  crypto: nx - fix limits to sg lists for AES-CCM
  crypto: nx - fix limits to sg lists for AES-XCBC
  crypto: nx - fix limits to sg lists for AES-GCM
  crypto: nx - fix limits to sg lists for AES-CTR
  crypto: nx - fix limits to sg lists for AES-CBC
  crypto: nx - fix limits to sg lists for AES-ECB
  crypto: nx - add offset to nx_build_sg_lists()
  padata - Register hotcpu notifier after initialization
  padata - share code between CPU_ONLINE and CPU_DOWN_FAILED, same to CPU_DOWN_PREPARE and CPU_UP_CANCELED
  hwrng: omap - reorder OMAP TRNG driver code
  crypto: omap-sham - correct dma burst size
  crypto: omap-sham - Enable Polling mode if DMA fails
  crypto: tegra-aes - bitwise vs logical and
  crypto: sahara - checking the wrong variable
  ...
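The MODULE_SOFTDEP mechanism highlighted above records soft "pre:"/"post:" dependencies in a module's .modinfo section; modprobe reads them and loads the named modules first, while the kernel itself does not enforce them. A minimal sketch of a module declaring such a soft pre-dependency (the demo module itself is illustrative and not part of this merge; MODULE_SOFTDEP and the "crct10dif-pclmul" module are):

    #include <linux/module.h>

    static int __init softdep_demo_init(void)
    {
        return 0;
    }

    static void __exit softdep_demo_exit(void)
    {
    }

    module_init(softdep_demo_init);
    module_exit(softdep_demo_exit);

    /* Ask modprobe to load the PCLMULQDQ-accelerated CRC driver before
     * this module; if it is absent, loading simply proceeds without it. */
    MODULE_SOFTDEP("pre: crct10dif-pclmul");
    MODULE_LICENSE("GPL");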
@@ -530,12 +530,12 @@ static int __init omap2_init_devices(void)
 		omap_init_mcspi();
 		omap_init_sham();
 		omap_init_aes();
+		omap_init_rng();
 	} else {
 		/* These can be removed when bindings are done */
 		omap_init_wl12xx_of();
 	}
 	omap_init_sti();
-	omap_init_rng();
 	omap_init_vout();

 	return 0;
@@ -27,6 +27,7 @@ obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
 obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
 obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
 obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
+obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o

 # These modules require assembler to support AVX.
 ifeq ($(avx_supported),yes)

@@ -81,3 +82,4 @@ crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o
 crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o
 sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o
 sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
+crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o
@@ -62,7 +62,7 @@ static void camellia_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 }

 /* camellia sboxes */
-const u64 camellia_sp10011110[256] = {
+__visible const u64 camellia_sp10011110[256] = {
 	0x7000007070707000ULL, 0x8200008282828200ULL, 0x2c00002c2c2c2c00ULL,
 	0xec0000ecececec00ULL, 0xb30000b3b3b3b300ULL, 0x2700002727272700ULL,
 	0xc00000c0c0c0c000ULL, 0xe50000e5e5e5e500ULL, 0xe40000e4e4e4e400ULL,

@@ -151,7 +151,7 @@ const u64 camellia_sp10011110[256] = {
 	0x9e00009e9e9e9e00ULL,
 };

-const u64 camellia_sp22000222[256] = {
+__visible const u64 camellia_sp22000222[256] = {
 	0xe0e0000000e0e0e0ULL, 0x0505000000050505ULL, 0x5858000000585858ULL,
 	0xd9d9000000d9d9d9ULL, 0x6767000000676767ULL, 0x4e4e0000004e4e4eULL,
 	0x8181000000818181ULL, 0xcbcb000000cbcbcbULL, 0xc9c9000000c9c9c9ULL,

@@ -240,7 +240,7 @@ const u64 camellia_sp22000222[256] = {
 	0x3d3d0000003d3d3dULL,
 };

-const u64 camellia_sp03303033[256] = {
+__visible const u64 camellia_sp03303033[256] = {
 	0x0038380038003838ULL, 0x0041410041004141ULL, 0x0016160016001616ULL,
 	0x0076760076007676ULL, 0x00d9d900d900d9d9ULL, 0x0093930093009393ULL,
 	0x0060600060006060ULL, 0x00f2f200f200f2f2ULL, 0x0072720072007272ULL,

@@ -329,7 +329,7 @@ const u64 camellia_sp03303033[256] = {
 	0x004f4f004f004f4fULL,
 };

-const u64 camellia_sp00444404[256] = {
+__visible const u64 camellia_sp00444404[256] = {
 	0x0000707070700070ULL, 0x00002c2c2c2c002cULL, 0x0000b3b3b3b300b3ULL,
 	0x0000c0c0c0c000c0ULL, 0x0000e4e4e4e400e4ULL, 0x0000575757570057ULL,
 	0x0000eaeaeaea00eaULL, 0x0000aeaeaeae00aeULL, 0x0000232323230023ULL,

@@ -418,7 +418,7 @@ const u64 camellia_sp00444404[256] = {
 	0x00009e9e9e9e009eULL,
 };

-const u64 camellia_sp02220222[256] = {
+__visible const u64 camellia_sp02220222[256] = {
 	0x00e0e0e000e0e0e0ULL, 0x0005050500050505ULL, 0x0058585800585858ULL,
 	0x00d9d9d900d9d9d9ULL, 0x0067676700676767ULL, 0x004e4e4e004e4e4eULL,
 	0x0081818100818181ULL, 0x00cbcbcb00cbcbcbULL, 0x00c9c9c900c9c9c9ULL,

@@ -507,7 +507,7 @@ const u64 camellia_sp02220222[256] = {
 	0x003d3d3d003d3d3dULL,
 };

-const u64 camellia_sp30333033[256] = {
+__visible const u64 camellia_sp30333033[256] = {
 	0x3800383838003838ULL, 0x4100414141004141ULL, 0x1600161616001616ULL,
 	0x7600767676007676ULL, 0xd900d9d9d900d9d9ULL, 0x9300939393009393ULL,
 	0x6000606060006060ULL, 0xf200f2f2f200f2f2ULL, 0x7200727272007272ULL,

@@ -596,7 +596,7 @@ const u64 camellia_sp30333033[256] = {
 	0x4f004f4f4f004f4fULL,
 };

-const u64 camellia_sp44044404[256] = {
+__visible const u64 camellia_sp44044404[256] = {
 	0x7070007070700070ULL, 0x2c2c002c2c2c002cULL, 0xb3b300b3b3b300b3ULL,
 	0xc0c000c0c0c000c0ULL, 0xe4e400e4e4e400e4ULL, 0x5757005757570057ULL,
 	0xeaea00eaeaea00eaULL, 0xaeae00aeaeae00aeULL, 0x2323002323230023ULL,

@@ -685,7 +685,7 @@ const u64 camellia_sp44044404[256] = {
 	0x9e9e009e9e9e009eULL,
 };

-const u64 camellia_sp11101110[256] = {
+__visible const u64 camellia_sp11101110[256] = {
 	0x7070700070707000ULL, 0x8282820082828200ULL, 0x2c2c2c002c2c2c00ULL,
 	0xececec00ececec00ULL, 0xb3b3b300b3b3b300ULL, 0x2727270027272700ULL,
 	0xc0c0c000c0c0c000ULL, 0xe5e5e500e5e5e500ULL, 0xe4e4e400e4e4e400ULL,

@@ -828,8 +828,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)

 	subRL[1] ^= (subRL[1] & ~subRL[9]) << 32;
 	/* modified for FLinv(kl2) */
-	dw = (subRL[1] & subRL[9]) >> 32,
-	subRL[1] ^= rol32(dw, 1);
+	dw = (subRL[1] & subRL[9]) >> 32;
+	subRL[1] ^= rol32(dw, 1);

 	/* round 8 */
 	subRL[11] ^= subRL[1];

@@ -840,8 +840,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)

 	subRL[1] ^= (subRL[1] & ~subRL[17]) << 32;
 	/* modified for FLinv(kl4) */
-	dw = (subRL[1] & subRL[17]) >> 32,
-	subRL[1] ^= rol32(dw, 1);
+	dw = (subRL[1] & subRL[17]) >> 32;
+	subRL[1] ^= rol32(dw, 1);

 	/* round 14 */
 	subRL[19] ^= subRL[1];

@@ -859,8 +859,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 	} else {
 		subRL[1] ^= (subRL[1] & ~subRL[25]) << 32;
 		/* modified for FLinv(kl6) */
-		dw = (subRL[1] & subRL[25]) >> 32,
-		subRL[1] ^= rol32(dw, 1);
+		dw = (subRL[1] & subRL[25]) >> 32;
+		subRL[1] ^= rol32(dw, 1);

 		/* round 20 */
 		subRL[27] ^= subRL[1];

@@ -882,8 +882,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)

 		kw4 ^= (kw4 & ~subRL[24]) << 32;
 		/* modified for FL(kl5) */
-		dw = (kw4 & subRL[24]) >> 32,
-		kw4 ^= rol32(dw, 1);
+		dw = (kw4 & subRL[24]) >> 32;
+		kw4 ^= rol32(dw, 1);
 	}

 	/* round 17 */

@@ -895,8 +895,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)

 	kw4 ^= (kw4 & ~subRL[16]) << 32;
 	/* modified for FL(kl3) */
-	dw = (kw4 & subRL[16]) >> 32,
-	kw4 ^= rol32(dw, 1);
+	dw = (kw4 & subRL[16]) >> 32;
+	kw4 ^= rol32(dw, 1);

 	/* round 11 */
 	subRL[14] ^= kw4;

@@ -907,8 +907,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)

 	kw4 ^= (kw4 & ~subRL[8]) << 32;
 	/* modified for FL(kl1) */
-	dw = (kw4 & subRL[8]) >> 32,
-	kw4 ^= rol32(dw, 1);
+	dw = (kw4 & subRL[8]) >> 32;
+	kw4 ^= rol32(dw, 1);

 	/* round 5 */
 	subRL[6] ^= kw4;

@@ -928,8 +928,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 	SET_SUBKEY_LR(6, subRL[5] ^ subRL[7]);			/* round 5 */

 	tl = (subRL[10] >> 32) ^ (subRL[10] & ~subRL[8]);
-	dw = tl & (subRL[8] >> 32),				/* FL(kl1) */
-	tr = subRL[10] ^ rol32(dw, 1);
+	dw = tl & (subRL[8] >> 32);				/* FL(kl1) */
+	tr = subRL[10] ^ rol32(dw, 1);
 	tt = (tr | ((u64)tl << 32));

 	SET_SUBKEY_LR(7, subRL[6] ^ tt);			/* round 6 */

@@ -937,8 +937,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 	SET_SUBKEY_LR(9, subRL[9]);				/* FLinv(kl2) */

 	tl = (subRL[7] >> 32) ^ (subRL[7] & ~subRL[9]);
-	dw = tl & (subRL[9] >> 32),				/* FLinv(kl2) */
-	tr = subRL[7] ^ rol32(dw, 1);
+	dw = tl & (subRL[9] >> 32);				/* FLinv(kl2) */
+	tr = subRL[7] ^ rol32(dw, 1);
 	tt = (tr | ((u64)tl << 32));

 	SET_SUBKEY_LR(10, subRL[11] ^ tt);			/* round 7 */

@@ -948,8 +948,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 	SET_SUBKEY_LR(14, subRL[13] ^ subRL[15]);		/* round 11 */

 	tl = (subRL[18] >> 32) ^ (subRL[18] & ~subRL[16]);
-	dw = tl & (subRL[16] >> 32),				/* FL(kl3) */
-	tr = subRL[18] ^ rol32(dw, 1);
+	dw = tl & (subRL[16] >> 32);				/* FL(kl3) */
+	tr = subRL[18] ^ rol32(dw, 1);
 	tt = (tr | ((u64)tl << 32));

 	SET_SUBKEY_LR(15, subRL[14] ^ tt);			/* round 12 */

@@ -957,8 +957,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 	SET_SUBKEY_LR(17, subRL[17]);				/* FLinv(kl4) */

 	tl = (subRL[15] >> 32) ^ (subRL[15] & ~subRL[17]);
-	dw = tl & (subRL[17] >> 32),				/* FLinv(kl4) */
-	tr = subRL[15] ^ rol32(dw, 1);
+	dw = tl & (subRL[17] >> 32);				/* FLinv(kl4) */
+	tr = subRL[15] ^ rol32(dw, 1);
 	tt = (tr | ((u64)tl << 32));

 	SET_SUBKEY_LR(18, subRL[19] ^ tt);			/* round 13 */

@@ -972,8 +972,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 	SET_SUBKEY_LR(24, subRL[24] ^ subRL[23]);		/* kw3 */
 	} else {
 		tl = (subRL[26] >> 32) ^ (subRL[26] & ~subRL[24]);
-		dw = tl & (subRL[24] >> 32),			/* FL(kl5) */
-		tr = subRL[26] ^ rol32(dw, 1);
+		dw = tl & (subRL[24] >> 32);			/* FL(kl5) */
+		tr = subRL[26] ^ rol32(dw, 1);
 		tt = (tr | ((u64)tl << 32));

 		SET_SUBKEY_LR(23, subRL[22] ^ tt);		/* round 18 */

@@ -981,8 +981,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 		SET_SUBKEY_LR(25, subRL[25]);			/* FLinv(kl6) */

 		tl = (subRL[23] >> 32) ^ (subRL[23] & ~subRL[25]);
-		dw = tl & (subRL[25] >> 32),			/* FLinv(kl6) */
-		tr = subRL[23] ^ rol32(dw, 1);
+		dw = tl & (subRL[25] >> 32);			/* FLinv(kl6) */
+		tr = subRL[23] ^ rol32(dw, 1);
 		tt = (tr | ((u64)tl << 32));

 		SET_SUBKEY_LR(26, subRL[27] ^ tt);		/* round 19 */
@@ -0,0 +1,643 @@
########################################################################
# Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
#
# Copyright (c) 2013, Intel Corporation
#
# Authors:
#	Erdinc Ozturk <erdinc.ozturk@intel.com>
#	Vinodh Gopal <vinodh.gopal@intel.com>
#	James Guilford <james.guilford@intel.com>
#	Tim Chen <tim.c.chen@linux.intel.com>
#
# This software is available to you under a choice of one of two
# licenses. You may choose to be licensed under the terms of the GNU
# General Public License (GPL) Version 2, available from the file
# COPYING in the main directory of this source tree, or the
# OpenIB.org BSD license below:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the
#   distribution.
#
# * Neither the name of the Intel Corporation nor the names of its
#   contributors may be used to endorse or promote products derived from
#   this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
########################################################################
# Function API:
#	UINT16 crc_t10dif_pcl(
#		UINT16 init_crc,	  //initial CRC value, 16 bits
#		const unsigned char *buf, //buffer pointer to calculate CRC on
#		UINT64 len		  //buffer length in bytes (64-bit data)
#	);
#
# Reference paper titled "Fast CRC Computation for Generic
# Polynomials Using PCLMULQDQ Instruction"
# URL: http://www.intel.com/content/dam/www/public/us/en/documents
# /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
#
#

#include <linux/linkage.h>

.text

#define	arg1 %rdi
#define	arg2 %rsi
#define	arg3 %rdx

#define	arg1_low32 %edi

ENTRY(crc_t10dif_pcl)
.align 16

	# adjust the 16-bit initial_crc value, scale it to 32 bits
	shl	$16, arg1_low32

	# Allocate Stack Space
	mov	%rsp, %rcx
	sub	$16*2, %rsp
	# align stack to 16 byte boundary
	and	$~(0x10 - 1), %rsp

	# check if smaller than 256
	cmp	$256, arg3

	# for sizes less than 128, we can't fold 64B at a time...
	jl	_less_than_128


	# load the initial crc value
	movd	arg1_low32, %xmm10	# initial crc

	# crc value does not need to be byte-reflected, but it needs to be
	# moved to the high part of the register, because the data will be
	# byte-reflected and will then align with the initial crc at the
	# correct place.
	pslldq	$12, %xmm10

	movdqa	SHUF_MASK(%rip), %xmm11
	# receive the initial 64B data, xor the initial crc value
	movdqu	16*0(arg2), %xmm0
	movdqu	16*1(arg2), %xmm1
	movdqu	16*2(arg2), %xmm2
	movdqu	16*3(arg2), %xmm3
	movdqu	16*4(arg2), %xmm4
	movdqu	16*5(arg2), %xmm5
	movdqu	16*6(arg2), %xmm6
	movdqu	16*7(arg2), %xmm7

	pshufb	%xmm11, %xmm0
	# XOR the initial_crc value
	pxor	%xmm10, %xmm0
	pshufb	%xmm11, %xmm1
	pshufb	%xmm11, %xmm2
	pshufb	%xmm11, %xmm3
	pshufb	%xmm11, %xmm4
	pshufb	%xmm11, %xmm5
	pshufb	%xmm11, %xmm6
	pshufb	%xmm11, %xmm7

	movdqa	rk3(%rip), %xmm10	#xmm10 has rk3 and rk4
					#imm value of pclmulqdq instruction
					#will determine which constant to use

	#################################################################
	# we subtract 256 instead of 128 to save one instruction from the loop
	sub	$256, arg3

	# at this section of the code, there is 64*x+y (0<=y<64) bytes of
	# buffer. The _fold_64_B_loop will fold 64B at a time
	# until we have 64+y Bytes of buffer


	# fold 64B at a time. This section of the code folds 4 xmm
	# registers in parallel
_fold_64_B_loop:

	# update the buffer pointer
	add	$128, arg2		# buf += 128

	movdqu	16*0(arg2), %xmm9
	movdqu	16*1(arg2), %xmm12
	pshufb	%xmm11, %xmm9
	pshufb	%xmm11, %xmm12
	movdqa	%xmm0, %xmm8
	movdqa	%xmm1, %xmm13
	pclmulqdq	$0x0 , %xmm10, %xmm0
	pclmulqdq	$0x11, %xmm10, %xmm8
	pclmulqdq	$0x0 , %xmm10, %xmm1
	pclmulqdq	$0x11, %xmm10, %xmm13
	pxor	%xmm9 , %xmm0
	xorps	%xmm8 , %xmm0
	pxor	%xmm12, %xmm1
	xorps	%xmm13, %xmm1

	movdqu	16*2(arg2), %xmm9
	movdqu	16*3(arg2), %xmm12
	pshufb	%xmm11, %xmm9
	pshufb	%xmm11, %xmm12
	movdqa	%xmm2, %xmm8
	movdqa	%xmm3, %xmm13
	pclmulqdq	$0x0, %xmm10, %xmm2
	pclmulqdq	$0x11, %xmm10, %xmm8
	pclmulqdq	$0x0, %xmm10, %xmm3
	pclmulqdq	$0x11, %xmm10, %xmm13
	pxor	%xmm9 , %xmm2
	xorps	%xmm8 , %xmm2
	pxor	%xmm12, %xmm3
	xorps	%xmm13, %xmm3

	movdqu	16*4(arg2), %xmm9
	movdqu	16*5(arg2), %xmm12
	pshufb	%xmm11, %xmm9
	pshufb	%xmm11, %xmm12
	movdqa	%xmm4, %xmm8
	movdqa	%xmm5, %xmm13
	pclmulqdq	$0x0, %xmm10, %xmm4
	pclmulqdq	$0x11, %xmm10, %xmm8
	pclmulqdq	$0x0, %xmm10, %xmm5
	pclmulqdq	$0x11, %xmm10, %xmm13
	pxor	%xmm9 , %xmm4
	xorps	%xmm8 , %xmm4
	pxor	%xmm12, %xmm5
	xorps	%xmm13, %xmm5

	movdqu	16*6(arg2), %xmm9
	movdqu	16*7(arg2), %xmm12
	pshufb	%xmm11, %xmm9
	pshufb	%xmm11, %xmm12
	movdqa	%xmm6 , %xmm8
	movdqa	%xmm7 , %xmm13
	pclmulqdq	$0x0 , %xmm10, %xmm6
	pclmulqdq	$0x11, %xmm10, %xmm8
	pclmulqdq	$0x0 , %xmm10, %xmm7
	pclmulqdq	$0x11, %xmm10, %xmm13
	pxor	%xmm9 , %xmm6
	xorps	%xmm8 , %xmm6
	pxor	%xmm12, %xmm7
	xorps	%xmm13, %xmm7

	sub	$128, arg3

	# check if there is another 64B in the buffer to be able to fold
	jge	_fold_64_B_loop
	##################################################################


	add	$128, arg2
	# at this point, the buffer pointer is pointing at the last y Bytes
	# of the buffer, and the folded data is held in 8 of the xmm
	# registers: xmm0 through xmm7


	# fold the 8 xmm registers to 1 xmm register with different constants

	movdqa	rk9(%rip), %xmm10
	movdqa	%xmm0, %xmm8
	pclmulqdq	$0x11, %xmm10, %xmm0
	pclmulqdq	$0x0 , %xmm10, %xmm8
	pxor	%xmm8, %xmm7
	xorps	%xmm0, %xmm7

	movdqa	rk11(%rip), %xmm10
	movdqa	%xmm1, %xmm8
	pclmulqdq	$0x11, %xmm10, %xmm1
	pclmulqdq	$0x0 , %xmm10, %xmm8
	pxor	%xmm8, %xmm7
	xorps	%xmm1, %xmm7

	movdqa	rk13(%rip), %xmm10
	movdqa	%xmm2, %xmm8
	pclmulqdq	$0x11, %xmm10, %xmm2
	pclmulqdq	$0x0 , %xmm10, %xmm8
	pxor	%xmm8, %xmm7
	pxor	%xmm2, %xmm7

	movdqa	rk15(%rip), %xmm10
	movdqa	%xmm3, %xmm8
	pclmulqdq	$0x11, %xmm10, %xmm3
	pclmulqdq	$0x0 , %xmm10, %xmm8
	pxor	%xmm8, %xmm7
	xorps	%xmm3, %xmm7

	movdqa	rk17(%rip), %xmm10
	movdqa	%xmm4, %xmm8
	pclmulqdq	$0x11, %xmm10, %xmm4
	pclmulqdq	$0x0 , %xmm10, %xmm8
	pxor	%xmm8, %xmm7
	pxor	%xmm4, %xmm7

	movdqa	rk19(%rip), %xmm10
	movdqa	%xmm5, %xmm8
	pclmulqdq	$0x11, %xmm10, %xmm5
	pclmulqdq	$0x0 , %xmm10, %xmm8
	pxor	%xmm8, %xmm7
	xorps	%xmm5, %xmm7

	movdqa	rk1(%rip), %xmm10	#xmm10 has rk1 and rk2
					#imm value of pclmulqdq instruction
					#will determine which constant to use
	movdqa	%xmm6, %xmm8
	pclmulqdq	$0x11, %xmm10, %xmm6
	pclmulqdq	$0x0 , %xmm10, %xmm8
	pxor	%xmm8, %xmm7
	pxor	%xmm6, %xmm7


	# instead of 64, we add 48 to the loop counter to save 1 instruction
	# from the loop; instead of a cmp instruction, we use the negative
	# flag with the jl instruction
	add	$128-16, arg3
	jl	_final_reduction_for_128

	# now we have 16+y bytes left to reduce. 16 Bytes is in register xmm7
	# and the rest is in memory. We can fold 16 bytes at a time if y>=16;
	# continue folding 16B at a time

_16B_reduction_loop:
	movdqa	%xmm7, %xmm8
	pclmulqdq	$0x11, %xmm10, %xmm7
	pclmulqdq	$0x0 , %xmm10, %xmm8
	pxor	%xmm8, %xmm7
	movdqu	(arg2), %xmm0
	pshufb	%xmm11, %xmm0
	pxor	%xmm0 , %xmm7
	add	$16, arg2
	sub	$16, arg3
	# instead of a cmp instruction, we utilize the flags with the
	# jge instruction; equivalent of: cmp arg3, 16-16
	# check if there is any more 16B in the buffer to be able to fold
	jge	_16B_reduction_loop

	# now we have 16+z bytes left to reduce, where 0<= z < 16.
	# first, we reduce the data in the xmm7 register


_final_reduction_for_128:
	# check if any more data to fold. If not, compute the CRC of
	# the final 128 bits
	add	$16, arg3
	je	_128_done

	# here we are getting data that is less than 16 bytes.
	# since we know that there was data before the pointer, we can
	# offset the input pointer before the actual point, to receive
	# exactly 16 bytes. after that the registers need to be adjusted.
_get_last_two_xmms:
	movdqa	%xmm7, %xmm2

	movdqu	-16(arg2, arg3), %xmm1
	pshufb	%xmm11, %xmm1

	# get rid of the extra data that was loaded before
	# load the shift constant
	lea	pshufb_shf_table+16(%rip), %rax
	sub	arg3, %rax
	movdqu	(%rax), %xmm0

	# shift xmm2 to the left by arg3 bytes
	pshufb	%xmm0, %xmm2

	# shift xmm7 to the right by 16-arg3 bytes
	pxor	mask1(%rip), %xmm0
	pshufb	%xmm0, %xmm7
	pblendvb	%xmm2, %xmm1	#xmm0 is implicit

	# fold 16 Bytes
	movdqa	%xmm1, %xmm2
	movdqa	%xmm7, %xmm8
	pclmulqdq	$0x11, %xmm10, %xmm7
	pclmulqdq	$0x0 , %xmm10, %xmm8
	pxor	%xmm8, %xmm7
	pxor	%xmm2, %xmm7

_128_done:
	# compute crc of a 128-bit value
	movdqa	rk5(%rip), %xmm10	# rk5 and rk6 in xmm10
	movdqa	%xmm7, %xmm0

	#64b fold
	pclmulqdq	$0x1, %xmm10, %xmm7
	pslldq	$8 , %xmm0
	pxor	%xmm0, %xmm7

	#32b fold
	movdqa	%xmm7, %xmm0

	pand	mask2(%rip), %xmm0

	psrldq	$12, %xmm7
	pclmulqdq	$0x10, %xmm10, %xmm7
	pxor	%xmm0, %xmm7

	#barrett reduction
_barrett:
	movdqa	rk7(%rip), %xmm10	# rk7 and rk8 in xmm10
	movdqa	%xmm7, %xmm0
	pclmulqdq	$0x01, %xmm10, %xmm7
	pslldq	$4, %xmm7
	pclmulqdq	$0x11, %xmm10, %xmm7

	pslldq	$4, %xmm7
	pxor	%xmm0, %xmm7
	pextrd	$1, %xmm7, %eax

_cleanup:
	# scale the result back to 16 bits
	shr	$16, %eax
	mov	%rcx, %rsp
	ret

########################################################################

.align 16
_less_than_128:

	# check if there is enough buffer to be able to fold 16B at a time
	cmp	$32, arg3
	jl	_less_than_32
	movdqa	SHUF_MASK(%rip), %xmm11

	# now if there is, load the constants
	movdqa	rk1(%rip), %xmm10	# rk1 and rk2 in xmm10

	movd	arg1_low32, %xmm0	# get the initial crc value
	pslldq	$12, %xmm0	# align it to its correct place
	movdqu	(arg2), %xmm7	# load the plaintext
	pshufb	%xmm11, %xmm7	# byte-reflect the plaintext
	pxor	%xmm0, %xmm7


	# update the buffer pointer
	add	$16, arg2

	# update the counter. subtract 32 instead of 16 to save one
	# instruction from the loop
	sub	$32, arg3

	jmp	_16B_reduction_loop


.align 16
_less_than_32:
	# mov initial crc to the return value. this is necessary for
	# zero-length buffers.
	mov	arg1_low32, %eax
	test	arg3, arg3
	je	_cleanup

	movdqa	SHUF_MASK(%rip), %xmm11

	movd	arg1_low32, %xmm0	# get the initial crc value
	pslldq	$12, %xmm0	# align it to its correct place

	cmp	$16, arg3
	je	_exact_16_left
	jl	_less_than_16_left

	movdqu	(arg2), %xmm7	# load the plaintext
	pshufb	%xmm11, %xmm7	# byte-reflect the plaintext
	pxor	%xmm0 , %xmm7	# xor the initial crc value
	add	$16, arg2
	sub	$16, arg3
	movdqa	rk1(%rip), %xmm10	# rk1 and rk2 in xmm10
	jmp	_get_last_two_xmms


.align 16
_less_than_16_left:
	# use stack space to load data less than 16 bytes, zero-out
	# the 16B in memory first.

	pxor	%xmm1, %xmm1
	mov	%rsp, %r11
	movdqa	%xmm1, (%r11)

	cmp	$4, arg3
	jl	_only_less_than_4

	# backup the counter value
	mov	arg3, %r9
	cmp	$8, arg3
	jl	_less_than_8_left

	# load 8 Bytes
	mov	(arg2), %rax
	mov	%rax, (%r11)
	add	$8, %r11
	sub	$8, arg3
	add	$8, arg2
_less_than_8_left:

	cmp	$4, arg3
	jl	_less_than_4_left

	# load 4 Bytes
	mov	(arg2), %eax
	mov	%eax, (%r11)
	add	$4, %r11
	sub	$4, arg3
	add	$4, arg2
_less_than_4_left:

	cmp	$2, arg3
	jl	_less_than_2_left

	# load 2 Bytes
	mov	(arg2), %ax
	mov	%ax, (%r11)
	add	$2, %r11
	sub	$2, arg3
	add	$2, arg2
_less_than_2_left:
	cmp	$1, arg3
	jl	_zero_left

	# load 1 Byte
	mov	(arg2), %al
	mov	%al, (%r11)
_zero_left:
	movdqa	(%rsp), %xmm7
	pshufb	%xmm11, %xmm7
	pxor	%xmm0 , %xmm7	# xor the initial crc value

	# shl r9, 4
	lea	pshufb_shf_table+16(%rip), %rax
	sub	%r9, %rax
	movdqu	(%rax), %xmm0
	pxor	mask1(%rip), %xmm0

	pshufb	%xmm0, %xmm7
	jmp	_128_done

.align 16
_exact_16_left:
	movdqu	(arg2), %xmm7
	pshufb	%xmm11, %xmm7
	pxor	%xmm0 , %xmm7	# xor the initial crc value

	jmp	_128_done

_only_less_than_4:
	cmp	$3, arg3
	jl	_only_less_than_3

	# load 3 Bytes
	mov	(arg2), %al
	mov	%al, (%r11)

	mov	1(arg2), %al
	mov	%al, 1(%r11)

	mov	2(arg2), %al
	mov	%al, 2(%r11)

	movdqa	(%rsp), %xmm7
	pshufb	%xmm11, %xmm7
	pxor	%xmm0 , %xmm7	# xor the initial crc value

	psrldq	$5, %xmm7

	jmp	_barrett
_only_less_than_3:
	cmp	$2, arg3
	jl	_only_less_than_2

	# load 2 Bytes
	mov	(arg2), %al
	mov	%al, (%r11)

	mov	1(arg2), %al
	mov	%al, 1(%r11)

	movdqa	(%rsp), %xmm7
	pshufb	%xmm11, %xmm7
	pxor	%xmm0 , %xmm7	# xor the initial crc value

	psrldq	$6, %xmm7

	jmp	_barrett
_only_less_than_2:

	# load 1 Byte
	mov	(arg2), %al
	mov	%al, (%r11)

	movdqa	(%rsp), %xmm7
	pshufb	%xmm11, %xmm7
	pxor	%xmm0 , %xmm7	# xor the initial crc value

	psrldq	$7, %xmm7

	jmp	_barrett

ENDPROC(crc_t10dif_pcl)

.data

# precomputed constants
# these constants are precomputed from the poly:
# 0x8bb70000 (0x8bb7 scaled to 32 bits)
.align 16
# Q = 0x18BB70000
# rk1 = 2^(32*3) mod Q << 32
# rk2 = 2^(32*5) mod Q << 32
# rk3 = 2^(32*15) mod Q << 32
# rk4 = 2^(32*17) mod Q << 32
# rk5 = 2^(32*3) mod Q << 32
# rk6 = 2^(32*2) mod Q << 32
# rk7 = floor(2^64/Q)
# rk8 = Q
rk1:
.quad 0x2d56000000000000
rk2:
.quad 0x06df000000000000
rk3:
.quad 0x9d9d000000000000
rk4:
.quad 0x7cf5000000000000
rk5:
.quad 0x2d56000000000000
rk6:
.quad 0x1368000000000000
rk7:
.quad 0x00000001f65a57f8
rk8:
.quad 0x000000018bb70000

rk9:
.quad 0xceae000000000000
rk10:
.quad 0xbfd6000000000000
rk11:
.quad 0x1e16000000000000
rk12:
.quad 0x713c000000000000
rk13:
.quad 0xf7f9000000000000
rk14:
.quad 0x80a6000000000000
rk15:
.quad 0x044c000000000000
rk16:
.quad 0xe658000000000000
rk17:
.quad 0xad18000000000000
rk18:
.quad 0xa497000000000000
rk19:
.quad 0x6ee3000000000000
rk20:
.quad 0xe7b5000000000000


mask1:
.octa 0x80808080808080808080808080808080
mask2:
.octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF

SHUF_MASK:
.octa 0x000102030405060708090A0B0C0D0E0F

pshufb_shf_table:
# use these values for shift constants for the pshufb instruction
# different alignments result in values as shown:
#	DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
#	DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-2) / shr2
#	DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-3) / shr3
#	DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
#	DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
#	DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
#	DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9  (16-7) / shr7
#	DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8  (16-8) / shr8
#	DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7  (16-9) / shr9
#	DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6  (16-10) / shr10
#	DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5  (16-11) / shr11
#	DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4  (16-12) / shr12
#	DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3  (16-13) / shr13
#	DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2  (16-14) / shr14
#	DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1  (16-15) / shr15
.octa 0x8f8e8d8c8b8a89888786858483828100
.octa 0x000e0d0c0b0a09080706050403020100
@@ -0,0 +1,151 @@
/*
 * Cryptographic API.
 *
 * T10 Data Integrity Field CRC16 Crypto Transform using PCLMULQDQ Instructions
 *
 * Copyright (C) 2013 Intel Corporation
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-t10dif.h>
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/i387.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>

asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
				size_t len);

struct chksum_desc_ctx {
	__u16 crc;
};

/*
 * Steps through buffer one byte at a time, calculates reflected
 * crc using table.
 */

static int chksum_init(struct shash_desc *desc)
{
	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

	ctx->crc = 0;

	return 0;
}

static int chksum_update(struct shash_desc *desc, const u8 *data,
			 unsigned int length)
{
	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

	if (irq_fpu_usable()) {
		kernel_fpu_begin();
		ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
		kernel_fpu_end();
	} else
		ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
	return 0;
}

static int chksum_final(struct shash_desc *desc, u8 *out)
{
	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

	*(__u16 *)out = ctx->crc;
	return 0;
}

static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
			  u8 *out)
{
	if (irq_fpu_usable()) {
		kernel_fpu_begin();
		*(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
		kernel_fpu_end();
	} else
		*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
	return 0;
}

static int chksum_finup(struct shash_desc *desc, const u8 *data,
			unsigned int len, u8 *out)
{
	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

	return __chksum_finup(&ctx->crc, data, len, out);
}

static int chksum_digest(struct shash_desc *desc, const u8 *data,
			 unsigned int length, u8 *out)
{
	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

	return __chksum_finup(&ctx->crc, data, length, out);
}

static struct shash_alg alg = {
	.digestsize	= CRC_T10DIF_DIGEST_SIZE,
	.init		= chksum_init,
	.update		= chksum_update,
	.final		= chksum_final,
	.finup		= chksum_finup,
	.digest		= chksum_digest,
	.descsize	= sizeof(struct chksum_desc_ctx),
	.base		= {
		.cra_name		= "crct10dif",
		.cra_driver_name	= "crct10dif-pclmul",
		.cra_priority		= 200,
		.cra_blocksize		= CRC_T10DIF_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
};

static const struct x86_cpu_id crct10dif_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, crct10dif_cpu_id);

static int __init crct10dif_intel_mod_init(void)
{
	if (!x86_match_cpu(crct10dif_cpu_id))
		return -ENODEV;

	return crypto_register_shash(&alg);
}

static void __exit crct10dif_intel_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(crct10dif_intel_mod_init);
module_exit(crct10dif_intel_mod_fini);

MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
MODULE_LICENSE("GPL");

MODULE_ALIAS("crct10dif");
MODULE_ALIAS("crct10dif-pclmul");
@@ -167,12 +167,12 @@ static struct xor_block_template xor_block_avx = {

 #define AVX_XOR_SPEED \
 do { \
-	if (cpu_has_avx) \
+	if (cpu_has_avx && cpu_has_osxsave) \
		xor_speed(&xor_block_avx); \
 } while (0)

 #define AVX_SELECT(FASTEST) \
-	(cpu_has_avx ? &xor_block_avx : FASTEST)
+	(cpu_has_avx && cpu_has_osxsave ? &xor_block_avx : FASTEST)

 #else

@@ -376,6 +376,25 @@ config CRYPTO_CRC32_PCLMUL
	  which will enable any routine to use the CRC-32-IEEE 802.3 checksum
	  and gain better performance as compared with the table implementation.

+config CRYPTO_CRCT10DIF
+	tristate "CRCT10DIF algorithm"
+	select CRYPTO_HASH
+	help
+	  CRC T10 Data Integrity Field computation is being cast as
+	  a crypto transform.  This allows for faster CRC T10 DIF
+	  transforms to be used if they are available.
+
+config CRYPTO_CRCT10DIF_PCLMUL
+	tristate "CRCT10DIF PCLMULQDQ hardware acceleration"
+	depends on X86 && 64BIT && CRC_T10DIF
+	select CRYPTO_HASH
+	help
+	  For x86_64 processors with SSE4.2 and PCLMULQDQ supported,
+	  CRC T10 DIF computation can be hardware-accelerated with the
+	  PCLMULQDQ instruction. This option will create the
+	  'crct10dif-pclmul' module, which is faster when computing the
+	  crct10dif checksum as compared with the generic table implementation.
+
 config CRYPTO_GHASH
	tristate "GHASH digest algorithm"
	select CRYPTO_GF128MUL
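For context, callers reach whichever registered "crct10dif" implementation has the highest priority (crct10dif-pclmul at 200 beats crct10dif-generic at 100) through the generic shash interface. A hedged sketch of such a caller, following the pattern the in-kernel CRC wrappers use; the function name is illustrative:

    #include <crypto/hash.h>

    static u16 crct10dif_via_shash(struct crypto_shash *tfm,
                                   const u8 *data, unsigned int len)
    {
        /* tfm is assumed to come from crypto_alloc_shash("crct10dif", 0, 0) */
        struct {
            struct shash_desc shash;
            char ctx[2];            /* descsize: room for the __u16 CRC state */
        } desc;
        u16 crc = 0;

        desc.shash.tfm = tfm;
        desc.shash.flags = 0;
        if (crypto_shash_digest(&desc.shash, data, len, (u8 *)&crc))
            return 0;               /* illustrative error handling only */
        return crc;
    }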
@@ -83,6 +83,7 @@ obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
 obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
 obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
+obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif.o
 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
@@ -62,7 +62,7 @@ static inline u8 byte(const u32 x, const unsigned n)

 static const u32 rco_tab[10] = { 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };

-const u32 crypto_ft_tab[4][256] = {
+__visible const u32 crypto_ft_tab[4][256] = {
	{
		0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6,
		0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591,

@@ -326,7 +326,7 @@ const u32 crypto_ft_tab[4][256] = {
	}
 };

-const u32 crypto_fl_tab[4][256] = {
+__visible const u32 crypto_fl_tab[4][256] = {
	{
		0x00000063, 0x0000007c, 0x00000077, 0x0000007b,
		0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5,

@@ -590,7 +590,7 @@ const u32 crypto_fl_tab[4][256] = {
	}
 };

-const u32 crypto_it_tab[4][256] = {
+__visible const u32 crypto_it_tab[4][256] = {
	{
		0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a,
		0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b,

@@ -854,7 +854,7 @@ const u32 crypto_it_tab[4][256] = {
	}
 };

-const u32 crypto_il_tab[4][256] = {
+__visible const u32 crypto_il_tab[4][256] = {
	{
		0x00000052, 0x00000009, 0x0000006a, 0x000000d5,
		0x00000030, 0x00000036, 0x000000a5, 0x00000038,
@@ -388,8 +388,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
	/* round 6 */
	subL[7] ^= subL[1]; subR[7] ^= subR[1];
	subL[1] ^= subR[1] & ~subR[9];
-	dw = subL[1] & subL[9],
-	subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */
+	dw = subL[1] & subL[9];
+	subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */
	/* round 8 */
	subL[11] ^= subL[1]; subR[11] ^= subR[1];
	/* round 10 */

@@ -397,8 +397,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
	/* round 12 */
	subL[15] ^= subL[1]; subR[15] ^= subR[1];
	subL[1] ^= subR[1] & ~subR[17];
-	dw = subL[1] & subL[17],
-	subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */
+	dw = subL[1] & subL[17];
+	subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */
	/* round 14 */
	subL[19] ^= subL[1]; subR[19] ^= subR[1];
	/* round 16 */

@@ -413,8 +413,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
	kw4l = subL[25]; kw4r = subR[25];
	} else {
	subL[1] ^= subR[1] & ~subR[25];
-	dw = subL[1] & subL[25],
-	subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */
+	dw = subL[1] & subL[25];
+	subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */
	/* round 20 */
	subL[27] ^= subL[1]; subR[27] ^= subR[1];
	/* round 22 */

@@ -433,8 +433,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
	/* round 19 */
	subL[26] ^= kw4l; subR[26] ^= kw4r;
	kw4l ^= kw4r & ~subR[24];
-	dw = kw4l & subL[24],
-	kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */
+	dw = kw4l & subL[24];
+	kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */
	}
	/* round 17 */
	subL[22] ^= kw4l; subR[22] ^= kw4r;

@@ -443,8 +443,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
	/* round 13 */
	subL[18] ^= kw4l; subR[18] ^= kw4r;
	kw4l ^= kw4r & ~subR[16];
-	dw = kw4l & subL[16],
-	kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */
+	dw = kw4l & subL[16];
+	kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */
	/* round 11 */
	subL[14] ^= kw4l; subR[14] ^= kw4r;
	/* round 9 */

@@ -452,8 +452,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
	/* round 7 */
	subL[10] ^= kw4l; subR[10] ^= kw4r;
	kw4l ^= kw4r & ~subR[8];
-	dw = kw4l & subL[8],
-	kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */
+	dw = kw4l & subL[8];
+	kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */
	/* round 5 */
	subL[6] ^= kw4l; subR[6] ^= kw4r;
	/* round 3 */

@@ -477,8 +477,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
	SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */
	SUBKEY_R(6) = subR[5] ^ subR[7];
	tl = subL[10] ^ (subR[10] & ~subR[8]);
-	dw = tl & subL[8],  /* FL(kl1) */
-	tr = subR[10] ^ rol32(dw, 1);
+	dw = tl & subL[8];  /* FL(kl1) */
+	tr = subR[10] ^ rol32(dw, 1);
	SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */
	SUBKEY_R(7) = subR[6] ^ tr;
	SUBKEY_L(8) = subL[8]; /* FL(kl1) */

@@ -486,8 +486,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
	SUBKEY_L(9) = subL[9]; /* FLinv(kl2) */
	SUBKEY_R(9) = subR[9];
	tl = subL[7] ^ (subR[7] & ~subR[9]);
-	dw = tl & subL[9],  /* FLinv(kl2) */
-	tr = subR[7] ^ rol32(dw, 1);
+	dw = tl & subL[9];  /* FLinv(kl2) */
+	tr = subR[7] ^ rol32(dw, 1);
	SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */
	SUBKEY_R(10) = tr ^ subR[11];
	SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */

@@ -499,8 +499,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
	SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */
	SUBKEY_R(14) = subR[13] ^ subR[15];
	tl = subL[18] ^ (subR[18] & ~subR[16]);
-	dw = tl & subL[16], /* FL(kl3) */
-	tr = subR[18] ^ rol32(dw, 1);
+	dw = tl & subL[16]; /* FL(kl3) */
+	tr = subR[18] ^ rol32(dw, 1);
	SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */
	SUBKEY_R(15) = subR[14] ^ tr;
	SUBKEY_L(16) = subL[16]; /* FL(kl3) */

@@ -508,8 +508,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
	SUBKEY_L(17) = subL[17]; /* FLinv(kl4) */
	SUBKEY_R(17) = subR[17];
	tl = subL[15] ^ (subR[15] & ~subR[17]);
-	dw = tl & subL[17], /* FLinv(kl4) */
-	tr = subR[15] ^ rol32(dw, 1);
+	dw = tl & subL[17]; /* FLinv(kl4) */
+	tr = subR[15] ^ rol32(dw, 1);
	SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */
	SUBKEY_R(18) = tr ^ subR[19];
	SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */

@@ -527,8 +527,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
	SUBKEY_R(24) = subR[24] ^ subR[23];
	} else {
	tl = subL[26] ^ (subR[26] & ~subR[24]);
-	dw = tl & subL[24], /* FL(kl5) */
-	tr = subR[26] ^ rol32(dw, 1);
+	dw = tl & subL[24]; /* FL(kl5) */
+	tr = subR[26] ^ rol32(dw, 1);
	SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */
	SUBKEY_R(23) = subR[22] ^ tr;
	SUBKEY_L(24) = subL[24]; /* FL(kl5) */

@@ -536,8 +536,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
	SUBKEY_L(25) = subL[25]; /* FLinv(kl6) */
	SUBKEY_R(25) = subR[25];
	tl = subL[23] ^ (subR[23] & ~subR[25]);
-	dw = tl & subL[25], /* FLinv(kl6) */
-	tr = subR[23] ^ rol32(dw, 1);
+	dw = tl & subL[25]; /* FLinv(kl6) */
+	tr = subR[23] ^ rol32(dw, 1);
	SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */
	SUBKEY_R(26) = tr ^ subR[27];
	SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */
@@ -15,7 +15,7 @@
 #include <linux/module.h>
 #include <crypto/cast_common.h>

-const u32 cast_s1[256] = {
+__visible const u32 cast_s1[256] = {
	0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f,
	0x9c004dd3, 0x6003e540, 0xcf9fc949,
	0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0,

@@ -83,7 +83,7 @@ const u32 cast_s1[256] = {
 };
 EXPORT_SYMBOL_GPL(cast_s1);

-const u32 cast_s2[256] = {
+__visible const u32 cast_s2[256] = {
	0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a,
	0xeec5207a, 0x55889c94, 0x72fc0651,
	0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef,

@@ -151,7 +151,7 @@ const u32 cast_s2[256] = {
 };
 EXPORT_SYMBOL_GPL(cast_s2);

-const u32 cast_s3[256] = {
+__visible const u32 cast_s3[256] = {
	0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff,
	0x369fe44b, 0x8c1fc644, 0xaececa90,
	0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806,

@@ -219,7 +219,7 @@ const u32 cast_s3[256] = {
 };
 EXPORT_SYMBOL_GPL(cast_s3);

-const u32 cast_s4[256] = {
+__visible const u32 cast_s4[256] = {
	0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb,
	0x64ad8c57, 0x85510443, 0xfa020ed1,
	0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43,
@@ -0,0 +1,178 @@
/*
 * Cryptographic API.
 *
 * T10 Data Integrity Field CRC16 Crypto Transform
 *
 * Copyright (c) 2007 Oracle Corporation.  All rights reserved.
 * Written by Martin K. Petersen <martin.petersen@oracle.com>
 * Copyright (C) 2013 Intel Corporation
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-t10dif.h>
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kernel.h>

struct chksum_desc_ctx {
	__u16 crc;
};

/* Table generated using the following polynomial:
 * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
 * gt: 0x8bb7
 */
static const __u16 t10_dif_crc_table[256] = {
	0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
	0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
	0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
	0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
	0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
	0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
	0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
	0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
	0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
	0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
	0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
	0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
	0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
	0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
	0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
	0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
	0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
	0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
	0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
	0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
	0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
	0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
	0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
	0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
	0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
	0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
	0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
	0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
	0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
	0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
	0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
	0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
};

__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len)
{
	unsigned int i;

	for (i = 0 ; i < len ; i++)
		crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];

	return crc;
}
EXPORT_SYMBOL(crc_t10dif_generic);

/*
 * Steps through buffer one byte at a time, calculates reflected
 * crc using table.
 */

static int chksum_init(struct shash_desc *desc)
{
	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

	ctx->crc = 0;

	return 0;
}

static int chksum_update(struct shash_desc *desc, const u8 *data,
			 unsigned int length)
{
	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

	ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
	return 0;
}

static int chksum_final(struct shash_desc *desc, u8 *out)
{
	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

	*(__u16 *)out = ctx->crc;
	return 0;
}

static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
			  u8 *out)
{
	*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
	return 0;
}

static int chksum_finup(struct shash_desc *desc, const u8 *data,
			unsigned int len, u8 *out)
{
	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

	return __chksum_finup(&ctx->crc, data, len, out);
}

static int chksum_digest(struct shash_desc *desc, const u8 *data,
			 unsigned int length, u8 *out)
{
	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

	return __chksum_finup(&ctx->crc, data, length, out);
}

static struct shash_alg alg = {
	.digestsize	= CRC_T10DIF_DIGEST_SIZE,
	.init		= chksum_init,
	.update		= chksum_update,
	.final		= chksum_final,
	.finup		= chksum_finup,
	.digest		= chksum_digest,
	.descsize	= sizeof(struct chksum_desc_ctx),
	.base		= {
		.cra_name		= "crct10dif",
		.cra_driver_name	= "crct10dif-generic",
		.cra_priority		= 100,
		.cra_blocksize		= CRC_T10DIF_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
};

static int __init crct10dif_mod_init(void)
{
	int ret;

	ret = crypto_register_shash(&alg);
	return ret;
}

static void __exit crct10dif_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(crct10dif_mod_init);
module_exit(crct10dif_mod_fini);

MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
MODULE_DESCRIPTION("T10 DIF CRC calculation.");
MODULE_LICENSE("GPL");
@@ -110,7 +110,7 @@ static const __be32 sbox0[256] = {
 };

 #undef Z
-#define Z(x) cpu_to_be32((x << 27) | (x >> 5))
+#define Z(x) cpu_to_be32(((x & 0x1f) << 27) | (x >> 5))
 static const __be32 sbox1[256] = {
	Z(0x77), Z(0x14), Z(0xa6), Z(0xfe), Z(0xb2), Z(0x5e), Z(0x8c), Z(0x3e),
	Z(0x67), Z(0x6c), Z(0xa1), Z(0x0d), Z(0xc2), Z(0xa2), Z(0xc1), Z(0x85),
@@ -124,3 +124,25 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
	scatterwalk_done(&walk, out, 0);
 }
 EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
+
+int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes)
+{
+	int offset = 0, n = 0;
+
+	/* num_bytes is too small */
+	if (num_bytes < sg->length)
+		return -1;
+
+	do {
+		offset += sg->length;
+		n++;
+		sg = scatterwalk_sg_next(sg);
+
+		/* num_bytes is too large */
+		if (unlikely(!sg && (num_bytes < offset)))
+			return -1;
+	} while (sg && (num_bytes > offset));
+
+	return n;
+}
+EXPORT_SYMBOL_GPL(scatterwalk_bytes_sglen);
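The new scatterwalk_bytes_sglen() helper returns how many scatterlist entries are needed to cover num_bytes, or -1 when the list cannot cover that many bytes; the nx sg-list fixes in this merge appear to be its intended consumer. A hedged usage sketch (variable names are illustrative):

    int nents = scatterwalk_bytes_sglen(sg, nbytes);

    if (nents < 0)
        return -EINVAL;     /* scatterlist cannot cover nbytes */
    /* otherwise, nents entries are enough to walk nbytes of data */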
@@ -1174,6 +1174,10 @@ static int do_test(int m)
		ret += tcrypt_test("ghash");
		break;

+	case 47:
+		ret += tcrypt_test("crct10dif");
+		break;
+
	case 100:
		ret += tcrypt_test("hmac(md5)");
		break;

@@ -1498,6 +1502,10 @@ static int do_test(int m)
		test_hash_speed("crc32c", sec, generic_hash_speed_template);
		if (mode > 300 && mode < 400) break;

+	case 320:
+		test_hash_speed("crct10dif", sec, generic_hash_speed_template);
+		if (mode > 300 && mode < 400) break;
+
	case 399:
		break;

@@ -2045,6 +2045,16 @@ static const struct alg_test_desc alg_test_descs[] = {
				.count = CRC32C_TEST_VECTORS
			}
		}
	}, {
+		.alg = "crct10dif",
+		.test = alg_test_hash,
+		.fips_allowed = 1,
+		.suite = {
+			.hash = {
+				.vecs = crct10dif_tv_template,
+				.count = CRCT10DIF_TEST_VECTORS
+			}
+		}
+	}, {
		.alg = "cryptd(__driver-cbc-aes-aesni)",
		.test = alg_test_null,

@@ -3224,7 +3234,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
	if (i >= 0)
		rc |= alg_test_descs[i].test(alg_test_descs + i, driver,
					     type, mask);
-	if (j >= 0)
+	if (j >= 0 && j != i)
		rc |= alg_test_descs[j].test(alg_test_descs + j, driver,
					     type, mask);

@@ -450,6 +450,39 @@ static struct hash_testvec rmd320_tv_template[] = {
	}
 };

+#define CRCT10DIF_TEST_VECTORS	3
+static struct hash_testvec crct10dif_tv_template[] = {
+	{
+		.plaintext = "abc",
+		.psize  = 3,
+#ifdef __LITTLE_ENDIAN
+		.digest = "\x3b\x44",
+#else
+		.digest = "\x44\x3b",
+#endif
+	}, {
+		.plaintext = "1234567890123456789012345678901234567890"
+			     "123456789012345678901234567890123456789",
+		.psize = 79,
+#ifdef __LITTLE_ENDIAN
+		.digest = "\x70\x4b",
+#else
+		.digest = "\x4b\x70",
+#endif
+	}, {
+		.plaintext =
+		"abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
+		.psize = 56,
+#ifdef __LITTLE_ENDIAN
+		.digest = "\xe3\x9c",
+#else
+		.digest = "\x9c\xe3",
+#endif
+		.np	= 2,
+		.tap	= { 28, 28 }
+	}
+};
+
 /*
  * SHA1 test vectors from FIPS PUB 180-1
  * Long vector from CAVS 5.0
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -153,12 +153,12 @@ config HW_RANDOM_IXP4XX
 
 config HW_RANDOM_OMAP
 	tristate "OMAP Random Number Generator support"
-	depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP2)
+	depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP2PLUS)
 	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
-	  Generator hardware found on OMAP16xx and OMAP24xx multimedia
-	  processors.
+	  Generator hardware found on OMAP16xx, OMAP2/3/4/5 and AM33xx/AM43xx
+	  multimedia processors.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called omap-rng.
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -164,7 +164,9 @@ static int __init mxc_rnga_probe(struct platform_device *pdev)
 		goto out;
 	}
 
-	clk_prepare_enable(mxc_rng->clk);
+	err = clk_prepare_enable(mxc_rng->clk);
+	if (err)
+		goto out;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res);
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -24,57 +24,131 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/interrupt.h>
 
 #include <asm/io.h>
 
-#define RNG_OUT_REG		0x00		/* Output register */
-#define RNG_STAT_REG		0x04		/* Status register
-							[0] = STAT_BUSY */
-#define RNG_ALARM_REG		0x24		/* Alarm register
-							[7:0] = ALARM_COUNTER */
-#define RNG_CONFIG_REG		0x28		/* Configuration register
-							[11:6] = RESET_COUNT
-							[5:3]  = RING2_DELAY
-							[2:0]  = RING1_DELAY */
-#define RNG_REV_REG		0x3c		/* Revision register
-							[7:0] = REV_NB */
-#define RNG_MASK_REG		0x40		/* Mask and reset register
-							[2] = IT_EN
-							[1] = SOFTRESET
-							[0] = AUTOIDLE */
-#define RNG_SYSSTATUS		0x44		/* System status
-							[0] = RESETDONE */
+#define RNG_REG_STATUS_RDY			(1 << 0)
 
-/**
- * struct omap_rng_private_data - RNG IP block-specific data
- * @base: virtual address of the beginning of the RNG IP block registers
- * @mem_res: struct resource * for the IP block registers physical memory
- */
-struct omap_rng_private_data {
-	void __iomem *base;
-	struct resource *mem_res;
+#define RNG_REG_INTACK_RDY_MASK			(1 << 0)
+#define RNG_REG_INTACK_SHUTDOWN_OFLO_MASK	(1 << 1)
+#define RNG_SHUTDOWN_OFLO_MASK			(1 << 1)
+
+#define RNG_CONTROL_STARTUP_CYCLES_SHIFT	16
+#define RNG_CONTROL_STARTUP_CYCLES_MASK		(0xffff << 16)
+#define RNG_CONTROL_ENABLE_TRNG_SHIFT		10
+#define RNG_CONTROL_ENABLE_TRNG_MASK		(1 << 10)
+
+#define RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT	16
+#define RNG_CONFIG_MAX_REFIL_CYCLES_MASK	(0xffff << 16)
+#define RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT	0
+#define RNG_CONFIG_MIN_REFIL_CYCLES_MASK	(0xff << 0)
+
+#define RNG_CONTROL_STARTUP_CYCLES		0xff
+#define RNG_CONFIG_MIN_REFIL_CYCLES		0x21
+#define RNG_CONFIG_MAX_REFIL_CYCLES		0x22
+
+#define RNG_ALARMCNT_ALARM_TH_SHIFT		0x0
+#define RNG_ALARMCNT_ALARM_TH_MASK		(0xff << 0)
+#define RNG_ALARMCNT_SHUTDOWN_TH_SHIFT		16
+#define RNG_ALARMCNT_SHUTDOWN_TH_MASK		(0x1f << 16)
+#define RNG_ALARM_THRESHOLD			0xff
+#define RNG_SHUTDOWN_THRESHOLD			0x4
+
+#define RNG_REG_FROENABLE_MASK			0xffffff
+#define RNG_REG_FRODETUNE_MASK			0xffffff
+
+#define OMAP2_RNG_OUTPUT_SIZE			0x4
+#define OMAP4_RNG_OUTPUT_SIZE			0x8
+
+enum {
+	RNG_OUTPUT_L_REG = 0,
+	RNG_OUTPUT_H_REG,
+	RNG_STATUS_REG,
+	RNG_INTMASK_REG,
+	RNG_INTACK_REG,
+	RNG_CONTROL_REG,
+	RNG_CONFIG_REG,
+	RNG_ALARMCNT_REG,
+	RNG_FROENABLE_REG,
+	RNG_FRODETUNE_REG,
+	RNG_ALARMMASK_REG,
+	RNG_ALARMSTOP_REG,
+	RNG_REV_REG,
+	RNG_SYSCONFIG_REG,
 };
 
-static inline u32 omap_rng_read_reg(struct omap_rng_private_data *priv, int reg)
+static const u16 reg_map_omap2[] = {
+	[RNG_OUTPUT_L_REG]	= 0x0,
+	[RNG_STATUS_REG]	= 0x4,
+	[RNG_CONFIG_REG]	= 0x28,
+	[RNG_REV_REG]		= 0x3c,
+	[RNG_SYSCONFIG_REG]	= 0x40,
+};
+
+static const u16 reg_map_omap4[] = {
+	[RNG_OUTPUT_L_REG]	= 0x0,
+	[RNG_OUTPUT_H_REG]	= 0x4,
+	[RNG_STATUS_REG]	= 0x8,
+	[RNG_INTMASK_REG]	= 0xc,
+	[RNG_INTACK_REG]	= 0x10,
+	[RNG_CONTROL_REG]	= 0x14,
+	[RNG_CONFIG_REG]	= 0x18,
+	[RNG_ALARMCNT_REG]	= 0x1c,
+	[RNG_FROENABLE_REG]	= 0x20,
+	[RNG_FRODETUNE_REG]	= 0x24,
+	[RNG_ALARMMASK_REG]	= 0x28,
+	[RNG_ALARMSTOP_REG]	= 0x2c,
+	[RNG_REV_REG]		= 0x1FE0,
+	[RNG_SYSCONFIG_REG]	= 0x1FE4,
+};
+
+struct omap_rng_dev;
+/**
+ * struct omap_rng_pdata - RNG IP block-specific data
+ * @regs: Pointer to the register offsets structure.
+ * @data_size: No. of bytes in RNG output.
+ * @data_present: Callback to determine if data is available.
+ * @init: Callback for IP specific initialization sequence.
+ * @cleanup: Callback for IP specific cleanup sequence.
+ */
+struct omap_rng_pdata {
+	u16	*regs;
+	u32	data_size;
+	u32	(*data_present)(struct omap_rng_dev *priv);
+	int	(*init)(struct omap_rng_dev *priv);
+	void	(*cleanup)(struct omap_rng_dev *priv);
+};
+
+struct omap_rng_dev {
+	void __iomem			*base;
+	struct device			*dev;
+	const struct omap_rng_pdata	*pdata;
+};
+
+static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg)
 {
-	return __raw_readl(priv->base + reg);
+	return __raw_readl(priv->base + priv->pdata->regs[reg]);
 }
 
-static inline void omap_rng_write_reg(struct omap_rng_private_data *priv,
-				      int reg, u32 val)
+static inline void omap_rng_write(struct omap_rng_dev *priv, u16 reg,
+				  u32 val)
 {
-	__raw_writel(val, priv->base + reg);
+	__raw_writel(val, priv->base + priv->pdata->regs[reg]);
 }
 
 static int omap_rng_data_present(struct hwrng *rng, int wait)
 {
-	struct omap_rng_private_data *priv;
+	struct omap_rng_dev *priv;
 	int data, i;
 
-	priv = (struct omap_rng_private_data *)rng->priv;
+	priv = (struct omap_rng_dev *)rng->priv;
 
 	for (i = 0; i < 20; i++) {
-		data = omap_rng_read_reg(priv, RNG_STAT_REG) ? 0 : 1;
+		data = priv->pdata->data_present(priv);
 		if (data || !wait)
 			break;
 		/* RNG produces data fast enough (2+ MBit/sec, even
@@ -89,27 +163,212 @@ static int omap_rng_data_present(struct hwrng *rng, int wait)
 
 static int omap_rng_data_read(struct hwrng *rng, u32 *data)
 {
-	struct omap_rng_private_data *priv;
+	struct omap_rng_dev *priv;
+	u32 data_size, i;
 
-	priv = (struct omap_rng_private_data *)rng->priv;
+	priv = (struct omap_rng_dev *)rng->priv;
+	data_size = priv->pdata->data_size;
 
-	*data = omap_rng_read_reg(priv, RNG_OUT_REG);
+	for (i = 0; i < data_size / sizeof(u32); i++)
+		data[i] = omap_rng_read(priv, RNG_OUTPUT_L_REG + i);
 
-	return sizeof(u32);
+	if (priv->pdata->regs[RNG_INTACK_REG])
+		omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK);
+	return data_size;
+}
+
+static int omap_rng_init(struct hwrng *rng)
+{
+	struct omap_rng_dev *priv;
+
+	priv = (struct omap_rng_dev *)rng->priv;
+	return priv->pdata->init(priv);
+}
+
+static void omap_rng_cleanup(struct hwrng *rng)
+{
+	struct omap_rng_dev *priv;
+
+	priv = (struct omap_rng_dev *)rng->priv;
+	priv->pdata->cleanup(priv);
 }
 
 static struct hwrng omap_rng_ops = {
 	.name		= "omap",
 	.data_present	= omap_rng_data_present,
 	.data_read	= omap_rng_data_read,
+	.init		= omap_rng_init,
+	.cleanup	= omap_rng_cleanup,
 };
 
+static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv)
+{
+	return omap_rng_read(priv, RNG_STATUS_REG) ? 0 : 1;
+}
+
+static int omap2_rng_init(struct omap_rng_dev *priv)
+{
+	omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x1);
+	return 0;
+}
+
+static void omap2_rng_cleanup(struct omap_rng_dev *priv)
+{
+	omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x0);
+}
+
+static struct omap_rng_pdata omap2_rng_pdata = {
+	.regs		= (u16 *)reg_map_omap2,
+	.data_size	= OMAP2_RNG_OUTPUT_SIZE,
+	.data_present	= omap2_rng_data_present,
+	.init		= omap2_rng_init,
+	.cleanup	= omap2_rng_cleanup,
+};
+
+#if defined(CONFIG_OF)
+static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv)
+{
+	return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY;
+}
+
+static int omap4_rng_init(struct omap_rng_dev *priv)
+{
+	u32 val;
+
+	/* Return if RNG is already running. */
+	if (omap_rng_read(priv, RNG_CONFIG_REG) & RNG_CONTROL_ENABLE_TRNG_MASK)
+		return 0;
+
+	val = RNG_CONFIG_MIN_REFIL_CYCLES << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT;
+	val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT;
+	omap_rng_write(priv, RNG_CONFIG_REG, val);
+
+	omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0);
+	omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK);
+	val = RNG_ALARM_THRESHOLD << RNG_ALARMCNT_ALARM_TH_SHIFT;
+	val |= RNG_SHUTDOWN_THRESHOLD << RNG_ALARMCNT_SHUTDOWN_TH_SHIFT;
+	omap_rng_write(priv, RNG_ALARMCNT_REG, val);
+
+	val = RNG_CONTROL_STARTUP_CYCLES << RNG_CONTROL_STARTUP_CYCLES_SHIFT;
+	val |= RNG_CONTROL_ENABLE_TRNG_MASK;
+	omap_rng_write(priv, RNG_CONTROL_REG, val);
+
+	return 0;
+}
+
+static void omap4_rng_cleanup(struct omap_rng_dev *priv)
+{
+	int val;
+
+	val = omap_rng_read(priv, RNG_CONTROL_REG);
+	val &= ~RNG_CONTROL_ENABLE_TRNG_MASK;
+	omap_rng_write(priv, RNG_CONFIG_REG, val);
+}
+
+static irqreturn_t omap4_rng_irq(int irq, void *dev_id)
+{
+	struct omap_rng_dev *priv = dev_id;
+	u32 fro_detune, fro_enable;
+
+	/*
+	 * Interrupt raised by a fro shutdown threshold, do the following:
+	 * 1. Clear the alarm events.
+	 * 2. De tune the FROs which are shutdown.
+	 * 3. Re enable the shutdown FROs.
+	 */
+	omap_rng_write(priv, RNG_ALARMMASK_REG, 0x0);
+	omap_rng_write(priv, RNG_ALARMSTOP_REG, 0x0);
+
+	fro_enable = omap_rng_read(priv, RNG_FROENABLE_REG);
+	fro_detune = ~fro_enable & RNG_REG_FRODETUNE_MASK;
+	fro_detune = fro_detune | omap_rng_read(priv, RNG_FRODETUNE_REG);
+	fro_enable = RNG_REG_FROENABLE_MASK;
+
+	omap_rng_write(priv, RNG_FRODETUNE_REG, fro_detune);
+	omap_rng_write(priv, RNG_FROENABLE_REG, fro_enable);
+
+	omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_SHUTDOWN_OFLO_MASK);
+
+	return IRQ_HANDLED;
+}
+
+static struct omap_rng_pdata omap4_rng_pdata = {
+	.regs		= (u16 *)reg_map_omap4,
+	.data_size	= OMAP4_RNG_OUTPUT_SIZE,
+	.data_present	= omap4_rng_data_present,
+	.init		= omap4_rng_init,
+	.cleanup	= omap4_rng_cleanup,
+};
+
+static const struct of_device_id omap_rng_of_match[] = {
+	{
+		.compatible	= "ti,omap2-rng",
+		.data		= &omap2_rng_pdata,
+	},
+	{
+		.compatible	= "ti,omap4-rng",
+		.data		= &omap4_rng_pdata,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, omap_rng_of_match);
+
+static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
+					  struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	struct device *dev = &pdev->dev;
+	int irq, err;
+
+	match = of_match_device(of_match_ptr(omap_rng_of_match), dev);
+	if (!match) {
+		dev_err(dev, "no compatible OF match\n");
+		return -EINVAL;
+	}
+	priv->pdata = match->data;
+
+	if (of_device_is_compatible(dev->of_node, "ti,omap4-rng")) {
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0) {
+			dev_err(dev, "%s: error getting IRQ resource - %d\n",
+				__func__, irq);
+			return irq;
+		}
+
+		err = devm_request_irq(dev, irq, omap4_rng_irq,
+				       IRQF_TRIGGER_NONE, dev_name(dev), priv);
+		if (err) {
+			dev_err(dev, "unable to request irq %d, err = %d\n",
+				irq, err);
+			return err;
+		}
+		omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
+	}
+	return 0;
+}
+#else
+static int of_get_omap_rng_device_details(struct omap_rng_dev *omap_rng,
+					  struct platform_device *pdev)
+{
+	return -EINVAL;
+}
+#endif
+
+static int get_omap_rng_device_details(struct omap_rng_dev *omap_rng)
+{
+	/* Only OMAP2/3 can be non-DT */
+	omap_rng->pdata = &omap2_rng_pdata;
+	return 0;
+}
+
 static int omap_rng_probe(struct platform_device *pdev)
 {
-	struct omap_rng_private_data *priv;
+	struct omap_rng_dev *priv;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
 	int ret;
 
-	priv = kzalloc(sizeof(struct omap_rng_private_data), GFP_KERNEL);
+	priv = devm_kzalloc(dev, sizeof(struct omap_rng_dev), GFP_KERNEL);
 	if (!priv) {
 		dev_err(&pdev->dev, "could not allocate memory\n");
 		return -ENOMEM;
@@ -117,26 +376,29 @@ static int omap_rng_probe(struct platform_device *pdev)
 
 	omap_rng_ops.priv = (unsigned long)priv;
-	platform_set_drvdata(pdev, priv);
+	priv->dev = dev;
 
-	priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	priv->base = devm_ioremap_resource(&pdev->dev, priv->mem_res);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(priv->base)) {
 		ret = PTR_ERR(priv->base);
 		goto err_ioremap;
 	}
+	platform_set_drvdata(pdev, priv);
 
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
 
+	ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
+				get_omap_rng_device_details(priv);
+	if (ret)
+		goto err_ioremap;
+
 	ret = hwrng_register(&omap_rng_ops);
 	if (ret)
 		goto err_register;
 
 	dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n",
-		 omap_rng_read_reg(priv, RNG_REV_REG));
-
-	omap_rng_write_reg(priv, RNG_MASK_REG, 0x1);
+		 omap_rng_read(priv, RNG_REV_REG));
 
 	return 0;
 
@@ -144,26 +406,21 @@ err_register:
 	priv->base = NULL;
 	pm_runtime_disable(&pdev->dev);
 err_ioremap:
-	kfree(priv);
-
+	dev_err(dev, "initialization failed.\n");
 	return ret;
 }
 
 static int __exit omap_rng_remove(struct platform_device *pdev)
 {
-	struct omap_rng_private_data *priv = platform_get_drvdata(pdev);
+	struct omap_rng_dev *priv = platform_get_drvdata(pdev);
 
 	hwrng_unregister(&omap_rng_ops);
 
-	omap_rng_write_reg(priv, RNG_MASK_REG, 0x0);
+	priv->pdata->cleanup(priv);
 
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
-	release_mem_region(priv->mem_res->start, resource_size(priv->mem_res));
-
-	kfree(priv);
-
 	return 0;
 }
 
@@ -171,9 +428,9 @@ static int __exit omap_rng_remove(struct platform_device *pdev)
 
 static int omap_rng_suspend(struct device *dev)
 {
-	struct omap_rng_private_data *priv = dev_get_drvdata(dev);
+	struct omap_rng_dev *priv = dev_get_drvdata(dev);
 
-	omap_rng_write_reg(priv, RNG_MASK_REG, 0x0);
+	priv->pdata->cleanup(priv);
 	pm_runtime_put_sync(dev);
 
 	return 0;
@@ -181,10 +438,10 @@ static int omap_rng_suspend(struct device *dev)
 
 static int omap_rng_resume(struct device *dev)
 {
-	struct omap_rng_private_data *priv = dev_get_drvdata(dev);
+	struct omap_rng_dev *priv = dev_get_drvdata(dev);
 
 	pm_runtime_get_sync(dev);
-	omap_rng_write_reg(priv, RNG_MASK_REG, 0x1);
+	priv->pdata->init(priv);
 
 	return 0;
 }
 
@@ -198,31 +455,18 @@ static SIMPLE_DEV_PM_OPS(omap_rng_pm, omap_rng_suspend, omap_rng_resume);
 
 #endif
 
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:omap_rng");
-
 static struct platform_driver omap_rng_driver = {
 	.driver = {
 		.name		= "omap_rng",
 		.owner		= THIS_MODULE,
 		.pm		= OMAP_RNG_PM,
+		.of_match_table = of_match_ptr(omap_rng_of_match),
 	},
 	.probe		= omap_rng_probe,
 	.remove		= __exit_p(omap_rng_remove),
 };
 
-static int __init omap_rng_init(void)
-{
-	return platform_driver_register(&omap_rng_driver);
-}
-
-static void __exit omap_rng_exit(void)
-{
-	platform_driver_unregister(&omap_rng_driver);
-}
-
-module_init(omap_rng_init);
-module_exit(omap_rng_exit);
-
+module_platform_driver(omap_rng_driver);
+MODULE_ALIAS("platform:omap_rng");
 MODULE_AUTHOR("Deepak Saxena (and others)");
 MODULE_LICENSE("GPL");
--- a/drivers/char/hw_random/picoxcell-rng.c
+++ b/drivers/char/hw_random/picoxcell-rng.c
@@ -33,7 +33,7 @@
 
 static void __iomem *rng_base;
 static struct clk *rng_clk;
-struct device *rng_dev;
+static struct device *rng_dev;
 
 static inline u32 picoxcell_trng_read_csr(void)
 {
--- a/drivers/char/hw_random/tx4939-rng.c
+++ b/drivers/char/hw_random/tx4939-rng.c
@@ -110,12 +110,10 @@ static int __init tx4939_rng_probe(struct platform_device *dev)
 	struct resource *r;
 	int i;
 
-	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
-	if (!r)
-		return -EBUSY;
 	rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL);
 	if (!rngdev)
 		return -ENOMEM;
+	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
 	rngdev->base = devm_ioremap_resource(&dev->dev, r);
 	if (IS_ERR(rngdev->base))
 		return PTR_ERR(rngdev->base);
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -29,6 +29,7 @@
 #include <linux/kernel.h>
 #include <linux/hw_random.h>
 #include <linux/delay.h>
+#include <asm/cpu_device_id.h>
 #include <asm/io.h>
 #include <asm/msr.h>
 #include <asm/cpufeature.h>
@@ -220,5 +221,11 @@ static void __exit mod_exit(void)
 module_init(mod_init);
 module_exit(mod_exit);
 
+static struct x86_cpu_id via_rng_cpu_id[] = {
+	X86_FEATURE_MATCH(X86_FEATURE_XSTORE),
+	{}
+};
+
 MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock");
 MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id);
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -242,17 +242,20 @@ config CRYPTO_DEV_PPC4XX
 	  This option allows you to have support for AMCC crypto acceleration.
 
 config CRYPTO_DEV_OMAP_SHAM
-	tristate "Support for OMAP SHA1/MD5 hw accelerator"
-	depends on ARCH_OMAP2 || ARCH_OMAP3
+	tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
+	depends on ARCH_OMAP2PLUS
 	select CRYPTO_SHA1
 	select CRYPTO_MD5
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	select CRYPTO_HMAC
 	help
-	  OMAP processors have SHA1/MD5 hw accelerator. Select this if you
-	  want to use the OMAP module for SHA1/MD5 algorithms.
+	  OMAP processors have MD5/SHA1/SHA2 hw accelerator. Select this if you
+	  want to use the OMAP module for MD5/SHA1/SHA2 algorithms.
 
 config CRYPTO_DEV_OMAP_AES
 	tristate "Support for OMAP AES hw engine"
-	depends on ARCH_OMAP2 || ARCH_OMAP3
+	depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS
 	select CRYPTO_AES
 	select CRYPTO_BLKCIPHER2
 	help
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -32,10 +32,10 @@
 #include "crypto4xx_sa.h"
 #include "crypto4xx_core.h"
 
-void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
-			      u32 save_iv, u32 ld_h, u32 ld_iv, u32 hdr_proc,
-			      u32 h, u32 c, u32 pad_type, u32 op_grp, u32 op,
-			      u32 dir)
+static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
+				     u32 save_iv, u32 ld_h, u32 ld_iv,
+				     u32 hdr_proc, u32 h, u32 c, u32 pad_type,
+				     u32 op_grp, u32 op, u32 dir)
 {
 	sa->sa_command_0.w = 0;
 	sa->sa_command_0.bf.save_hash_state = save_h;
@@ -52,9 +52,10 @@ void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
 	sa->sa_command_0.bf.dir = dir;
 }
 
-void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, u32 hmac_mc,
-			      u32 cfb, u32 esn, u32 sn_mask, u32 mute,
-			      u32 cp_pad, u32 cp_pay, u32 cp_hdr)
+static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
+				     u32 hmac_mc, u32 cfb, u32 esn,
+				     u32 sn_mask, u32 mute, u32 cp_pad,
+				     u32 cp_pay, u32 cp_hdr)
 {
 	sa->sa_command_1.w = 0;
 	sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -98,3 +98,11 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
 
 	  To compile this as a module, choose M here: the module
 	  will be called caamrng.
+
+config CRYPTO_DEV_FSL_CAAM_DEBUG
+	bool "Enable debug output in CAAM driver"
+	depends on CRYPTO_DEV_FSL_CAAM
+	default n
+	help
+	  Selecting this will enable printing of various debug
+	  information in the CAAM driver.
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -1,6 +1,9 @@
 #
 # Makefile for the CAAM backend and dependent components
 #
+ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
+	EXTRA_CFLAGS := -DDEBUG
+endif
 
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -65,8 +65,6 @@
 #define CAAM_MAX_IV_LENGTH		16
 
 /* length of descriptors text */
-#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
-
 #define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
 #define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
 #define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
@@ -84,8 +82,6 @@
 
 #ifdef DEBUG
 /* for print_hex_dumps with line references */
-#define xstr(s) str(s)
-#define str(s) #s
 #define debug(format, arg...) printk(format, arg)
 #else
 #define debug(format, arg...)
@@ -285,7 +281,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 		return -ENOMEM;
 	}
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 		       desc_bytes(desc), 1);
 #endif
@@ -353,7 +349,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 		return -ENOMEM;
 	}
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 		       desc_bytes(desc), 1);
 #endif
@@ -436,7 +432,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 		return -ENOMEM;
 	}
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 		       desc_bytes(desc), 1);
 #endif
@@ -500,7 +496,7 @@ static int aead_setkey(struct crypto_aead *aead,
 	       keylen, enckeylen, authkeylen);
 	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
 	       ctx->split_key_len, ctx->split_key_pad_len);
-	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif
 
@@ -519,7 +515,7 @@ static int aead_setkey(struct crypto_aead *aead,
 		return -ENOMEM;
 	}
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
 		       ctx->split_key_pad_len + enckeylen, 1);
 #endif
@@ -549,7 +545,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	u32 *desc;
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif
 
@@ -598,7 +594,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 		return -ENOMEM;
 	}
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 		       desc_bytes(desc), 1);
 #endif
@@ -643,7 +640,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	}
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 		       desc_bytes(desc), 1);
 #endif
@@ -780,13 +778,13 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	aead_unmap(jrdev, edesc, req);
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
 		       req->assoclen , 1);
-	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
 		       edesc->src_nents ? 100 : ivsize, 1);
-	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
 		       edesc->src_nents ? 100 : req->cryptlen +
 				ctx->authsize + 4, 1);
@@ -814,10 +812,10 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 		       offsetof(struct aead_edesc, hw_desc));
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
 		       ivsize, 1);
-	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
 		       req->cryptlen, 1);
 #endif
@@ -837,7 +835,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 		err = -EBADMSG;
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4,
 		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
 		       sizeof(struct iphdr) + req->assoclen +
@@ -845,7 +843,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 		       ctx->authsize + 36, 1);
 	if (!err && edesc->sec4_sg_bytes) {
 		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
-		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
+		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
 			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
 			sg->length + ctx->authsize + 16, 1);
 	}
@@ -878,10 +876,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	}
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
 		       edesc->src_nents > 1 ? 100 : ivsize, 1);
-	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
 		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
 #endif
@@ -913,10 +911,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	}
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
 		       ivsize, 1);
-	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
 		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
 #endif
@@ -947,16 +945,16 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 #ifdef DEBUG
 	debug("assoclen %d cryptlen %d authsize %d\n",
 	      req->assoclen, req->cryptlen, authsize);
-	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
 		       req->assoclen , 1);
-	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
 		       edesc->src_nents ? 100 : ivsize, 1);
-	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
 			edesc->src_nents ? 100 : req->cryptlen, 1);
-	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
 		       desc_bytes(sh_desc), 1);
 #endif
@@ -1025,15 +1023,15 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 #ifdef DEBUG
 	debug("assoclen %d cryptlen %d authsize %d\n",
 	      req->assoclen, req->cryptlen, authsize);
-	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
 		       req->assoclen , 1);
-	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
-	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
 		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
-	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
 		       desc_bytes(sh_desc), 1);
 #endif
@@ -1086,10 +1084,10 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
 	int len, sec4_sg_index = 0;
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
 		       ivsize, 1);
-	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
 		       edesc->src_nents ? 100 : req->nbytes, 1);
 #endif
@@ -1247,7 +1245,7 @@ static int aead_encrypt(struct aead_request *req)
 	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
 		      all_contig, true);
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
 		       desc_bytes(edesc->hw_desc), 1);
 #endif
@@ -1281,7 +1279,7 @@ static int aead_decrypt(struct aead_request *req)
 		return PTR_ERR(edesc);
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
 		       req->cryptlen, 1);
 #endif
@@ -1290,7 +1288,7 @@ static int aead_decrypt(struct aead_request *req)
 	init_aead_job(ctx->sh_desc_dec,
 		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
 		       desc_bytes(edesc->hw_desc), 1);
 #endif
@@ -1437,7 +1435,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
 		return PTR_ERR(edesc);
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
 		       req->cryptlen, 1);
 #endif
@@ -1446,7 +1444,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
 	init_aead_giv_job(ctx->sh_desc_givenc,
 			  ctx->sh_desc_givenc_dma, edesc, req, contig);
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
 		       desc_bytes(edesc->hw_desc), 1);
 #endif
@@ -1546,7 +1544,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	edesc->iv_dma = iv_dma;
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
 		       sec4_sg_bytes, 1);
 #endif
@@ -1575,7 +1573,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req)
 	init_ablkcipher_job(ctx->sh_desc_enc,
 		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
 		       desc_bytes(edesc->hw_desc), 1);
 #endif
@@ -1613,7 +1611,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
 		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
 	desc = edesc->hw_desc;
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
 		       desc_bytes(edesc->hw_desc), 1);
 #endif
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -72,8 +72,6 @@
 #define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
 
 /* length of descriptors text */
-#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
-
 #define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
 #define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
 #define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
@@ -91,8 +89,6 @@
 
 #ifdef DEBUG
 /* for print_hex_dumps with line references */
-#define xstr(s) str(s)
-#define str(s) #s
 #define debug(format, arg...) printk(format, arg)
 #else
 #define debug(format, arg...)
@@ -331,7 +327,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 		return -ENOMEM;
 	}
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR,
+		       "ahash update shdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -349,7 +346,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 		return -ENOMEM;
 	}
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR,
+		       "ahash update first shdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -366,7 +364,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 		return -ENOMEM;
 	}
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 		       desc_bytes(desc), 1);
 #endif
@@ -384,7 +382,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 		return -ENOMEM;
 	}
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 		       desc_bytes(desc), 1);
 #endif
@@ -403,7 +401,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 		return -ENOMEM;
 	}
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR,
+		       "ahash digest shdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 		       desc_bytes(desc), 1);
 #endif
@@ -464,9 +463,9 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 			 LDST_SRCDST_BYTE_CONTEXT);
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
-	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -479,7 +478,8 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 		wait_for_completion_interruptible(&result.completion);
 		ret = result.err;
 #ifdef DEBUG
-		print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ",
+		print_hex_dump(KERN_ERR,
+			       "digested key@"__stringify(__LINE__)": ",
 			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
 			       digestsize, 1);
 #endif
@@ -530,7 +530,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
 #ifdef DEBUG
 	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
 	       ctx->split_key_len, ctx->split_key_pad_len);
-	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif
 
@@ -545,7 +545,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
 		return -ENOMEM;
 	}
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
 		       ctx->split_key_pad_len, 1);
 #endif
@@ -638,11 +638,11 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
 	kfree(edesc);
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
 		       ctx->ctx_len, 1);
 	if (req->result)
-		print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
 			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
 			       digestsize, 1);
 #endif
@@ -676,11 +676,11 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
 	kfree(edesc);
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
 		       ctx->ctx_len, 1);
 	if (req->result)
-		print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
 			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
 			       digestsize, 1);
 #endif
@@ -714,11 +714,11 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
 	kfree(edesc);
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
 		       ctx->ctx_len, 1);
 	if (req->result)
-		print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
 			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
 			       digestsize, 1);
 #endif
@@ -752,11 +752,11 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
 	kfree(edesc);
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
 		       ctx->ctx_len, 1);
 	if (req->result)
-		print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
 			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
 			       digestsize, 1);
 #endif
@@ -852,7 +852,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
 
 #ifdef DEBUG
-		print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
 			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 			       desc_bytes(desc), 1);
 #endif
@@ -871,9 +871,9 @@ static int ahash_update_ctx(struct ahash_request *req)
 		*next_buflen = last_buflen;
 	}
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
-	print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
 		       *next_buflen, 1);
 #endif
@@ -937,7 +937,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 			  digestsize);
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -1016,7 +1016,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 			  digestsize);
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -1086,7 +1086,7 @@ static int ahash_digest(struct ahash_request *req)
 			  digestsize);
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -1140,7 +1140,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 	edesc->src_nents = 0;
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -1228,7 +1228,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
 
 #ifdef DEBUG
-		print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
 			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 			       desc_bytes(desc), 1);
 #endif
@@ -1250,9 +1250,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		*next_buflen = 0;
 	}
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
-	print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
 		       *next_buflen, 1);
 #endif
@@ -1321,7 +1321,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 			  digestsize);
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -1414,7 +1414,7 @@ static int ahash_update_first(struct ahash_request *req)
 		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
 
 #ifdef DEBUG
-		print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
 			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 			       desc_bytes(desc), 1);
 #endif
@@ -1438,7 +1438,7 @@ static int ahash_update_first(struct ahash_request *req)
 		sg_copy(next_buf, req->src, req->nbytes);
 	}
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
 		       *next_buflen, 1);
 #endif
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -75,55 +75,53 @@ static void build_instantiation_desc(u32 *desc)
 			 OP_ALG_RNG4_SK);
 }
 
-struct instantiate_result {
-	struct completion completion;
-	int err;
-};
-
-static void rng4_init_done(struct device *dev, u32 *desc, u32 err,
-			   void *context)
+static int instantiate_rng(struct device *ctrldev)
 {
-	struct instantiate_result *instantiation = context;
-
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
-
-	instantiation->err = err;
-	complete(&instantiation->completion);
-}
-
-static int instantiate_rng(struct device *jrdev)
-{
-	struct instantiate_result instantiation;
-
-	dma_addr_t desc_dma;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+	struct caam_full __iomem *topregs;
+	unsigned int timeout = 100000;
 	u32 *desc;
-	int ret;
+	int i, ret = 0;
 
 	desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
 	if (!desc) {
-		dev_err(jrdev, "cannot allocate RNG init descriptor memory\n");
+		dev_err(ctrldev, "can't allocate RNG init descriptor memory\n");
 		return -ENOMEM;
 	}
 
 	build_instantiation_desc(desc);
-	desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
-	init_completion(&instantiation.completion);
-	ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation);
-	if (!ret) {
-		wait_for_completion_interruptible(&instantiation.completion);
-		ret = instantiation.err;
-		if (ret)
-			dev_err(jrdev, "unable to instantiate RNG\n");
+
+	/* Set the bit to request direct access to DECO0 */
+	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+	setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+
+	while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
+								 --timeout)
+		cpu_relax();
+
+	if (!timeout) {
+		dev_err(ctrldev, "failed to acquire DECO 0\n");
+		ret = -EIO;
+		goto out;
 	}
 
-	dma_unmap_single(jrdev, desc_dma, desc_bytes(desc), DMA_TO_DEVICE);
+	for (i = 0; i < desc_len(desc); i++)
+		topregs->deco.descbuf[i] = *(desc + i);
+
+	wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR);
+
+	timeout = 10000000;
+	while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) &&
+								 --timeout)
+		cpu_relax();
+
+	if (!timeout) {
+		dev_err(ctrldev, "failed to instantiate RNG\n");
+		ret = -EIO;
+	}
+
+	clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+out:
 	kfree(desc);
 
 	return ret;
 }
 
@@ -303,7 +301,7 @@ static int caam_probe(struct platform_device *pdev)
 	if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 &&
 	    !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) {
 		kick_trng(pdev);
-		ret = instantiate_rng(ctrlpriv->jrdev[0]);
+		ret = instantiate_rng(dev);
 		if (ret) {
 			caam_remove(pdev);
 			return ret;
@@ -315,9 +313,6 @@ static int caam_probe(struct platform_device *pdev)
 
 	/* NOTE: RTIC detection ought to go here, around Si time */
 
-	/* Initialize queue allocator lock */
-	spin_lock_init(&ctrlpriv->jr_alloc_lock);
-
 	caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);
 
 	/* Report "alive" for developer to see */
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -10,6 +10,7 @@
 #define CAAM_CMD_SZ sizeof(u32)
 #define CAAM_PTR_SZ sizeof(dma_addr_t)
 #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
+#define DESC_JOB_IO_LEN		(CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
 
 #ifdef DEBUG
 #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -9,9 +9,6 @@
 #ifndef INTERN_H
 #define INTERN_H
 
-#define JOBR_UNASSIGNED 0
-#define JOBR_ASSIGNED 1
-
 /* Currently comes from Kconfig param as a ^2 (driver-required) */
 #define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
 
@@ -46,7 +43,6 @@ struct caam_drv_private_jr {
 	struct caam_job_ring __iomem *rregs;	/* JobR's register space */
 	struct tasklet_struct irqtask;
 	int irq;			/* One per queue */
-	int assign;			/* busy/free */
 
 	/* Job ring info */
 	int ringsize;	/* Size of rings (assume input = output) */
@@ -68,7 +64,6 @@ struct caam_drv_private {
 
 	struct device *dev;
 	struct device **jrdev;	/* Alloc'ed array per sub-device */
-	spinlock_t jr_alloc_lock;
 	struct platform_device *pdev;
 
 	/* Physical-presence section */
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -125,72 +125,6 @@ static void caam_jr_dequeue(unsigned long devarg)
 	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
 }
 
-/**
- * caam_jr_register() - Alloc a ring for someone to use as needed. Returns
- * an ordinal of the rings allocated, else returns -ENODEV if no rings
- * are available.
- * @ctrldev: points to the controller level dev (parent) that
- *           owns rings available for use.
- * @dev:     points to where a pointer to the newly allocated queue's
- *           dev can be written to if successful.
- **/
-int caam_jr_register(struct device *ctrldev, struct device **rdev)
-{
-	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
-	struct caam_drv_private_jr *jrpriv = NULL;
-	int ring;
-
-	/* Lock, if free ring - assign, unlock */
-	spin_lock(&ctrlpriv->jr_alloc_lock);
-	for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
-		jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
-		if (jrpriv->assign == JOBR_UNASSIGNED) {
-			jrpriv->assign = JOBR_ASSIGNED;
-			*rdev = ctrlpriv->jrdev[ring];
-			spin_unlock(&ctrlpriv->jr_alloc_lock);
-			return ring;
-		}
-	}
-
-	/* If assigned, write dev where caller needs it */
-	spin_unlock(&ctrlpriv->jr_alloc_lock);
-	*rdev = NULL;
-
-	return -ENODEV;
-}
-EXPORT_SYMBOL(caam_jr_register);
-
-/**
- * caam_jr_deregister() - Deregister an API and release the queue.
- * Returns 0 if OK, -EBUSY if queue still contains pending entries
- * or unprocessed results at the time of the call
- * @dev - points to the dev that identifies the queue to
- *        be released.
- **/
-int caam_jr_deregister(struct device *rdev)
-{
-	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
-	struct caam_drv_private *ctrlpriv;
-
-	/* Get the owning controller's private space */
-	ctrlpriv = dev_get_drvdata(jrpriv->parentdev);
-
-	/*
-	 * Make sure ring empty before release
-	 */
-	if (rd_reg32(&jrpriv->rregs->outring_used) ||
-	    (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH))
-		return -EBUSY;
-
-	/* Release ring */
-	spin_lock(&ctrlpriv->jr_alloc_lock);
-	jrpriv->assign = JOBR_UNASSIGNED;
-	spin_unlock(&ctrlpriv->jr_alloc_lock);
-
-	return 0;
-}
-EXPORT_SYMBOL(caam_jr_deregister);
-
 /**
  * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
  *  -EBUSY if the queue is full, -EIO if it cannot map the caller's
@@ -379,7 +313,6 @@ static int caam_jr_init(struct device *dev)
 		    (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
 		    (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
 
-	jrp->assign = JOBR_UNASSIGNED;
 	return 0;
 }
 
|
@ -8,8 +8,6 @@
|
|||
#define JR_H
|
||||
|
||||
/* Prototypes for backend-level services exposed to APIs */
|
||||
int caam_jr_register(struct device *ctrldev, struct device **rdev);
|
||||
int caam_jr_deregister(struct device *rdev);
|
||||
int caam_jr_enqueue(struct device *dev, u32 *desc,
|
||||
void (*cbk)(struct device *dev, u32 *desc, u32 status,
|
||||
void *areq),
|
||||
|
|
|
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -95,9 +95,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 		   LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
-	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -110,7 +110,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 		wait_for_completion_interruptible(&result.completion);
 		ret = result.err;
 #ifdef DEBUG
-		print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+		print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
 			       DUMP_PREFIX_ADDRESS, 16, 4, key_out,
 			       split_key_pad_len, 1);
 #endif
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -341,6 +341,8 @@ struct caam_ctrl {
 #define MCFGR_DMA_RESET		0x10000000
 #define MCFGR_LONG_PTR		0x00010000 /* Use >32-bit desc addressing */
 #define SCFGR_RDBENABLE		0x00000400
+#define DECORR_RQD0ENABLE	0x00000001 /* Enable DECO0 for direct access */
+#define DECORR_DEN0		0x00010000 /* DECO0 available for access*/
 
 /* AXI read cache control */
 #define MCFGR_ARCACHE_SHIFT	12
@@ -703,9 +705,16 @@ struct caam_deco {
 	struct deco_sg_table sctr_tbl[4];	/* DxSTR - Scatter Tables */
 	u32 rsvd29[48];
 	u32 descbuf[64];	/* DxDESB - Descriptor buffer */
-	u32 rsvd30[320];
+	u32 rscvd30[193];
+	u32 desc_dbg;		/* DxDDR - DECO Debug Register */
+	u32 rsvd31[126];
 };
 
+/* DECO DBG Register Valid Bit*/
+#define DECO_DBG_VALID		0x80000000
+#define DECO_JQCR_WHL		0x20000000
+#define DECO_JQCR_FOUR		0x10000000
+
 /*
  * Current top-level view of memory map is:
  *
@@ -733,6 +742,7 @@ struct caam_full {
 	u64 rsvd[512];
 	struct caam_assurance assure;
 	struct caam_queue_if qi;
+	struct caam_deco deco;
 };
 
 #endif /* REGS_H */
@@ -70,35 +70,52 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
{
	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	u32 max_sg_len;
	int rc;

	if (nbytes > nx_ctx->ap->databytelen)
		return -EINVAL;
	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes,
			       csbcpb->cpb.aes_cbc.iv);
	if (rc)
		goto out;
	do {
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));
		to_process = to_process & ~(AES_BLOCK_SIZE - 1);

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}
		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
				       processed, csbcpb->cpb.aes_cbc.iv);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;
		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));
		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
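
Every nx loop in this series clamps its chunk the same way: the engine's byte limit, then what max_sg_len sg entries can describe, then a round-down to whole blocks. A standalone restatement of that clamping (function and parameter names are ours, not the driver's):

	#include <stdint.h>

	/* Illustrative re-statement of the to_process computation above.
	 * block_size must be a power of two. */
	static uint64_t nx_chunk(uint64_t remaining, uint64_t databytelen,
				 uint32_t max_sg_len, uint64_t page_size,
				 uint64_t block_size)
	{
		uint64_t n = remaining < databytelen ? remaining : databytelen;
		uint64_t sg_cap = page_size * (max_sg_len - 1);

		if (n > sg_cap)
			n = sg_cap;
		return n & ~(block_size - 1);	/* whole blocks only */
	}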

@@ -179,13 +179,26 @@ static int generate_pat(u8 *iv,
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	struct vio_pfo_op *op = NULL;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* page 78 of nx_wb.pdf has,
	 * Note: RFC3610 allows the AAD data to be up to 2^64 -1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 -1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 -1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * Thus the AAD is further constrained by this and is never
	 * greater than 2^32.
	 */

	if (!req->assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (req->assoclen <= 14) {

@@ -195,7 +208,46 @@ static int generate_pat(u8 *iv,
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = req->assoclen;
	} else if (req->assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}

	/* generate B0 */
	rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (req->assoclen <= 65280) {
			*(u16 *)b1 = (u16)req->assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = (u32)req->assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
		}
	}

	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!req->assoclen) {
		return rc;
	} else if (req->assoclen <= 14) {
		nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen);
		nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16,
					    nx_ctx->ap->sglen);

@@ -210,56 +262,74 @@ static int generate_pat(u8 *iv,
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		op = &nx_ctx->op;
		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;
	} else if (req->assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;

		/* remaining assoc data must have scatterlist built for it */
		nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen,
					    req->assoc, iauth_len,
					    req->assoclen - iauth_len);
		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);

		op = &nx_ctx->op_aead;
		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	} else {
		/* if associated data is less than (2^32), we construct B1
		 * differently yet again and feed in the associated data to a
		 * CCA operation */
		pr_err("associated data len is %u bytes (returning -EINVAL)\n",
		       req->assoclen);
		rc = -EINVAL;
	}

	rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
	if (rc)
		goto done;

	if (b1) {
		memset(b1, 0, 16);
		*(u16 *)b1 = (u16)req->assoclen;

		scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
					 iauth_len, SCATTERWALK_FROM_SG);

		rc = nx_hcall_sync(nx_ctx, op,
		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto done;
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		memcpy(out, result, AES_BLOCK_SIZE);
	} else {
		u32 max_sg_len;
		unsigned int processed = 0, to_process;

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u32,
				   nx_driver.of.max_sg_len/sizeof(struct nx_sg),
				   nx_ctx->ap->sglen);

		processed += iauth_len;

		do {
			to_process = min_t(u32, req->assoclen - processed,
					   nx_ctx->ap->databytelen);
			to_process = min_t(u64, to_process,
					   NX_PAGE_SIZE * (max_sg_len - 1));

			if ((to_process + processed) < req->assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    nx_ctx->ap->sglen,
						    req->assoc, processed,
						    to_process);

			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
					   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
			       nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
			       AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(req->assoclen,
				     &(nx_ctx->stats->aes_bytes));

			processed += to_process;
		} while (processed < req->assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}
done:

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}
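
The B1 header built above follows the RFC 3610 encoding of the AAD length l(a). A standalone sketch (helper name is ours; the big-endian stores are written out explicitly, which is what the driver's native u16/u32 stores produce on big-endian Power; note the RFC's two-byte form covers lengths strictly below 2^16 - 2^8, while the driver branches on <= 65280):

	#include <stdint.h>
	#include <stddef.h>

	/* Returns the number of header bytes written (2 or 6). The
	 * 0xff 0xff form for lengths >= 2^32 is omitted, matching the
	 * driver's observation that assoclen is an unsigned int. */
	static size_t ccm_aad_len_hdr(uint8_t *b1, uint32_t assoclen)
	{
		if (assoclen < 0xff00) {	/* 0 < l(a) < 2^16 - 2^8 */
			b1[0] = assoclen >> 8;
			b1[1] = assoclen & 0xff;
			return 2;
		}
		b1[0] = 0xff;			/* 0xfffe marker, then u32 */
		b1[1] = 0xfe;
		b1[2] = assoclen >> 24;
		b1[3] = (assoclen >> 16) & 0xff;
		b1[4] = (assoclen >> 8) & 0xff;
		b1[5] = assoclen & 0xff;
		return 6;
	}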

@@ -271,10 +341,12 @@ static int ccm_nx_decrypt(struct aead_request *req,
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	u32 max_sg_len;
	int rc = -1;

	if (nbytes > nx_ctx->ap->databytelen)
		return -EINVAL;
	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	nbytes -= authsize;

@@ -288,26 +360,61 @@ static int ccm_nx_decrypt(struct aead_request *req,
	if (rc)
		goto out;

	rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes,
			       csbcpb->cpb.aes_ccm.iv_or_ctr);
	if (rc)
		goto out;
	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
	NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_INTERMEDIATE;
	do {

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       to_process, processed,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));
		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
		    authsize) ? -EBADMSG : 0;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

@@ -318,38 +425,76 @@ static int ccm_nx_encrypt(struct aead_request *req,
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	u32 max_sg_len;
	int rc = -1;

	if (nbytes > nx_ctx->ap->databytelen)
		return -EINVAL;
	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes,
			       csbcpb->cpb.aes_ccm.iv_or_ctr);
	if (rc)
		goto out;
	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
	do {
		/* to process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;
		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       to_process, processed,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;

	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes, authsize,
				 SCATTERWALK_TO_SG);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
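
Both CCM paths above drive the same flag protocol: INTERMEDIATE set on every chunk except the last, CONTINUATION set on every operation after the first, with state (counter, PAT, S0) carried forward between calls. A toy, self-contained model of just that protocol (the flag values are illustrative, not the CPB bits):

	#include <stdint.h>
	#include <stdio.h>

	#define FDM_INTERMEDIATE 0x1	/* more chunks follow */
	#define FDM_CONTINUATION 0x2	/* not the first chunk */

	int main(void)
	{
		uint64_t nbytes = 10000, processed = 0, chunk = 4096;
		unsigned int fdm = 0;

		while (processed < nbytes) {
			uint64_t to_process = nbytes - processed;

			if (to_process > chunk)
				to_process = chunk;
			if (processed + to_process < nbytes)
				fdm |= FDM_INTERMEDIATE;
			else
				fdm &= ~FDM_INTERMEDIATE;	/* finalize */

			printf("chunk@%llu len=%llu fdm=%#x\n",
			       (unsigned long long)processed,
			       (unsigned long long)to_process, fdm);

			fdm |= FDM_CONTINUATION;
			processed += to_process;
		}
		return 0;
	}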

@@ -88,30 +88,48 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
{
	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	u32 max_sg_len;
	int rc;

	if (nbytes > nx_ctx->ap->databytelen)
		return -EINVAL;
	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes,
			       csbcpb->cpb.aes_ctr.iv);
	if (rc)
		goto out;
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}
	do {
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));
		to_process = to_process & ~(AES_BLOCK_SIZE - 1);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;
		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
				       processed, csbcpb->cpb.aes_ctr.iv);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));
		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

@@ -70,34 +70,52 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
{
	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	u32 max_sg_len;
	int rc;

	if (nbytes > nx_ctx->ap->databytelen)
		return -EINVAL;
	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, NULL);
	if (rc)
		goto out;
	do {
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));
		to_process = to_process & ~(AES_BLOCK_SIZE - 1);

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}
		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
				       processed, NULL);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;
		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	atomic_inc(&(nx_ctx->stats->aes_ops));
	atomic64_add(csbcpb->csb.processed_byte_count,
		     &(nx_ctx->stats->aes_bytes));
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

@@ -125,38 +125,187 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
		  struct aead_request *req,
		  u8 *out)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	int rc = -EINVAL;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	u32 max_sg_len;

	if (req->assoclen > nx_ctx->ap->databytelen)
		goto out;

	if (req->assoclen <= AES_BLOCK_SIZE) {
	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->assoc);
		scatterwalk_copychunks(out, &walk, req->assoclen,
				       SCATTERWALK_FROM_SG);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);

		rc = 0;
		goto out;
		return 0;
	}

	nx_sg = nx_walk_and_build(nx_sg, nx_ctx->ap->sglen, req->assoc, 0,
				  req->assoclen);
	nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg);
	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	atomic_inc(&(nx_ctx->stats->aes_ops));
	atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
					  req->assoc, processed, to_process);
		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
		       csbcpb_aead->cpb.aes_gca.out_pat,
		       AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}

static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	u32 max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
					  req->assoc, processed, to_process);
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}

static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
	       sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
				 AES_BLOCK_SIZE, nx_ctx->ap->sglen);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, sizeof(out),
				  nx_ctx->ap->sglen);
	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy out the auth tag */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
	       crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key occupies the same region as the GCM AAD and counter,
	 * so it's safe to just fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}
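
Why a single ECB encryption of the counter block is the whole tag here: with empty plaintext and empty AAD, the SP 800-38D tag expression reduces as follows (a sketch of the identity, not driver documentation; $J_0$ is the initial counter block, $t$ the tag length):

	T = \mathrm{MSB}_t\bigl( E_K(J_0) \oplus \mathrm{GHASH}_H(\varepsilon) \bigr)

and the GHASH over the lone all-zero length block is $0^{128}$, so $T = \mathrm{MSB}_t(E_K(J_0))$ — exactly the ECB shortcut implemented above.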

@@ -166,88 +315,104 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	u32 max_sg_len;
	int rc = -EINVAL;

	if (nbytes > nx_ctx->ap->databytelen)
		goto out;
	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = nx_ctx->priv.gcm.iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */
	if (nbytes == 0) {
		char src[AES_BLOCK_SIZE] = {};
		struct scatterlist sg;

		desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0);
		if (IS_ERR(desc.tfm)) {
			rc = -ENOMEM;
			goto out;
		}

		crypto_blkcipher_setkey(desc.tfm, csbcpb->cpb.aes_gcm.key,
			NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 :
			NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32);

		sg_init_one(&sg, src, AES_BLOCK_SIZE);
		if (enc)
			crypto_blkcipher_encrypt_iv(&desc, req->dst, &sg,
						    AES_BLOCK_SIZE);
		if (req->assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			crypto_blkcipher_decrypt_iv(&desc, req->dst, &sg,
						    AES_BLOCK_SIZE);
		crypto_free_blkcipher(desc.tfm);

		rc = 0;
		goto out;
		rc = gmac(req, &desc);
		if (rc)
			goto out;
		else
			goto mac;
	}

	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;

	if (req->assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
		if (rc)
			goto out;
	}

	if (enc)
	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes,
			       csbcpb->cpb.aes_gcm.iv_or_cnt);
	if (rc)
		goto out;
	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;
		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));
		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, to_process, processed,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
					 req->dst, nbytes,
					 crypto_aead_authsize(crypto_aead_reqtfm(req)),
					 SCATTERWALK_TO_SG);
	} else if (req->assoclen) {
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(itag, req->dst, nbytes,
		scatterwalk_map_and_copy(itag, req->src, nbytes,
					 crypto_aead_authsize(crypto_aead_reqtfm(req)),
					 SCATTERWALK_FROM_SG);
		rc = memcmp(itag, otag,

@@ -255,6 +420,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
			    -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

@@ -56,6 +56,77 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
	return 0;
}

/*
 * Based on RFC 3566, for a zero-length message:
 *
 * n = 1
 * K1 = E(K, 0x01010101010101010101010101010101)
 * K3 = E(K, 0x03030303030303030303030303030303)
 * E[0] = 0x00000000000000000000000000000000
 * M[1] = 0x80000000000000000000000000000000 (0 length message with padding)
 * E[1] = E(K1, M[1] ^ E[0] ^ K3)
 * Tag = E[1]
 */
static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	u8 keys[2][AES_BLOCK_SIZE];
	u8 key[32];
	int rc = 0;

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE);
	memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

	/* K1 and K3 base patterns */
	memset(keys[0], 0x01, sizeof(keys[0]));
	memset(keys[1], 0x03, sizeof(keys[1]));

	/* Generate K1 and K3 by encrypting the patterns */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, sizeof(keys),
				 nx_ctx->ap->sglen);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, sizeof(keys),
				  nx_ctx->ap->sglen);
	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* XOR K3 with the padding for a 0 length message */
	keys[1][0] ^= 0x80;

	/* Encrypt the final result */
	memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], sizeof(keys[1]),
				 nx_ctx->ap->sglen);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
				  nx_ctx->ap->sglen);
	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

out:
	/* Restore XCBC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
	memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	return rc;
}
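
The derivation in the comment above can be cross-checked in software with any AES-128 ECB primitive; the sketch below uses OpenSSL's legacy API as a convenience (an assumption, not part of the driver). With the RFC 3566 test key 00 01 ... 0f it should reproduce the constant that nx_xcbc_final previously copied out for 0-byte operations (see the hunk below):

	/* build (assumed): cc xcbc0.c -lcrypto */
	#include <stdio.h>
	#include <string.h>
	#include <openssl/aes.h>

	int main(void)
	{
		unsigned char key[16], k1[16], k3[16], m1[16], tag[16];
		AES_KEY ek, ek1;
		int i;

		for (i = 0; i < 16; i++)	/* RFC 3566 test key */
			key[i] = i;
		AES_set_encrypt_key(key, 128, &ek);

		memset(m1, 0x01, 16); AES_encrypt(m1, k1, &ek);	/* K1 */
		memset(m1, 0x03, 16); AES_encrypt(m1, k3, &ek);	/* K3 */

		memset(m1, 0, 16);
		m1[0] = 0x80;			/* padded M[1]; E[0] is zero */
		for (i = 0; i < 16; i++)
			m1[i] ^= k3[i];		/* M[1] ^ E[0] ^ K3 */

		AES_set_encrypt_key(k1, 128, &ek1);
		AES_encrypt(m1, tag, &ek1);	/* Tag = E[1] */

		for (i = 0; i < 16; i++)
			printf("%02x", tag[i]);
		printf("\n");
		return 0;
	}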

static int nx_xcbc_init(struct shash_desc *desc)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);

@@ -88,76 +159,99 @@ static int nx_xcbc_update(struct shash_desc *desc,
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	u32 to_process, leftover;
	u32 to_process, leftover, total;
	u32 max_sg_len;
	unsigned long irq_flags;
	int rc = 0;

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously and we're updating again,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.aes_xcbc.cv,
		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
	}
	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	total = sctx->count + len;

	/* 2 cases for total data len:
	 * 1: <= AES_BLOCK_SIZE: copy into state, return 0
	 * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	if (len + sctx->count <= AES_BLOCK_SIZE) {
	if (total <= AES_BLOCK_SIZE) {
		memcpy(sctx->buffer + sctx->count, data, len);
		sctx->count += len;
		goto out;
	}

	/* to_process: the AES_BLOCK_SIZE data chunk to process in this
	 * update */
	to_process = (sctx->count + len) & ~(AES_BLOCK_SIZE - 1);
	leftover = (sctx->count + len) & (AES_BLOCK_SIZE - 1);
	in_sg = nx_ctx->in_sg;
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	/* the hardware will not accept a 0 byte operation for this algorithm
	 * and the operation MUST be finalized to be correct. So if we happen
	 * to get an update that falls on a block sized boundary, we must
	 * save off the last block to finalize with later. */
	if (!leftover) {
		to_process -= AES_BLOCK_SIZE;
		leftover = AES_BLOCK_SIZE;
	}
	do {

		if (sctx->count) {
			in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buffer,
						 sctx->count, nx_ctx->ap->sglen);
			in_sg = nx_build_sg_list(in_sg, (u8 *)data,
						 to_process - sctx->count,
						 nx_ctx->ap->sglen);
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update */
		to_process = min_t(u64, total, nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));
		to_process = to_process & ~(AES_BLOCK_SIZE - 1);
		leftover = total - to_process;

		/* the hardware will not accept a 0 byte operation for this
		 * algorithm and the operation MUST be finalized to be correct.
		 * So if we happen to get an update that falls on a block sized
		 * boundary, we must save off the last block to finalize with
		 * later. */
		if (!leftover) {
			to_process -= AES_BLOCK_SIZE;
			leftover = AES_BLOCK_SIZE;
		}

		if (sctx->count) {
			in_sg = nx_build_sg_list(nx_ctx->in_sg,
						 (u8 *) sctx->buffer,
						 sctx->count,
						 max_sg_len);
		}
		in_sg = nx_build_sg_list(in_sg,
					 (u8 *) data,
					 to_process - sctx->count,
					 max_sg_len);
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);
	} else {
		in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, to_process,
					 nx_ctx->ap->sglen);
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);
	}

	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		/* we've hit the nx chip previously and we're updating again,
		 * so copy over the partial digest */
		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
			memcpy(csbcpb->cpb.aes_xcbc.cv,
			       csbcpb->cpb.aes_xcbc.out_cv_mac,
			       AES_BLOCK_SIZE);
		}

	if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}
		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
		if (rc)
			goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic_inc(&(nx_ctx->stats->aes_ops));

		/* everything after the first update is continuation */
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		total -= to_process;
		data += to_process - sctx->count;
		sctx->count = 0;
		in_sg = nx_ctx->in_sg;
	} while (leftover > AES_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	memcpy(sctx->buffer, data + len - leftover, leftover);
	memcpy(sctx->buffer, data, leftover);
	sctx->count = leftover;

	/* everything after the first update is continuation */
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
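
The hold-back rule above — never hand the hardware a 0-byte op, and always keep a full final block in the state buffer for nx_xcbc_final() — in isolation, as a compilable helper (names are ours):

	#include <stdint.h>

	#define BLK 16	/* AES_BLOCK_SIZE, for illustration */

	/* Assumes total > BLK, as guaranteed by the early-return above. */
	static void split_update(uint64_t total, uint64_t *to_process,
				 uint64_t *leftover)
	{
		*to_process = total & ~(uint64_t)(BLK - 1);
		*leftover = total - *to_process;
		if (!*leftover) {	/* landed exactly on a boundary */
			*to_process -= BLK;
			*leftover = BLK;
		}
	}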

@@ -167,21 +261,23 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	int rc = 0;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.aes_xcbc.cv,
		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
	} else if (sctx->count == 0) {
		/* we've never seen an update, so this is a 0 byte op. The
		 * hardware cannot handle a 0 byte op, so just copy out the
		 * known 0 byte result. This is cheaper than allocating a
		 * software context to do a 0 byte op */
		u8 data[] = { 0x75, 0xf0, 0x25, 0x1d, 0x52, 0x8a, 0xc0, 0x1c,
			      0x45, 0x73, 0xdf, 0xd5, 0x84, 0xd7, 0x9f, 0x29 };
		memcpy(out, data, sizeof(data));
		/*
		 * we've never seen an update, so this is a 0 byte op. The
		 * hardware cannot handle a 0 byte op, so just ECB to
		 * generate the hash.
		 */
		rc = nx_xcbc_empty(desc, out);
		goto out;
	}

@@ -211,6 +307,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)

	memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

@@ -55,71 +55,91 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	u64 to_process, leftover;
	u64 to_process, leftover, total;
	u32 max_sg_len;
	unsigned long irq_flags;
	int rc = 0;

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously and we're updating again,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.sha256.input_partial_digest,
		       csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
	}
	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* 2 cases for total data len:
	 * 1: <= SHA256_BLOCK_SIZE: copy into state, return 0
	 * 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover
	 * 1: < SHA256_BLOCK_SIZE: copy into state, return 0
	 * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	if (len + sctx->count < SHA256_BLOCK_SIZE) {
	total = sctx->count + len;
	if (total < SHA256_BLOCK_SIZE) {
		memcpy(sctx->buf + sctx->count, data, len);
		sctx->count += len;
		goto out;
	}

	/* to_process: the SHA256_BLOCK_SIZE data chunk to process in this
	 * update */
	to_process = (sctx->count + len) & ~(SHA256_BLOCK_SIZE - 1);
	leftover = (sctx->count + len) & (SHA256_BLOCK_SIZE - 1);
	in_sg = nx_ctx->in_sg;
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	if (sctx->count) {
		in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
					 sctx->count, nx_ctx->ap->sglen);
		in_sg = nx_build_sg_list(in_sg, (u8 *)data,
	do {
		/*
		 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
		 * this update. This value is also restricted by the sg list
		 * limits.
		 */
		to_process = min_t(u64, total, nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));
		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
		leftover = total - to_process;

		if (sctx->count) {
			in_sg = nx_build_sg_list(nx_ctx->in_sg,
						 (u8 *) sctx->buf,
						 sctx->count, max_sg_len);
		}
		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
					 to_process - sctx->count,
					 nx_ctx->ap->sglen);
					 max_sg_len);
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);
	} else {
		in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data,
					 to_process, nx_ctx->ap->sglen);
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);
	}

	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
			/*
			 * we've hit the nx chip previously and we're updating
			 * again, so copy over the partial digest.
			 */
			memcpy(csbcpb->cpb.sha256.input_partial_digest,
			       csbcpb->cpb.sha256.message_digest,
			       SHA256_DIGEST_SIZE);
		}

	if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}
		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

	atomic_inc(&(nx_ctx->stats->sha256_ops));
		atomic_inc(&(nx_ctx->stats->sha256_ops));
		csbcpb->cpb.sha256.message_bit_length += (u64)
			(csbcpb->cpb.sha256.spbc * 8);

		/* everything after the first update is continuation */
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		total -= to_process;
		data += to_process - sctx->count;
		sctx->count = 0;
		in_sg = nx_ctx->in_sg;
	} while (leftover >= SHA256_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	if (leftover)
		memcpy(sctx->buf, data + len - leftover, leftover);
		memcpy(sctx->buf, data, leftover);
	sctx->count = leftover;

	csbcpb->cpb.sha256.message_bit_length += (u64)
		(csbcpb->cpb.sha256.spbc * 8);

	/* everything after the first update is continuation */
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

@@ -129,8 +149,13 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	u32 max_sg_len;
	unsigned long irq_flags;
	int rc;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously, now we're finalizing,

@@ -146,9 +171,9 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
	csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8);

	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
				 sctx->count, nx_ctx->ap->sglen);
				 sctx->count, max_sg_len);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE,
				  nx_ctx->ap->sglen);
				  max_sg_len);
	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

@@ -168,6 +193,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
		     &(nx_ctx->stats->sha256_bytes));
	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

@@ -177,6 +203,9 @@ static int nx_sha256_export(struct shash_desc *desc, void *out)
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct sha256_state *octx = out;
	unsigned long irq_flags;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	octx->count = sctx->count +
		      (csbcpb->cpb.sha256.message_bit_length / 8);

@@ -199,6 +228,7 @@ static int nx_sha256_export(struct shash_desc *desc, void *out)
		octx->state[7] = SHA256_H7;
	}

	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return 0;
}

@@ -208,6 +238,9 @@ static int nx_sha256_import(struct shash_desc *desc, const void *in)
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	const struct sha256_state *ictx = in;
	unsigned long irq_flags;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));

@@ -222,6 +255,7 @@ static int nx_sha256_import(struct shash_desc *desc, const void *in)
		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
	}

	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return 0;
}

@@ -55,73 +55,93 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	u64 to_process, leftover, spbc_bits;
	u64 to_process, leftover, total, spbc_bits;
	u32 max_sg_len;
	unsigned long irq_flags;
	int rc = 0;

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously and we're updating again,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.sha512.input_partial_digest,
		       csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
	}
	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* 2 cases for total data len:
	 * 1: <= SHA512_BLOCK_SIZE: copy into state, return 0
	 * 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover
	 * 1: < SHA512_BLOCK_SIZE: copy into state, return 0
	 * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) {
	total = sctx->count[0] + len;
	if (total < SHA512_BLOCK_SIZE) {
		memcpy(sctx->buf + sctx->count[0], data, len);
		sctx->count[0] += len;
		goto out;
	}

	/* to_process: the SHA512_BLOCK_SIZE data chunk to process in this
	 * update */
	to_process = (sctx->count[0] + len) & ~(SHA512_BLOCK_SIZE - 1);
	leftover = (sctx->count[0] + len) & (SHA512_BLOCK_SIZE - 1);
	in_sg = nx_ctx->in_sg;
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	if (sctx->count[0]) {
		in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
					 sctx->count[0], nx_ctx->ap->sglen);
		in_sg = nx_build_sg_list(in_sg, (u8 *)data,
	do {
		/*
		 * to_process: the SHA512_BLOCK_SIZE data chunk to process in
		 * this update. This value is also restricted by the sg list
		 * limits.
		 */
		to_process = min_t(u64, total, nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));
		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
		leftover = total - to_process;

		if (sctx->count[0]) {
			in_sg = nx_build_sg_list(nx_ctx->in_sg,
						 (u8 *) sctx->buf,
						 sctx->count[0], max_sg_len);
		}
		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
					 to_process - sctx->count[0],
					 nx_ctx->ap->sglen);
					 max_sg_len);
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);
	} else {
		in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data,
					 to_process, nx_ctx->ap->sglen);
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);
	}

	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
			/*
			 * we've hit the nx chip previously and we're updating
			 * again, so copy over the partial digest.
			 */
			memcpy(csbcpb->cpb.sha512.input_partial_digest,
			       csbcpb->cpb.sha512.message_digest,
			       SHA512_DIGEST_SIZE);
		}

	if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}
		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

	atomic_inc(&(nx_ctx->stats->sha512_ops));
		atomic_inc(&(nx_ctx->stats->sha512_ops));
		spbc_bits = csbcpb->cpb.sha512.spbc * 8;
		csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
		if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
			csbcpb->cpb.sha512.message_bit_length_hi++;

		/* everything after the first update is continuation */
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		total -= to_process;
		data += to_process - sctx->count[0];
		sctx->count[0] = 0;
		in_sg = nx_ctx->in_sg;
	} while (leftover >= SHA512_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	if (leftover)
		memcpy(sctx->buf, data + len - leftover, leftover);
		memcpy(sctx->buf, data, leftover);
	sctx->count[0] = leftover;

	spbc_bits = csbcpb->cpb.sha512.spbc * 8;
	csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
	if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
		csbcpb->cpb.sha512.message_bit_length_hi++;

	/* everything after the first update is continuation */
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
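
The SHA-512 bit-length counter above is 128 bits wide, split into lo/hi words; carry out of the low word is detected by unsigned wraparound. The same idiom, on its own (helper name is ours):

	#include <stdint.h>

	static void add_bits_128(uint64_t *lo, uint64_t *hi, uint64_t bits)
	{
		*lo += bits;
		if (*lo < bits)		/* wrapped => carry into high word */
			(*hi)++;
	}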
|
||||
|
||||
|
@ -131,9 +151,15 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
|
|||
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
|
||||
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
|
||||
struct nx_sg *in_sg, *out_sg;
|
||||
u32 max_sg_len;
|
||||
u64 count0;
|
||||
unsigned long irq_flags;
|
||||
int rc;
|
||||
|
||||
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
|
||||
|
||||
max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
|
||||
|
||||
if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
|
||||
/* we've hit the nx chip previously, now we're finalizing,
|
||||
* so copy over the partial digest */
|
||||
|
@ -152,9 +178,9 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
|
|||
csbcpb->cpb.sha512.message_bit_length_hi++;
|
||||
|
||||
in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0],
|
||||
nx_ctx->ap->sglen);
|
||||
max_sg_len);
|
||||
out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE,
|
||||
nx_ctx->ap->sglen);
|
||||
max_sg_len);
|
||||
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
|
||||
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
|
||||
|
||||
|
@ -174,6 +200,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
|
|||
|
||||
memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
|
||||
out:
|
||||
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -183,6 +210,9 @@ static int nx_sha512_export(struct shash_desc *desc, void *out)
|
|||
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
|
||||
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
|
||||
struct sha512_state *octx = out;
|
||||
unsigned long irq_flags;
|
||||
|
||||
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
|
||||
|
||||
/* move message_bit_length (128 bits) into count and convert its value
|
||||
* to bytes */
|
||||
|
@ -214,6 +244,7 @@ static int nx_sha512_export(struct shash_desc *desc, void *out)
|
|||
octx->state[7] = SHA512_H7;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -223,6 +254,9 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in)
|
|||
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
|
||||
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
|
||||
const struct sha512_state *ictx = in;
|
||||
unsigned long irq_flags;
|
||||
|
||||
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
|
||||
|
||||
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
|
||||
sctx->count[0] = ictx->count[0] & 0x3f;
|
||||
|
@ -240,6 +274,7 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in)
|
|||
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
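An aside on the message_bit_length_lo/_hi update above: it is a 128-bit addition built from two 64-bit words, with the carry detected by unsigned wrap-around (if the low word ends up smaller than what was just added, it overflowed). A minimal user-space sketch of the same pattern; the struct and function names here are illustrative, not from the driver:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the CPB's 128-bit bit counter. */
    struct bitlen128 {
            uint64_t lo;
            uint64_t hi;
    };

    /* Add n bits; detect carry by unsigned wrap-around, exactly the
     * test applied to message_bit_length_lo/_hi in the diff above. */
    static void bitlen128_add(struct bitlen128 *c, uint64_t n)
    {
            c->lo += n;
            if (c->lo < n)          /* wrapped past 2^64 */
                    c->hi++;
    }

    int main(void)
    {
            struct bitlen128 c = { .lo = UINT64_MAX - 7, .hi = 0 };

            bitlen128_add(&c, 64);  /* forces a carry into the high word */
            printf("hi=%llu lo=%llu\n",
                   (unsigned long long)c.hi, (unsigned long long)c.lo);
            return 0;
    }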
drivers/crypto/nx/nx.c

@@ -61,8 +61,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,

	do {
		rc = vio_h_cop_sync(viodev, op);
	} while ((rc == -EBUSY && !may_sleep && retries--) ||
	         (rc == -EBUSY && may_sleep && cond_resched()));
	} while (rc == -EBUSY && !may_sleep && retries--);

	if (rc) {
		dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "

@@ -114,13 +113,29 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
	 * have been described (or @sgmax elements have been written), the
	 * loop ends. min_t is used to ensure @end_addr falls on the same page
	 * as sg_addr, if not, we need to create another nx_sg element for the
	 * data on the next page */
	 * data on the next page.
	 *
	 * Also when using vmalloc'ed data, every time that a system page
	 * boundary is crossed the physical address needs to be re-calculated.
	 */
	for (sg = sg_head; sg_len < len; sg++) {
		u64 next_page;

		sg->addr = sg_addr;
		sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE), end_addr);
		sg->len = sg_addr - sg->addr;
		sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
				end_addr);

		next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
		sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
		sg_len += sg->len;

		if (sg_addr >= next_page &&
		    is_vmalloc_addr(start_addr + sg_len)) {
			sg_addr = page_to_phys(vmalloc_to_page(
						start_addr + sg_len));
			end_addr = sg_addr + len - sg_len;
		}

		if ((sg - sg_head) == sgmax) {
			pr_err("nx: scatter/gather list overflow, pid: %d\n",
			       current->pid);

@@ -196,6 +211,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
 * @dst: destination scatterlist
 * @src: source scatterlist
 * @nbytes: length of data described in the scatterlists
 * @offset: number of bytes to fast-forward past at the beginning of
 *          scatterlists.
 * @iv: destination for the iv data, if the algorithm requires it
 *
 * This is common code shared by all the AES algorithms. It uses the block

@@ -207,6 +224,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
		      struct scatterlist *dst,
		      struct scatterlist *src,
		      unsigned int nbytes,
		      unsigned int offset,
		      u8 *iv)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;

@@ -215,8 +233,10 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
	if (iv)
		memcpy(iv, desc->info, AES_BLOCK_SIZE);

	nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes);
	nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes);
	nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src,
				    offset, nbytes);
	nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst,
				     offset, nbytes);

	/* these lengths should be negative, which will indicate to phyp that
	 * the input and output parameters are scatterlists, not linear

@@ -235,6 +255,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
 */
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
{
	spin_lock_init(&nx_ctx->lock);
	memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
	nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;
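The core idea of nx_build_sg_list() above is that no scatter/gather element may cross a page boundary, so a buffer is chopped at each boundary as it is described. A standalone sketch of that splitting logic, assuming 4 KiB pages and simplified types (this is a schematic of the technique, not the driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(uint64_t)(PAGE_SIZE - 1))

    struct sg_ent { uint64_t addr; uint64_t len; };

    /* Chop [addr, addr+len) into entries that never cross a page
     * boundary, mirroring the next_page/min_t() logic above. */
    static int build_sg(struct sg_ent *sg, int sgmax,
                        uint64_t addr, uint64_t len)
    {
            uint64_t end = addr + len;
            int n = 0;

            while (addr < end && n < sgmax) {
                    uint64_t next_page = (addr & PAGE_MASK) + PAGE_SIZE;

                    sg[n].addr = addr;
                    sg[n].len = (end < next_page ? end : next_page) - addr;
                    addr += sg[n].len;
                    n++;
            }
            return n;
    }

    int main(void)
    {
            struct sg_ent sg[8];
            int i, n = build_sg(sg, 8, 4000, 600); /* straddles a boundary */

            for (i = 0; i < n; i++)
                    printf("entry %d: addr=%llu len=%llu\n", i,
                           (unsigned long long)sg[i].addr,
                           (unsigned long long)sg[i].len);
            return 0;
    }

Running this splits the 600-byte buffer into a 96-byte and a 504-byte entry, one on each side of the 4096 boundary; the vmalloc handling in the real driver additionally re-resolves the physical address at each such crossing.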
drivers/crypto/nx/nx.h

@@ -117,6 +117,7 @@ struct nx_ctr_priv {
};

struct nx_crypto_ctx {
	spinlock_t lock;	  /* synchronize access to the context */
	void *kmem;		  /* unaligned, kmalloc'd buffer */
	size_t kmem_len;	  /* length of kmem */
	struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */

@@ -155,7 +156,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int, u32);
int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
		      struct scatterlist *, struct scatterlist *, unsigned int,
		      u8 *);
		      unsigned int, u8 *);
struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
				struct scatterlist *, unsigned int,
				unsigned int);
drivers/crypto/omap-aes.c

@@ -13,7 +13,9 @@
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__
#define pr_fmt(fmt) "%20s: " fmt, __func__
#define prn(num) pr_debug(#num "=%d\n", num)
#define prx(num) pr_debug(#num "=%x\n", num)

#include <linux/err.h>
#include <linux/module.h>

@@ -38,6 +40,8 @@
#define DST_MAXBURST			4
#define DMA_MIN				(DST_MAXBURST * sizeof(u32))

#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)

/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))

@@ -74,6 +78,10 @@

#define AES_REG_LENGTH_N(x)		(0x54 + ((x) * 0x04))

#define AES_REG_IRQ_STATUS(dd)		((dd)->pdata->irq_status_ofs)
#define AES_REG_IRQ_ENABLE(dd)		((dd)->pdata->irq_enable_ofs)
#define AES_REG_IRQ_DATA_IN		BIT(1)
#define AES_REG_IRQ_DATA_OUT		BIT(2)
#define DEFAULT_TIMEOUT			(5*HZ)

#define FLAGS_MODE_MASK			0x000f

@@ -86,6 +94,8 @@
#define FLAGS_FAST		BIT(5)
#define FLAGS_BUSY		BIT(6)

#define AES_BLOCK_WORDS		(AES_BLOCK_SIZE >> 2)

struct omap_aes_ctx {
	struct omap_aes_dev *dd;

@@ -119,6 +129,8 @@ struct omap_aes_pdata {
	u32		data_ofs;
	u32		rev_ofs;
	u32		mask_ofs;
	u32		irq_enable_ofs;
	u32		irq_status_ofs;

	u32		dma_enable_in;
	u32		dma_enable_out;

@@ -146,25 +158,32 @@ struct omap_aes_dev {
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;
	size_t				total;
	struct scatterlist		*in_sg;
	struct scatterlist		in_sgl;
	size_t				in_offset;
	struct scatterlist		*out_sg;
	struct scatterlist		out_sgl;
	size_t				out_offset;

	size_t				buflen;
	void				*buf_in;
	size_t				dma_size;
	/*
	 * total is used by PIO mode for book keeping so introduce
	 * variable total_save as need it to calc page_order
	 */
	size_t				total;
	size_t				total_save;

	struct scatterlist		*in_sg;
	struct scatterlist		*out_sg;

	/* Buffers for copying for unaligned cases */
	struct scatterlist		in_sgl;
	struct scatterlist		out_sgl;
	struct scatterlist		*orig_out;
	int				sgs_copied;

	struct scatter_walk		in_walk;
	struct scatter_walk		out_walk;
	int			dma_in;
	struct dma_chan		*dma_lch_in;
	dma_addr_t		dma_addr_in;
	void			*buf_out;
	int			dma_out;
	struct dma_chan		*dma_lch_out;
	dma_addr_t		dma_addr_out;

	int			in_sg_len;
	int			out_sg_len;
	int			pio_only;
	const struct omap_aes_pdata	*pdata;
};

@@ -172,16 +191,36 @@ struct omap_aes_dev {
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

#ifdef DEBUG
#define omap_aes_read(dd, offset)				\
({								\
	int _read_ret;						\
	_read_ret = __raw_readl(dd->io_base + offset);		\
	pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n",	\
		 offset, _read_ret);				\
	_read_ret;						\
})
#else
static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}
#endif

#ifdef DEBUG
#define omap_aes_write(dd, offset, value)				\
	do {								\
		pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n",	\
			 offset, value);				\
		__raw_writel(value, dd->io_base + offset);		\
	} while (0)
#else
static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
				  u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}
#endif

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
				       u32 value, u32 mask)

@@ -323,33 +362,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
	dd->dma_lch_out = NULL;
	dd->dma_lch_in = NULL;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
					 DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
					  DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

@@ -376,14 +388,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
err_dma_out:
	dma_release_channel(dd->dma_lch_in);
err_dma_in:
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
err_map_in:
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
err_alloc:
	if (err)
		pr_err("error: %d\n", err);
	return err;

@@ -393,11 +397,6 @@ static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_out);
	dma_release_channel(dd->dma_lch_in);
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
}

static void sg_copy_buf(void *buf, struct scatterlist *sg,

@@ -414,59 +413,27 @@ static void sg_copy_buf(void *buf, struct scatterlist *sg,
	scatterwalk_done(&walk, out, 0);
}

static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
		   size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		/*
		 * buflen and total are AES_BLOCK_SIZE size aligned,
		 * so count should be also aligned
		 */

		sg_copy_buf(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
		struct scatterlist *in_sg, struct scatterlist *out_sg)
		struct scatterlist *in_sg, struct scatterlist *out_sg,
		int in_sg_len, int out_sg_len)
{
	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct omap_aes_dev *dd = ctx->dd;
	struct dma_async_tx_descriptor *tx_in, *tx_out;
	struct dma_slave_config cfg;
	dma_addr_t dma_addr_in = sg_dma_address(in_sg);
	int ret, length = sg_dma_len(in_sg);
	int ret;

	pr_debug("len: %d\n", length);
	if (dd->pio_only) {
		scatterwalk_start(&dd->in_walk, dd->in_sg);
		scatterwalk_start(&dd->out_walk, dd->out_sg);

		dd->dma_size = length;
		/* Enable DATAIN interrupt and let it take
		   care of the rest */
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
		return 0;
	}

	if (!(dd->flags & FLAGS_FAST))
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);

	memset(&cfg, 0, sizeof(cfg));

@@ -485,7 +452,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
		return ret;
	}

	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1,
	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_in) {

@@ -504,7 +471,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
		return ret;
	}

	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1,
	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_out) {

@@ -522,7 +489,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
	dma_async_issue_pending(dd->dma_lch_out);

	/* start DMA */
	dd->pdata->trigger(dd, length);
	dd->pdata->trigger(dd, dd->total);

	return 0;
}

@@ -531,93 +498,32 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
					crypto_ablkcipher_reqtfm(dd->req));
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;
	struct scatterlist *in_sg, *out_sg;
	int len32;
	int err;

	pr_debug("total: %d\n", dd->total);

	if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));

		fast = in && out;
	}

	if (fast)  {
		count = min(dd->total, sg_dma_len(dd->in_sg));
		count = min(count, sg_dma_len(dd->out_sg));

		if (count != dd->total) {
			pr_err("request length != buffer length\n");
			return -EINVAL;
		}

		pr_debug("fast\n");

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	if (!dd->pio_only) {
		err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
				 DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
				 DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		in_sg = dd->in_sg;
		out_sg = dd->out_sg;

		dd->flags |= FLAGS_FAST;

	} else {
		/* use cache buffers */
		count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
				dd->buflen, dd->total, 0);

		len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;

		/*
		 * The data going into the AES module has been copied
		 * to a local buffer and the data coming out will go
		 * into a local buffer so set up local SG entries for
		 * both.
		 */
		sg_init_table(&dd->in_sgl, 1);
		dd->in_sgl.offset = dd->in_offset;
		sg_dma_len(&dd->in_sgl) = len32;
		sg_dma_address(&dd->in_sgl) = dd->dma_addr_in;

		sg_init_table(&dd->out_sgl, 1);
		dd->out_sgl.offset = dd->out_offset;
		sg_dma_len(&dd->out_sgl) = len32;
		sg_dma_address(&dd->out_sgl) = dd->dma_addr_out;

		in_sg = &dd->in_sgl;
		out_sg = &dd->out_sgl;

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~FLAGS_FAST;

	}

	dd->total -= count;

	err = omap_aes_crypt_dma(tfm, in_sg, out_sg);
	if (err) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
	err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
				 dd->out_sg_len);
	if (err && !dd->pio_only) {
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
			     DMA_FROM_DEVICE);
	}

	return err;

@@ -637,7 +543,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	int err = 0;
	size_t count;

	pr_debug("total: %d\n", dd->total);

@@ -646,23 +551,49 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
	dmaengine_terminate_all(dd->dma_lch_in);
	dmaengine_terminate_all(dd->dma_lch_out);

	if (dd->flags & FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);
	return err;
	}

		/* copy data */
		count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
				dd->buflen, dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			pr_err("not all data converted: %u\n", count);
		}
int omap_aes_check_aligned(struct scatterlist *sg)
{
	while (sg) {
		if (!IS_ALIGNED(sg->offset, 4))
			return -1;
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return -1;
		sg = sg_next(sg);
	}
	return 0;
}

int omap_aes_copy_sgs(struct omap_aes_dev *dd)
{
	void *buf_in, *buf_out;
	int pages;

	pages = get_order(dd->total);

	buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
	buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);

	if (!buf_in || !buf_out) {
		pr_err("Couldn't allocated pages for unaligned cases.\n");
		return -1;
	}

	return err;
	dd->orig_out = dd->out_sg;

	sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);

	sg_init_table(&dd->in_sgl, 1);
	sg_set_buf(&dd->in_sgl, buf_in, dd->total);
	dd->in_sg = &dd->in_sgl;

	sg_init_table(&dd->out_sgl, 1);
	sg_set_buf(&dd->out_sgl, buf_out, dd->total);
	dd->out_sg = &dd->out_sgl;

	return 0;
}

static int omap_aes_handle_queue(struct omap_aes_dev *dd,

@@ -698,11 +629,23 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->total_save = req->nbytes;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	if (omap_aes_check_aligned(dd->in_sg) ||
	    omap_aes_check_aligned(dd->out_sg)) {
		if (omap_aes_copy_sgs(dd))
			pr_err("Failed to copy SGs for unaligned cases\n");
		dd->sgs_copied = 1;
	} else {
		dd->sgs_copied = 0;
	}

	dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
	dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
	BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;

@@ -726,21 +669,32 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
static void omap_aes_done_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
	int err;
	void *buf_in, *buf_out;
	int pages;

	pr_debug("enter\n");
	pr_debug("enter done_task\n");

	err = omap_aes_crypt_dma_stop(dd);

	err = dd->err ? : err;

	if (dd->total && !err) {
		err = omap_aes_crypt_dma_start(dd);
		if (!err)
			return; /* DMA started. Not fininishing. */
	if (!dd->pio_only) {
		dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
				       DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
			     DMA_FROM_DEVICE);
		omap_aes_crypt_dma_stop(dd);
	}

	omap_aes_finish_req(dd, err);
	if (dd->sgs_copied) {
		buf_in = sg_virt(&dd->in_sgl);
		buf_out = sg_virt(&dd->out_sgl);

		sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);

		pages = get_order(dd->total_save);
		free_pages((unsigned long)buf_in, pages);
		free_pages((unsigned long)buf_out, pages);
	}

	omap_aes_finish_req(dd, 0);
	omap_aes_handle_queue(dd, NULL);

	pr_debug("exit\n");

@@ -1002,6 +956,8 @@ static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
	.data_ofs	= 0x60,
	.rev_ofs	= 0x80,
	.mask_ofs	= 0x84,
	.irq_status_ofs = 0x8c,
	.irq_enable_ofs = 0x90,
	.dma_enable_in	= BIT(5),
	.dma_enable_out	= BIT(6),
	.major_mask	= 0x0700,

@@ -1010,6 +966,90 @@ static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
	.minor_shift	= 0,
};

static irqreturn_t omap_aes_irq(int irq, void *dev_id)
{
	struct omap_aes_dev *dd = dev_id;
	u32 status, i;
	u32 *src, *dst;

	status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
	if (status & AES_REG_IRQ_DATA_IN) {
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

		BUG_ON(!dd->in_sg);

		BUG_ON(_calc_walked(in) > dd->in_sg->length);

		src = sg_virt(dd->in_sg) + _calc_walked(in);

		for (i = 0; i < AES_BLOCK_WORDS; i++) {
			omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);

			scatterwalk_advance(&dd->in_walk, 4);
			if (dd->in_sg->length == _calc_walked(in)) {
				dd->in_sg = scatterwalk_sg_next(dd->in_sg);
				if (dd->in_sg) {
					scatterwalk_start(&dd->in_walk,
							  dd->in_sg);
					src = sg_virt(dd->in_sg) +
					      _calc_walked(in);
				}
			} else {
				src++;
			}
		}

		/* Clear IRQ status */
		status &= ~AES_REG_IRQ_DATA_IN;
		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

		/* Enable DATA_OUT interrupt */
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);

	} else if (status & AES_REG_IRQ_DATA_OUT) {
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

		BUG_ON(!dd->out_sg);

		BUG_ON(_calc_walked(out) > dd->out_sg->length);

		dst = sg_virt(dd->out_sg) + _calc_walked(out);

		for (i = 0; i < AES_BLOCK_WORDS; i++) {
			*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
			scatterwalk_advance(&dd->out_walk, 4);
			if (dd->out_sg->length == _calc_walked(out)) {
				dd->out_sg = scatterwalk_sg_next(dd->out_sg);
				if (dd->out_sg) {
					scatterwalk_start(&dd->out_walk,
							  dd->out_sg);
					dst = sg_virt(dd->out_sg) +
					      _calc_walked(out);
				}
			} else {
				dst++;
			}
		}

		dd->total -= AES_BLOCK_SIZE;

		BUG_ON(dd->total < 0);

		/* Clear IRQ status */
		status &= ~AES_REG_IRQ_DATA_OUT;
		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

		if (!dd->total)
			/* All bytes read! */
			tasklet_schedule(&dd->done_task);
		else
			/* Enable DATA_IN interrupt for next block */
			omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
	}

	return IRQ_HANDLED;
}

static const struct of_device_id omap_aes_of_match[] = {
	{
		.compatible	= "ti,omap2-aes",

@@ -1115,10 +1155,10 @@ static int omap_aes_probe(struct platform_device *pdev)
	struct omap_aes_dev *dd;
	struct crypto_alg *algp;
	struct resource res;
	int err = -ENOMEM, i, j;
	int err = -ENOMEM, i, j, irq = -1;
	u32 reg;

	dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
	dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;

@@ -1158,8 +1198,23 @@ static int omap_aes_probe(struct platform_device *pdev)
	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);

	err = omap_aes_dma_init(dd);
	if (err)
		goto err_dma;
	if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
		dd->pio_only = 1;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0) {
			dev_err(dev, "can't get IRQ resource\n");
			goto err_irq;
		}

		err = devm_request_irq(dev, irq, omap_aes_irq, 0,
				       dev_name(dev), dd);
		if (err) {
			dev_err(dev, "Unable to grab omap-aes IRQ\n");
			goto err_irq;
		}
	}

	INIT_LIST_HEAD(&dd->list);
	spin_lock(&list_lock);

@@ -1187,13 +1242,13 @@ err_algs:
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);
	omap_aes_dma_cleanup(dd);
err_dma:
	if (!dd->pio_only)
		omap_aes_dma_cleanup(dd);
err_irq:
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	pm_runtime_disable(dev);
err_res:
	kfree(dd);
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");

@@ -1221,7 +1276,6 @@ static int omap_aes_remove(struct platform_device *pdev)
	tasklet_kill(&dd->queue_task);
	omap_aes_dma_cleanup(dd);
	pm_runtime_disable(dd->dev);
	kfree(dd);
	dd = NULL;

	return 0;
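The heart of the omap-aes unaligned handling above is omap_aes_check_aligned(): the DMA engine needs every chunk to start on a word boundary and to be a whole number of AES blocks, otherwise the driver copies through a bounce buffer. A self-contained sketch of those same two tests; struct sg below is a simplified stand-in for the kernel's struct scatterlist:

    #include <stddef.h>
    #include <stdio.h>

    #define AES_BLOCK_SIZE 16
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    struct sg { unsigned int offset; unsigned int length; struct sg *next; };

    /* Reject chains the DMA path cannot consume directly, following
     * the same two tests as omap_aes_check_aligned() in the diff. */
    static int check_aligned(struct sg *sg)
    {
            for (; sg; sg = sg->next) {
                    if (!IS_ALIGNED(sg->offset, 4))
                            return -1;      /* word-unaligned start */
                    if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
                            return -1;      /* partial AES block */
            }
            return 0;
    }

    int main(void)
    {
            struct sg b = { 0, 24, NULL };  /* 24 is not a block multiple */
            struct sg a = { 4, 32, &b };

            printf("chain ok? %s\n",
                   check_aligned(&a) ? "no, use a bounce buffer" : "yes");
            return 0;
    }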
drivers/crypto/omap-sham.c

@@ -44,17 +44,13 @@
#include <crypto/hash.h>
#include <crypto/internal/hash.h>

#define SHA1_MD5_BLOCK_SIZE		SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE			16

#define DST_MAXBURST			16
#define DMA_MIN				(DST_MAXBURST * sizeof(u32))

#define SHA_REG_IDIGEST(dd, x)		((dd)->pdata->idigest_ofs + ((x)*0x04))
#define SHA_REG_DIN(dd, x)		((dd)->pdata->din_ofs + ((x) * 0x04))
#define SHA_REG_DIGCNT(dd)		((dd)->pdata->digcnt_ofs)

#define SHA_REG_ODIGEST(x)		(0x00 + ((x) * 0x04))
#define SHA_REG_ODIGEST(dd, x)		((dd)->pdata->odigest_ofs + (x * 0x04))

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)

@@ -75,18 +71,21 @@
#define SHA_REG_SYSSTATUS(dd)		((dd)->pdata->sysstatus_ofs)
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define SHA_REG_MODE			0x44
#define SHA_REG_MODE(dd)		((dd)->pdata->mode_ofs)
#define SHA_REG_MODE_HMAC_OUTER_HASH	(1 << 7)
#define SHA_REG_MODE_HMAC_KEY_PROC	(1 << 5)
#define SHA_REG_MODE_CLOSE_HASH		(1 << 4)
#define SHA_REG_MODE_ALGO_CONSTANT	(1 << 3)
#define SHA_REG_MODE_ALGO_MASK		(3 << 1)
#define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
#define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)
#define SHA_REG_MODE_ALGO_SHA2_224	(2 << 1)
#define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)

#define SHA_REG_LENGTH			0x48
#define SHA_REG_MODE_ALGO_MASK		(7 << 0)
#define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
#define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)
#define SHA_REG_MODE_ALGO_SHA2_224	(2 << 1)
#define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)
#define SHA_REG_MODE_ALGO_SHA2_384	(1 << 0)
#define SHA_REG_MODE_ALGO_SHA2_512	(3 << 0)

#define SHA_REG_LENGTH(dd)		((dd)->pdata->length_ofs)

#define SHA_REG_IRQSTATUS		0x118
#define SHA_REG_IRQSTATUS_CTX_RDY	(1 << 3)

@@ -117,18 +116,16 @@
#define FLAGS_SG		17

#define FLAGS_MODE_SHIFT	18
#define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK	\
					<< (FLAGS_MODE_SHIFT - 1))
#define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 \
					<< (FLAGS_MODE_SHIFT - 1))
#define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 \
					<< (FLAGS_MODE_SHIFT - 1))
#define FLAGS_MODE_SHA224	(SHA_REG_MODE_ALGO_SHA2_224 \
					<< (FLAGS_MODE_SHIFT - 1))
#define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 \
					<< (FLAGS_MODE_SHIFT - 1))
#define FLAGS_HMAC		20
#define FLAGS_ERROR		21
#define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA224	(SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA384	(SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA512	(SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)

#define FLAGS_HMAC		21
#define FLAGS_ERROR		22

#define OP_UPDATE	1
#define OP_FINAL	2

@@ -145,7 +142,7 @@ struct omap_sham_reqctx {
	unsigned long	flags;
	unsigned long	op;

	u8			digest[SHA256_DIGEST_SIZE] OMAP_ALIGNED;
	u8			digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

@@ -162,8 +159,8 @@ struct omap_sham_reqctx {

struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED;
	u8			opad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED;
	u8			ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
	u8			opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
};

struct omap_sham_ctx {

@@ -205,6 +202,8 @@ struct omap_sham_pdata {
	u32		rev_ofs;
	u32		mask_ofs;
	u32		sysstatus_ofs;
	u32		mode_ofs;
	u32		length_ofs;

	u32		major_mask;
	u32		major_shift;

@@ -223,6 +222,7 @@ struct omap_sham_dev {
	unsigned int		dma;
	struct dma_chan		*dma_lch;
	struct tasklet_struct	done_task;
	u8			polling_mode;

	unsigned long		flags;
	struct crypto_queue	queue;

@@ -306,9 +306,9 @@ static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
			if (out)
				opad[i] = omap_sham_read(dd,
						SHA_REG_ODIGEST(i));
						SHA_REG_ODIGEST(dd, i));
			else
				omap_sham_write(dd, SHA_REG_ODIGEST(i),
				omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
						opad[i]);
		}
	}

@@ -342,6 +342,12 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
	case FLAGS_MODE_SHA256:
		d = SHA256_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA384:
		d = SHA384_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA512:
		d = SHA512_DIGEST_SIZE / sizeof(u32);
		break;
	default:
		d = 0;
	}

@@ -404,6 +410,30 @@ static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
}

static int get_block_size(struct omap_sham_reqctx *ctx)
{
	int d;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
	case FLAGS_MODE_SHA1:
		d = SHA1_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA224:
	case FLAGS_MODE_SHA256:
		d = SHA256_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA384:
	case FLAGS_MODE_SHA512:
		d = SHA512_BLOCK_SIZE;
		break;
	default:
		d = 0;
	}

	return d;
}

static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
				 u32 *value, int count)
{

@@ -422,20 +452,24 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
	 * CLOSE_HASH only for the last one. Note that flags mode bits
	 * correspond to algorithm encoding in mode register.
	 */
	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT - 1);
	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
	if (!ctx->digcnt) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		int bs, nr_dr;

		val |= SHA_REG_MODE_ALGO_CONSTANT;

		if (ctx->flags & BIT(FLAGS_HMAC)) {
			bs = get_block_size(ctx);
			nr_dr = bs / (2 * sizeof(u32));
			val |= SHA_REG_MODE_HMAC_KEY_PROC;
			omap_sham_write_n(dd, SHA_REG_ODIGEST(0),
					  (u32 *)bctx->ipad,
					  SHA1_BLOCK_SIZE / sizeof(u32));
			ctx->digcnt += SHA1_BLOCK_SIZE;
			omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
					  (u32 *)bctx->ipad, nr_dr);
			omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
					  (u32 *)bctx->ipad + nr_dr, nr_dr);
			ctx->digcnt += bs;
		}
	}

@@ -451,7 +485,7 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
			SHA_REG_MODE_HMAC_KEY_PROC;

	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
	omap_sham_write_mask(dd, SHA_REG_MODE, val, mask);
	omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
			     SHA_REG_MASK_IT_EN |

@@ -461,7 +495,7 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,

static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
{
	omap_sham_write(dd, SHA_REG_LENGTH, length);
	omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
}

static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)

@@ -474,7 +508,7 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	int count, len32, bs32, offset = 0;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",

@@ -486,18 +520,23 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;

	if (dd->pdata->poll_irq(dd))
		return -ETIMEDOUT;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_CPU, &dd->flags);

	len32 = DIV_ROUND_UP(length, sizeof(u32));
	bs32 = get_block_size(ctx) / sizeof(u32);

	for (count = 0; count < len32; count++)
		omap_sham_write(dd, SHA_REG_DIN(dd, count), buffer[count]);
	while (len32) {
		if (dd->pdata->poll_irq(dd))
			return -ETIMEDOUT;

		for (count = 0; count < min(len32, bs32); count++, offset++)
			omap_sham_write(dd, SHA_REG_DIN(dd, count),
					buffer[offset]);
		len32 -= min(len32, bs32);
	}

	return -EINPROGRESS;
}

@@ -516,7 +555,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	int len32, ret;
	int len32, ret, dma_min = get_block_size(ctx);

	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

@@ -525,7 +564,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,

	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_maxburst = DST_MAXBURST;
	cfg.dst_maxburst = dma_min / DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
	if (ret) {

@@ -533,7 +572,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
		return ret;
	}

	len32 = DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN;
	len32 = DIV_ROUND_UP(length, dma_min) * dma_min;

	if (is_sg) {
		/*

@@ -666,14 +705,14 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
/* Start address alignment */
#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
/* SHA1 block size alignment */
#define SG_SA(sg)	(IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
#define SG_SA(sg, bs)	(IS_ALIGNED(sg->length, bs))

static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;
	int ret;
	int ret, bs;

	if (!ctx->total)
		return 0;

@@ -687,30 +726,31 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
	 * the dmaengine infrastructure will calculate that it needs
	 * to transfer 0 frames which ultimately fails.
	 */
	if (ctx->total < (DST_MAXBURST * sizeof(u32)))
	if (ctx->total < get_block_size(ctx))
		return omap_sham_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
			ctx->digcnt, ctx->bufcnt, ctx->total);

	sg = ctx->sg;
	bs = get_block_size(ctx);

	if (!SG_AA(sg))
		return omap_sham_update_dma_slow(dd);

	if (!sg_is_last(sg) && !SG_SA(sg))
		/* size is not SHA1_BLOCK_SIZE aligned */
	if (!sg_is_last(sg) && !SG_SA(sg, bs))
		/* size is not BLOCK_SIZE aligned */
		return omap_sham_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & BIT(FLAGS_FINUP))) {
			/* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
			tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
			/* not last sg must be BLOCK_SIZE aligned */
			tail = length & (bs - 1);
			/* without finup() we need one block to close hash */
			if (!tail)
				tail = SHA1_MD5_BLOCK_SIZE;
				tail = bs;
			length -= tail;
		}
	}

@@ -737,13 +777,22 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
static int omap_sham_update_cpu(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;
	int bufcnt, final;

	if (!ctx->total)
		return 0;

	omap_sham_append_sg(ctx);

	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

	dev_dbg(dd->dev, "cpu: bufcnt: %u, digcnt: %d, final: %d\n",
		ctx->bufcnt, ctx->digcnt, final);

	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
	return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final);
}

static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)

@@ -773,6 +822,7 @@ static int omap_sham_init(struct ahash_request *req)
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;
	int bs = 0;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {

@@ -796,15 +846,27 @@ static int omap_sham_init(struct ahash_request *req)
	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_MD5;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA1;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA224;
		bs = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA256;
		bs = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA384;
		bs = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA512;
		bs = SHA512_BLOCK_SIZE;
		break;
	}

@@ -816,8 +878,8 @@ static int omap_sham_init(struct ahash_request *req)
	if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
		ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
		memcpy(ctx->buffer, bctx->ipad, bs);
		ctx->bufcnt = bs;
	}

	ctx->flags |= BIT(FLAGS_HMAC);

@@ -853,8 +915,11 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if (ctx->bufcnt <= DMA_MIN)
		/* faster to handle last block with cpu */
	if ((ctx->bufcnt <= get_block_size(ctx)) || dd->polling_mode)
		/*
		 * faster to handle last block with cpu or
		 * use cpu when dma is not present.
		 */
		use_dma = 0;

	if (use_dma)

@@ -1006,6 +1071,8 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int bs = get_block_size(ctx);

	if (!req->nbytes)
		return 0;

@@ -1023,10 +1090,12 @@ static int omap_sham_update(struct ahash_request *req)
			 */
			omap_sham_append_sg(ctx);
			return 0;
		} else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
		} else if ((ctx->bufcnt + ctx->total <= bs) ||
			   dd->polling_mode) {
			/*
			 * faster to use CPU for short transfers
			 */
			 * faster to use CPU for short transfers or
			 * use cpu when dma is not present.
			 */
			ctx->flags |= BIT(FLAGS_CPU);
		}
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {

@@ -1214,6 +1283,16 @@ static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
	return omap_sham_cra_init_alg(tfm, "md5");
}

static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha384");
}

static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha512");
}

static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

@@ -1422,6 +1501,101 @@ static struct ahash_alg algs_sha224_sha256[] = {
},
};

static struct ahash_alg algs_sha384_sha512[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA384_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha384",
		.cra_driver_name	= "omap-sha384",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA512_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha512",
		.cra_driver_name	= "omap-sha512",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA384_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha384)",
		.cra_driver_name	= "omap-hmac-sha384",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha384_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA512_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha512)",
		.cra_driver_name	= "omap-hmac-sha512",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha512_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
};

static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;

@@ -1433,8 +1607,12 @@ static void omap_sham_done_task(unsigned long data)
	}

	if (test_bit(FLAGS_CPU, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
			goto finish;
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
			/* hash or semi-hash ready */
			err = omap_sham_update_cpu(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
			omap_sham_update_dma_stop(dd);

@@ -1548,11 +1726,54 @@ static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	.idigest_ofs	= 0x020,
	.odigest_ofs	= 0x0,
	.din_ofs	= 0x080,
	.digcnt_ofs	= 0x040,
	.rev_ofs	= 0x100,
	.mask_ofs	= 0x110,
	.sysstatus_ofs	= 0x114,
	.mode_ofs	= 0x44,
	.length_ofs	= 0x48,
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};

static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
	{
		.algs_list	= algs_sha384_sha512,
		.size		= ARRAY_SIZE(algs_sha384_sha512),
	},
};

static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
	.algs_info	= omap_sham_algs_info_omap5,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap5),
	.flags		= BIT(FLAGS_AUTO_XOR),
	.digest_size	= SHA512_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap4,
	.write_ctrl	= omap_sham_write_ctrl_omap4,
	.trigger	= omap_sham_trigger_omap4,
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	.idigest_ofs	= 0x240,
	.odigest_ofs	= 0x200,
	.din_ofs	= 0x080,
	.digcnt_ofs	= 0x280,
	.rev_ofs	= 0x100,
	.mask_ofs	= 0x110,
	.sysstatus_ofs	= 0x114,
	.mode_ofs	= 0x284,
	.length_ofs	= 0x288,
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,

@@ -1568,6 +1789,10 @@ static const struct of_device_id omap_sham_of_match[] = {
		.compatible	= "ti,omap4-sham",
		.data		= &omap_sham_pdata_omap4,
	},
	{
		.compatible	= "ti,omap5-sham",
		.data		= &omap_sham_pdata_omap5,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_sham_of_match);

@@ -1667,7 +1892,7 @@ static int omap_sham_probe(struct platform_device *pdev)
	int err, i, j;
	u32 rev;

	dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
	dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;

@@ -1684,20 +1909,21 @@ static int omap_sham_probe(struct platform_device *pdev)
	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
			       omap_sham_get_res_pdev(dd, pdev, &res);
	if (err)
		goto res_err;
		goto data_err;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto res_err;
		goto data_err;
	}
	dd->phys_base = res.start;

	err = request_irq(dd->irq, dd->pdata->intr_hdlr, IRQF_TRIGGER_LOW,
			  dev_name(dev), dd);
	err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
			       IRQF_TRIGGER_NONE, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq.\n");
		goto res_err;
		dev_err(dev, "unable to request irq %d, err = %d\n",
			dd->irq, err);
		goto data_err;
	}

	dma_cap_zero(mask);

@@ -1706,10 +1932,8 @@ static int omap_sham_probe(struct platform_device *pdev)
	dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						       &dd->dma, dev, "rx");
	if (!dd->dma_lch) {
		dev_err(dev, "unable to obtain RX DMA engine channel %u\n",
			dd->dma);
		err = -ENXIO;
		goto dma_err;
		dd->polling_mode = 1;
		dev_dbg(dev, "using polling mode instead of dma\n");
	}

	dd->flags |= dd->pdata->flags;

@@ -1747,11 +1971,6 @@ err_algs:
				&dd->pdata->algs_info[i].algs_list[j]);
	pm_runtime_disable(dev);
	dma_release_channel(dd->dma_lch);
dma_err:
	free_irq(dd->irq, dd);
res_err:
	kfree(dd);
	dd = NULL;
data_err:
	dev_err(dev, "initialization failed.\n");

@@ -1776,9 +1995,6 @@ static int omap_sham_remove(struct platform_device *pdev)
	tasklet_kill(&dd->done_task);
	pm_runtime_disable(&pdev->dev);
	dma_release_channel(dd->dma_lch);
	free_irq(dd->irq, dd);
	kfree(dd);
	dd = NULL;

	return 0;
}
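Worth spelling out the omap-sham flags rework above: the algorithm field in the MODE register grows from two bits at offset 1 to three bits at offset 0, so the software flags word now packs the mode with a plain shift by FLAGS_MODE_SHIFT instead of the old (FLAGS_MODE_SHIFT - 1). A compilable sketch of the new pack/unpack arithmetic; the constants are copied from the diff, the surrounding program is illustrative:

    #include <stdio.h>

    #define ALGO_MASK               (7 << 0)        /* 3 mode bits */
    #define ALGO_SHA2_512           (3 << 0)
    #define FLAGS_MODE_SHIFT        18
    #define FLAGS_MODE_MASK         (ALGO_MASK << FLAGS_MODE_SHIFT)
    #define FLAGS_MODE_SHA512       (ALGO_SHA2_512 << FLAGS_MODE_SHIFT)

    int main(void)
    {
            unsigned long flags = 0;

            flags |= FLAGS_MODE_SHA512;     /* pack the mode */

            /* Unpack straight back into the MODE register encoding;
             * the shift is used as-is, not (FLAGS_MODE_SHIFT - 1). */
            unsigned int val = (flags & FLAGS_MODE_MASK) >> FLAGS_MODE_SHIFT;

            printf("mode register algo bits: %u\n", val);  /* prints 3 */
            return 0;
    }

This also explains why FLAGS_HMAC and FLAGS_ERROR each move up by one bit position: the widened mode field needs the extra bit below them.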
drivers/crypto/sahara.c

@@ -417,7 +417,7 @@ static void sahara_aes_done_task(unsigned long data)
	dev->req->base.complete(&dev->req->base, dev->error);
}

void sahara_watchdog(unsigned long data)
static void sahara_watchdog(unsigned long data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

@@ -955,7 +955,7 @@ static int sahara_probe(struct platform_device *pdev)
	dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link) {
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		err = -ENOMEM;
		goto err_link;
drivers/crypto/tegra-aes.c

@@ -275,7 +275,7 @@ static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
			value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
			eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
			icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
		} while (eng_busy & (!icq_empty));
		} while (eng_busy && !icq_empty);
		aes_writel(dd, cmdq[i], TEGRA_AES_ICMDQUE_WR);
	}

@@ -365,7 +365,7 @@ static int aes_set_key(struct tegra_aes_dev *dd)
		eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
		icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
		dma_busy = value & TEGRA_AES_DMA_BUSY_FIELD;
	} while (eng_busy & (!icq_empty) & dma_busy);
	} while (eng_busy && !icq_empty && dma_busy);

	/* settable command to get key into internal registers */
	value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT |

@@ -379,7 +379,7 @@ static int aes_set_key(struct tegra_aes_dev *dd)
		value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
		eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
		icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
	} while (eng_busy & (!icq_empty));
	} while (eng_busy && !icq_empty);

	return 0;
}
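The tegra-aes fix above ("bitwise vs logical and") is a classic bug class: with status bitfields, `eng_busy & (!icq_empty)` ANDs a multi-bit value against a 0/1 logical result, so the condition can be false even while the engine is busy. A small program demonstrating why the old form exits the busy-wait too early; the values are made up for illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int eng_busy = 0x4;    /* nonzero status bit, e.g. BIT(2) */
            unsigned int icq_empty = 0;

            /* Old, buggy condition: !icq_empty is 1, and 0x4 & 1 == 0,
             * so the loop would stop while the engine is still busy. */
            printf("bitwise: %u\n", eng_busy & (!icq_empty));

            /* Fixed condition: keeps spinning while busy and not empty. */
            printf("logical: %u\n", (unsigned)(eng_busy && !icq_empty));
            return 0;
    }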
[One file's diff is not shown here because of its size.]
include/crypto/scatterwalk.h

@@ -113,4 +113,6 @@ void scatterwalk_done(struct scatter_walk *walk, int out, int more);
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
			      unsigned int start, unsigned int nbytes, int out);

int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes);

#endif  /* _CRYPTO_SCATTERWALK_H */
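Only the declaration of scatterwalk_bytes_sglen() appears in this excerpt; from its use in omap-aes (the result feeds dma_map_sg() and is BUG_ONed when negative), it evidently returns the number of scatterlist entries covering num_bytes, or a negative value if the chain is too short. A rough user-space approximation of that contract — this is a guess at the semantics, not the kernel implementation:

    #include <stdio.h>

    struct sg { unsigned int length; struct sg *next; };

    /* Count entries needed to cover num_bytes; -1 if the chain runs out. */
    static int sg_count(struct sg *sg, int num_bytes)
    {
            int n = 0;

            while (sg && num_bytes > 0) {
                    num_bytes -= sg->length;
                    sg = sg->next;
                    n++;
            }
            return num_bytes > 0 ? -1 : n;
    }

    int main(void)
    {
            struct sg c = { 64, NULL }, b = { 32, &c }, a = { 16, &b };

            printf("entries for 100 bytes: %d\n", sg_count(&a, 100)); /* 3 */
            return 0;
    }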
include/linux/crc-t10dif.h

@@ -3,6 +3,10 @@

#include <linux/types.h>

#define CRC_T10DIF_DIGEST_SIZE 2
#define CRC_T10DIF_BLOCK_SIZE 1

__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len);
__u16 crc_t10dif(unsigned char const *, size_t);

#endif
@ -846,6 +846,8 @@ static int padata_cpu_callback(struct notifier_block *nfb,
|
|||
switch (action) {
|
||||
case CPU_ONLINE:
|
||||
case CPU_ONLINE_FROZEN:
|
||||
case CPU_DOWN_FAILED:
|
||||
case CPU_DOWN_FAILED_FROZEN:
|
||||
if (!pinst_has_cpu(pinst, cpu))
|
||||
break;
|
||||
mutex_lock(&pinst->lock);
|
||||
|
@@ -857,6 +859,8 @@ static int padata_cpu_callback(struct notifier_block *nfb,

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
+	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
@@ -865,22 +869,6 @@ static int padata_cpu_callback(struct notifier_block *nfb,
		if (err)
			return notifier_from_errno(err);
		break;
-
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		if (!pinst_has_cpu(pinst, cpu))
-			break;
-		mutex_lock(&pinst->lock);
-		__padata_remove_cpu(pinst, cpu);
-		mutex_unlock(&pinst->lock);
-
-	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
-		if (!pinst_has_cpu(pinst, cpu))
-			break;
-		mutex_lock(&pinst->lock);
-		__padata_add_cpu(pinst, cpu);
-		mutex_unlock(&pinst->lock);
	}

	return NOTIFY_OK;
@@ -1086,18 +1074,18 @@ struct padata_instance *padata_alloc(struct workqueue_struct *wq,

	pinst->flags = 0;

-#ifdef CONFIG_HOTPLUG_CPU
-	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
-	pinst->cpu_notifier.priority = 0;
-	register_hotcpu_notifier(&pinst->cpu_notifier);
-#endif
-
	put_online_cpus();

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

+#ifdef CONFIG_HOTPLUG_CPU
+	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
+	pinst->cpu_notifier.priority = 0;
+	register_hotcpu_notifier(&pinst->cpu_notifier);
+#endif
+
	return pinst;

err_free_masks:
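The padata_alloc() hunk is an ordering fix: a CPU hotplug event can invoke padata_cpu_callback() as soon as register_hotcpu_notifier() returns, so the mutex, notifier head, and kobject the callback relies on must already be initialized. The shape of the pattern, reduced to a hedged sketch with a hypothetical instance type (not padata's real one):

    #include <linux/cpu.h>
    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/notifier.h>

    struct my_inst {
        struct mutex lock;
        struct notifier_block nb;
    };

    static int my_cpu_callback(struct notifier_block *nb,
                               unsigned long action, void *hcpu)
    {
        struct my_inst *inst = container_of(nb, struct my_inst, nb);

        mutex_lock(&inst->lock);    /* safe only if mutex_init() ran first */
        /* ... handle the hotplug action ... */
        mutex_unlock(&inst->lock);
        return NOTIFY_OK;
    }

    static void my_inst_setup(struct my_inst *inst)
    {
        mutex_init(&inst->lock);               /* 1. make state usable   */
        inst->nb.notifier_call = my_cpu_callback;
        register_hotcpu_notifier(&inst->nb);   /* 2. only then publish it */
    }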
@@ -76,6 +76,8 @@ config CRC16

config CRC_T10DIF
	tristate "CRC calculation for the T10 Data Integrity Field"
+	select CRYPTO
+	select CRYPTO_CRCT10DIF
	help
	  This option is only needed if a module that's not in the
	  kernel tree needs to calculate CRC checks for use with the
@@ -11,57 +11,45 @@
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-t10dif.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <crypto/hash.h>

-/* Table generated using the following polynomium:
- * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
- * gt: 0x8bb7
- */
-static const __u16 t10_dif_crc_table[256] = {
-	0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
-	0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
-	0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
-	0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
-	0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
-	0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
-	0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
-	0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
-	0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
-	0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
-	0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
-	0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
-	0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
-	0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
-	0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
-	0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
-	0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
-	0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
-	0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
-	0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
-	0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
-	0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
-	0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
-	0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
-	0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
-	0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
-	0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
-	0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
-	0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
-	0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
-	0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
-	0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
-};
+static struct crypto_shash *crct10dif_tfm;

__u16 crc_t10dif(const unsigned char *buffer, size_t len)
{
-	__u16 crc = 0;
-	unsigned int i;
+	struct {
+		struct shash_desc shash;
+		char ctx[2];
+	} desc;
+	int err;

-	for (i = 0 ; i < len ; i++)
-		crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];
+	desc.shash.tfm = crct10dif_tfm;
+	desc.shash.flags = 0;
+	*(__u16 *)desc.ctx = 0;

-	return crc;
+	err = crypto_shash_update(&desc.shash, buffer, len);
+	BUG_ON(err);
+
+	return *(__u16 *)desc.ctx;
}
EXPORT_SYMBOL(crc_t10dif);

+static int __init crc_t10dif_mod_init(void)
+{
+	crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
+	return PTR_RET(crct10dif_tfm);
+}
+
+static void __exit crc_t10dif_mod_fini(void)
+{
+	crypto_free_shash(crct10dif_tfm);
+}
+
+module_init(crc_t10dif_mod_init);
+module_exit(crc_t10dif_mod_fini);
+
MODULE_DESCRIPTION("T10 DIF CRC calculation");
MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: crct10dif");
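With this rewrite, crc_t10dif() forwards to whatever "crct10dif" shash the crypto API resolves (the generic table-driven module or the PCLMULQDQ-accelerated one from the x86 Makefile change), and MODULE_SOFTDEP lets a provider be loaded before first use. Callers see the same interface as before; a brief usage sketch against the declarations in the header hunk above:

    #include <linux/crc-t10dif.h>

    /* One-shot CRC over a protection-information buffer. */
    static __u16 pi_guard(const unsigned char *buf, size_t len)
    {
        return crc_t10dif(buf, len);
    }

    /* crc_t10dif_generic() takes a seed, so a CRC can be continued
     * across split buffers (illustrative use, not from this series). */
    static __u16 pi_guard_split(const unsigned char *a, size_t alen,
                                const unsigned char *b, size_t blen)
    {
        __u16 crc = crc_t10dif_generic(0, a, alen);
        return crc_t10dif_generic(crc, b, blen);
    }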