Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.12:

  API:
   - Add batch registration for acomp/scomp
   - Change acomp testing to non-unique compressed result
   - Extend algorithm name limit to 128 bytes
   - Require setkey before accept(2) in algif_aead (see the sketch below)

  Algorithms:
   - Add support for deflate rfc1950 (zlib) (see the sketch after the commit list)

  Drivers:
   - Add accelerated crct10dif for powerpc
   - Add crc32 in stm32
   - Add sha384/sha512 in ccp
   - Add 3des/gcm(aes) for v5 devices in ccp
   - Add Queue Interface (QI) backend support in caam
   - Add new Exynos RNG driver
   - Add ThunderX ZIP driver
   - Add driver for hardware random generator on MT7623 SoC"

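The algif_aead change noted above alters the user-visible AF_ALG ordering: an AEAD operation socket can no longer be obtained until a key has been set on the bound transform socket. A minimal user-space sketch of that ordering (the algorithm name, key and fallback defines are illustrative, not taken from the commit):

#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef AF_ALG
#define AF_ALG 38
#endif
#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",	/* illustrative algorithm */
	};
	unsigned char key[16] = { 0 };		/* illustrative key */
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;

	/* setkey must happen here: with this change, accept(2) on an AEAD
	 * socket that has no key fails instead of handing out an fd that
	 * can never perform an operation. */
	if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key)) < 0)
		return 1;

	opfd = accept(tfmfd, NULL, 0);
	if (opfd < 0)
		return 1;

	close(opfd);
	close(tfmfd);
	return 0;
}
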
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (101 commits)
  crypto: stm32 - Fix OF module alias information
  crypto: algif_aead - Require setkey before accept(2)
  crypto: scomp - add support for deflate rfc1950 (zlib)
  crypto: scomp - allow registration of multiple scomps
  crypto: ccp - Change ISR handler method for a v5 CCP
  crypto: ccp - Change ISR handler method for a v3 CCP
  crypto: crypto4xx - rename ce_ring_contol to ce_ring_control
  crypto: testmgr - Allow ecb(cipher_null) in FIPS mode
  Revert "crypto: arm64/sha - Add constant operand modifier to ASM_EXPORT"
  crypto: ccp - Disable interrupts early on unload
  crypto: ccp - Use only the relevant interrupt bits
  hwrng: mtk - Add driver for hardware random generator on MT7623 SoC
  dt-bindings: hwrng: Add Mediatek hardware random generator bindings
  crypto: crct10dif-vpmsum - Fix missing preempt_disable()
  crypto: testmgr - replace compression known answer test
  crypto: acomp - allow registration of multiple acomps
  hwrng: n2 - Use devm_kcalloc() in n2rng_probe()
  crypto: chcr - Fix error handling related to 'chcr_alloc_shash'
  padata: get_next is never NULL
  crypto: exynos - Add new Exynos RNG driver
  ...
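
In-kernel users reach the new deflate rfc1950 ("zlib") support through the acomp interface. Below is a one-shot compression sketch; the helper name, the buffer handling and the assumption that the scomp-backed "zlib-deflate" transform completes synchronously are mine, not part of the commit:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/acompress.h>

/* Hypothetical helper: compress slen bytes of src into dst (dlen bytes
 * available) and return the compressed length or a negative errno. */
static int zlib_deflate_oneshot(const void *src, unsigned int slen,
				void *dst, unsigned int dlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	struct scatterlist sg_src, sg_dst;
	int ret;

	tfm = crypto_alloc_acomp("zlib-deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg_src, src, slen);
	sg_init_one(&sg_dst, dst, dlen);
	acomp_request_set_params(req, &sg_src, &sg_dst, slen, dlen);

	/* The scomp-backed transform runs synchronously, so no completion
	 * callback is set up in this sketch. */
	ret = crypto_acomp_compress(req);
	if (!ret)
		ret = req->dlen;	/* bytes actually produced */

	acomp_request_free(req);
out_tfm:
	crypto_free_acomp(tfm);
	return ret;
}
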
Linus Torvalds 2017-05-02 15:53:46 -07:00
Parents: 204f144c9f 929562b144
Commit: 5a0387a8a8
137 changed files: 13717 additions and 2487 deletions


@@ -155,9 +155,9 @@ Code Example For Use of Operational State Memory With SHASH
char ctx[];
};
static struct sdescinit_sdesc(struct crypto_shash *alg)
static struct sdesc init_sdesc(struct crypto_shash *alg)
{
struct sdescsdesc;
struct sdesc sdesc;
int size;
size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
@@ -172,7 +172,7 @@ Code Example For Use of Operational State Memory With SHASH
static int calc_hash(struct crypto_shashalg,
const unsigned chardata, unsigned int datalen,
unsigned chardigest) {
struct sdescsdesc;
struct sdesc sdesc;
int ret;
sdesc = init_sdesc(alg);

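The two hunks above only restore missing spaces in the documentation's SHASH example. For context, the helpers they belong to read roughly as follows (a sketch reconstructed from the fragment and the shash API of this kernel generation; the pointer return type, allocation and error handling are filled in as assumptions):

#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

struct sdesc {
	struct shash_desc shash;
	char ctx[];
};

static struct sdesc *init_sdesc(struct crypto_shash *alg)
{
	struct sdesc *sdesc;
	int size;

	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
	sdesc = kmalloc(size, GFP_KERNEL);
	if (!sdesc)
		return ERR_PTR(-ENOMEM);
	sdesc->shash.tfm = alg;
	sdesc->shash.flags = 0x0;	/* descriptor flags field of this era */
	return sdesc;
}

static int calc_hash(struct crypto_shash *alg,
		     const unsigned char *data, unsigned int datalen,
		     unsigned char *digest)
{
	struct sdesc *sdesc;
	int ret;

	sdesc = init_sdesc(alg);
	if (IS_ERR(sdesc))
		return PTR_ERR(sdesc);

	ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
	kfree(sdesc);
	return ret;
}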

@@ -0,0 +1,16 @@
* STMicroelectronics STM32 CRC
Required properties:
- compatible: Should be "st,stm32f7-crc".
- reg: The address and length of the peripheral registers space
- clocks: The input clock of the CRC instance
Optional properties: none
Example:
crc: crc@40023000 {
compatible = "st,stm32f7-crc";
reg = <0x40023000 0x400>;
clocks = <&rcc 0 12>;
};


@@ -6,9 +6,16 @@ Required properties:
- compatible : should be "amlogic,meson-rng"
- reg : Specifies base physical address and size of the registers.
Optional properties:
- clocks : phandle to the following named clocks
- clock-names: Name of core clock, must be "core"
Example:
rng {
compatible = "amlogic,meson-rng";
reg = <0x0 0xc8834000 0x0 0x4>;
compatible = "amlogic,meson-rng";
reg = <0x0 0xc8834000 0x0 0x4>;
clocks = <&clkc CLKID_RNG0>;
clock-names = "core";
};


@@ -0,0 +1,18 @@
Device-Tree bindings for Mediatek random number generator
found in Mediatek SoC family
Required properties:
- compatible : Should be "mediatek,mt7623-rng"
- clocks : list of clock specifiers, corresponding to
entries in clock-names property;
- clock-names : Should contain "rng" entries;
- reg : Specifies base physical address and size of the registers
Example:
rng: rng@1020f000 {
compatible = "mediatek,mt7623-rng";
reg = <0 0x1020f000 0 0x1000>;
clocks = <&infracfg CLK_INFRA_TRNG>;
clock-names = "rng";
};


@@ -6242,7 +6242,7 @@ F: drivers/crypto/nx/nx_csbcpb.h
F: drivers/crypto/nx/nx_debugfs.h
IBM Power 842 compression accelerator
M: Dan Streetman <ddstreet@ieee.org>
M: Haren Myneni <haren@us.ibm.com>
S: Supported
F: drivers/crypto/nx/Makefile
F: drivers/crypto/nx/Kconfig
@@ -10954,6 +10954,14 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/samsung/
SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
F: drivers/crypto/exynos-rng.c
F: Documentation/devicetree/bindings/rng/samsung,exynos-rng4.txt
SAMSUNG FRAMEBUFFER DRIVER
M: Jingoo Han <jingoohan1@gmail.com>
L: linux-fbdev@vger.kernel.org
@@ -10978,6 +10986,14 @@ F: Documentation/devicetree/bindings/regulator/samsung,s2m*.txt
F: Documentation/devicetree/bindings/regulator/samsung,s5m*.txt
F: Documentation/devicetree/bindings/clock/samsung,s2mps11.txt
SAMSUNG S5P Security SubSystem (SSS) DRIVER
M: Krzysztof Kozlowski <krzk@kernel.org>
M: Vladimir Zapolskiy <vz@mleia.com>
L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
F: drivers/crypto/s5p-sss.c
SAMSUNG S5P/EXYNOS4 SOC SERIES CAMERA SUBSYSTEM DRIVERS
M: Kyungmin Park <kyungmin.park@samsung.com>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>


@@ -89,6 +89,10 @@
clock-frequency = <25000000>;
};
&crc {
status = "okay";
};
&usart1 {
pinctrl-0 = <&usart1_pins_a>;
pinctrl-names = "default";


@@ -289,6 +289,13 @@
};
};
crc: crc@40023000 {
compatible = "st,stm32f7-crc";
reg = <0x40023000 0x400>;
clocks = <&rcc 0 12>;
status = "disabled";
};
rcc: rcc@40023800 {
#clock-cells = <2>;
compatible = "st,stm32f42xx-rcc", "st,stm32-rcc";


@@ -75,5 +75,7 @@ CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_FTRACE is not set
CONFIG_CRYPTO=y
CONFIG_CRYPTO_DEV_STM32=y
CONFIG_CRC_ITU_T=y
CONFIG_CRC7=y


@@ -73,7 +73,7 @@ config CRYPTO_AES_ARM_BS
depends on KERNEL_MODE_NEON
select CRYPTO_BLKCIPHER
select CRYPTO_SIMD
select CRYPTO_AES_ARM
select CRYPTO_AES
help
Use a faster and more secure NEON based implementation of AES in CBC,
CTR and XTS modes


@@ -42,9 +42,6 @@ asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[]);
asmlinkage void __aes_arm_encrypt(const u32 rk[], int rounds, const u8 in[],
u8 out[]);
struct aesbs_ctx {
int rounds;
u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32] __aligned(AES_BLOCK_SIZE);
@@ -52,12 +49,12 @@ struct aesbs_ctx {
struct aesbs_cbc_ctx {
struct aesbs_ctx key;
u32 enc[AES_MAX_KEYLENGTH_U32];
struct crypto_cipher *enc_tfm;
};
struct aesbs_xts_ctx {
struct aesbs_ctx key;
u32 twkey[AES_MAX_KEYLENGTH_U32];
struct crypto_cipher *tweak_tfm;
};
static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
@@ -132,20 +129,18 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
ctx->key.rounds = 6 + key_len / 4;
memcpy(ctx->enc, rk.key_enc, sizeof(ctx->enc));
kernel_neon_begin();
aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
kernel_neon_end();
return 0;
return crypto_cipher_setkey(ctx->enc_tfm, in_key, key_len);
}
static void cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
__aes_arm_encrypt(ctx->enc, ctx->key.rounds, src, dst);
crypto_cipher_encrypt_one(ctx->enc_tfm, dst, src);
}
static int cbc_encrypt(struct skcipher_request *req)
@@ -181,6 +176,23 @@ static int cbc_decrypt(struct skcipher_request *req)
return err;
}
static int cbc_init(struct crypto_tfm *tfm)
{
struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(ctx->enc_tfm))
return PTR_ERR(ctx->enc_tfm);
return 0;
}
static void cbc_exit(struct crypto_tfm *tfm)
{
struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->enc_tfm);
}
static int ctr_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -228,7 +240,6 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_aes_ctx rk;
int err;
err = xts_verify_key(tfm, in_key, key_len);
@@ -236,15 +247,30 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
return err;
key_len /= 2;
err = crypto_aes_expand_key(&rk, in_key + key_len, key_len);
err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len);
if (err)
return err;
memcpy(ctx->twkey, rk.key_enc, sizeof(ctx->twkey));
return aesbs_setkey(tfm, in_key, key_len);
}
static int xts_init(struct crypto_tfm *tfm)
{
struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(ctx->tweak_tfm))
return PTR_ERR(ctx->tweak_tfm);
return 0;
}
static void xts_exit(struct crypto_tfm *tfm)
{
struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->tweak_tfm);
}
static int __xts_crypt(struct skcipher_request *req,
void (*fn)(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[]))
@@ -256,7 +282,7 @@ static int __xts_crypt(struct skcipher_request *req,
err = skcipher_walk_virt(&walk, req, true);
__aes_arm_encrypt(ctx->twkey, ctx->key.rounds, walk.iv, walk.iv);
crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
kernel_neon_begin();
while (walk.nbytes >= AES_BLOCK_SIZE) {
@@ -309,6 +335,8 @@ static struct skcipher_alg aes_algs[] = { {
.base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
.base.cra_module = THIS_MODULE,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_init = cbc_init,
.base.cra_exit = cbc_exit,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -342,6 +370,8 @@ static struct skcipher_alg aes_algs[] = { {
.base.cra_ctxsize = sizeof(struct aesbs_xts_ctx),
.base.cra_module = THIS_MODULE,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_init = xts_init,
.base.cra_exit = xts_exit,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -402,5 +432,5 @@ unregister_simds:
return err;
}
module_init(aes_init);
late_initcall(aes_init);
module_exit(aes_exit);


@@ -380,7 +380,7 @@
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0xc8834000 0x0 0x2000>;
rng {
hwrng: rng {
compatible = "amlogic,meson-rng";
reg = <0x0 0x0 0x0 0x4>;
};


@@ -524,3 +524,8 @@
&vpu {
compatible = "amlogic,meson-gxbb-vpu", "amlogic,meson-gx-vpu";
};
&hwrng {
clocks = <&clkc CLKID_RNG0>;
clock-names = "core";
};


@@ -31,8 +31,6 @@ static void tbi_boing_init(void)
}
#endif
#define ALIGN_DOWN(addr, size) ((addr)&(~((size)-1)))
/*
* Unwind the current stack frame and store the new register values in the
* structure passed as argument. Unwinding is equivalent to a function return,


@@ -10,6 +10,8 @@ obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o
obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o
obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o
obj-$(CONFIG_CRYPTO_CRC32C_VPMSUM) += crc32c-vpmsum.o
obj-$(CONFIG_CRYPTO_CRCT10DIF_VPMSUM) += crct10dif-vpmsum.o
obj-$(CONFIG_CRYPTO_VPMSUM_TESTER) += crc-vpmsum_test.o
aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
md5-ppc-y := md5-asm.o md5-glue.o
@@ -17,3 +19,4 @@ sha1-powerpc-y := sha1-powerpc-asm.o sha1.o
sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o


@@ -0,0 +1,137 @@
/*
* CRC vpmsum tester
* Copyright 2017 Daniel Axtens, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/crc-t10dif.h>
#include <linux/crc32.h>
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/cpufeature.h>
#include <asm/switch_to.h>
static unsigned long iterations = 10000;
#define MAX_CRC_LENGTH 65535
static int __init crc_test_init(void)
{
u16 crc16 = 0, verify16 = 0;
u32 crc32 = 0, verify32 = 0;
__le32 verify32le = 0;
unsigned char *data;
unsigned long i;
int ret;
struct crypto_shash *crct10dif_tfm;
struct crypto_shash *crc32c_tfm;
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
data = kmalloc(MAX_CRC_LENGTH, GFP_KERNEL);
if (!data)
return -ENOMEM;
crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
if (IS_ERR(crct10dif_tfm)) {
pr_err("Error allocating crc-t10dif\n");
goto free_buf;
}
crc32c_tfm = crypto_alloc_shash("crc32c", 0, 0);
if (IS_ERR(crc32c_tfm)) {
pr_err("Error allocating crc32c\n");
goto free_16;
}
do {
SHASH_DESC_ON_STACK(crct10dif_shash, crct10dif_tfm);
SHASH_DESC_ON_STACK(crc32c_shash, crc32c_tfm);
crct10dif_shash->tfm = crct10dif_tfm;
ret = crypto_shash_init(crct10dif_shash);
if (ret) {
pr_err("Error initing crc-t10dif\n");
goto free_32;
}
crc32c_shash->tfm = crc32c_tfm;
ret = crypto_shash_init(crc32c_shash);
if (ret) {
pr_err("Error initing crc32c\n");
goto free_32;
}
pr_info("crc-vpmsum_test begins, %lu iterations\n", iterations);
for (i=0; i<iterations; i++) {
size_t len, offset;
get_random_bytes(data, MAX_CRC_LENGTH);
get_random_bytes(&len, sizeof(len));
get_random_bytes(&offset, sizeof(offset));
len %= MAX_CRC_LENGTH;
offset &= 15;
if (len <= offset)
continue;
len -= offset;
crypto_shash_update(crct10dif_shash, data+offset, len);
crypto_shash_final(crct10dif_shash, (u8 *)(&crc16));
verify16 = crc_t10dif_generic(verify16, data+offset, len);
if (crc16 != verify16) {
pr_err("FAILURE in CRC16: got 0x%04x expected 0x%04x (len %lu)\n",
crc16, verify16, len);
break;
}
crypto_shash_update(crc32c_shash, data+offset, len);
crypto_shash_final(crc32c_shash, (u8 *)(&crc32));
verify32 = le32_to_cpu(verify32le);
verify32le = ~cpu_to_le32(__crc32c_le(~verify32, data+offset, len));
if (crc32 != (u32)verify32le) {
pr_err("FAILURE in CRC32: got 0x%08x expected 0x%08x (len %lu)\n",
crc32, verify32, len);
break;
}
}
pr_info("crc-vpmsum_test done, completed %lu iterations\n", i);
} while (0);
free_32:
crypto_free_shash(crc32c_tfm);
free_16:
crypto_free_shash(crct10dif_tfm);
free_buf:
kfree(data);
return 0;
}
static void __exit crc_test_exit(void) {}
module_init(crc_test_init);
module_exit(crc_test_exit);
module_param(iterations, long, 0400);
MODULE_AUTHOR("Daniel Axtens <dja@axtens.net>");
MODULE_DESCRIPTION("Vector polynomial multiply-sum CRC tester");
MODULE_LICENSE("GPL");


@@ -0,0 +1,755 @@
/*
* Core of the accelerated CRC algorithm.
* In your file, define the constants and CRC_FUNCTION_NAME
* Then include this file.
*
* Calculate the checksum of data that is 16 byte aligned and a multiple of
* 16 bytes.
*
* The first step is to reduce it to 1024 bits. We do this in 8 parallel
* chunks in order to mask the latency of the vpmsum instructions. If we
* have more than 32 kB of data to checksum we repeat this step multiple
* times, passing in the previous 1024 bits.
*
* The next step is to reduce the 1024 bits to 64 bits. This step adds
* 32 bits of 0s to the end - this matches what a CRC does. We just
* calculate constants that land the data in this 32 bits.
*
* We then use fixed point Barrett reduction to compute a mod n over GF(2)
* for n = CRC using POWER8 instructions. We use x = 32.
*
* http://en.wikipedia.org/wiki/Barrett_reduction
*
* Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
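
As a reading aid for the Barrett step described above (x = 32, so the computation is carried 2x = 64 bits higher, exactly as the comment says), the fixed-point identity being applied can be written, in the comment's notation (my restatement, not text from the file):

    m = \left\lfloor 2^{64} / n \right\rfloor, \qquad
    q = \left\lfloor m a / 2^{64} \right\rfloor, \qquad
    a \bmod n = a - q n

The multiplications are carry-less and the final subtraction reduces to an XOR because the arithmetic is over GF(2); these are the quantities the in-code comments later call "ma", "q = floor(ma/(2^64))", "qn" and "a - qn".
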
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#define MAX_SIZE 32768
.text
#if defined(__BIG_ENDIAN__) && defined(REFLECT)
#define BYTESWAP_DATA
#elif defined(__LITTLE_ENDIAN__) && !defined(REFLECT)
#define BYTESWAP_DATA
#else
#undef BYTESWAP_DATA
#endif
#define off16 r25
#define off32 r26
#define off48 r27
#define off64 r28
#define off80 r29
#define off96 r30
#define off112 r31
#define const1 v24
#define const2 v25
#define byteswap v26
#define mask_32bit v27
#define mask_64bit v28
#define zeroes v29
#ifdef BYTESWAP_DATA
#define VPERM(A, B, C, D) vperm A, B, C, D
#else
#define VPERM(A, B, C, D)
#endif
/* unsigned int CRC_FUNCTION_NAME(unsigned int crc, void *p, unsigned long len) */
FUNC_START(CRC_FUNCTION_NAME)
std r31,-8(r1)
std r30,-16(r1)
std r29,-24(r1)
std r28,-32(r1)
std r27,-40(r1)
std r26,-48(r1)
std r25,-56(r1)
li off16,16
li off32,32
li off48,48
li off64,64
li off80,80
li off96,96
li off112,112
li r0,0
/* Enough room for saving 10 non volatile VMX registers */
subi r6,r1,56+10*16
subi r7,r1,56+2*16
stvx v20,0,r6
stvx v21,off16,r6
stvx v22,off32,r6
stvx v23,off48,r6
stvx v24,off64,r6
stvx v25,off80,r6
stvx v26,off96,r6
stvx v27,off112,r6
stvx v28,0,r7
stvx v29,off16,r7
mr r10,r3
vxor zeroes,zeroes,zeroes
vspltisw v0,-1
vsldoi mask_32bit,zeroes,v0,4
vsldoi mask_64bit,zeroes,v0,8
/* Get the initial value into v8 */
vxor v8,v8,v8
MTVRD(v8, R3)
#ifdef REFLECT
vsldoi v8,zeroes,v8,8 /* shift into bottom 32 bits */
#else
vsldoi v8,v8,zeroes,4 /* shift into top 32 bits */
#endif
#ifdef BYTESWAP_DATA
addis r3,r2,.byteswap_constant@toc@ha
addi r3,r3,.byteswap_constant@toc@l
lvx byteswap,0,r3
addi r3,r3,16
#endif
cmpdi r5,256
blt .Lshort
rldicr r6,r5,0,56
/* Checksum in blocks of MAX_SIZE */
1: lis r7,MAX_SIZE@h
ori r7,r7,MAX_SIZE@l
mr r9,r7
cmpd r6,r7
bgt 2f
mr r7,r6
2: subf r6,r7,r6
/* our main loop does 128 bytes at a time */
srdi r7,r7,7
/*
* Work out the offset into the constants table to start at. Each
* constant is 16 bytes, and it is used against 128 bytes of input
* data - 128 / 16 = 8
*/
sldi r8,r7,4
srdi r9,r9,3
subf r8,r8,r9
/* We reduce our final 128 bytes in a separate step */
addi r7,r7,-1
mtctr r7
addis r3,r2,.constants@toc@ha
addi r3,r3,.constants@toc@l
/* Find the start of our constants */
add r3,r3,r8
/* zero v0-v7 which will contain our checksums */
vxor v0,v0,v0
vxor v1,v1,v1
vxor v2,v2,v2
vxor v3,v3,v3
vxor v4,v4,v4
vxor v5,v5,v5
vxor v6,v6,v6
vxor v7,v7,v7
lvx const1,0,r3
/*
* If we are looping back to consume more data we use the values
* already in v16-v23.
*/
cmpdi r0,1
beq 2f
/* First warm up pass */
lvx v16,0,r4
lvx v17,off16,r4
VPERM(v16,v16,v16,byteswap)
VPERM(v17,v17,v17,byteswap)
lvx v18,off32,r4
lvx v19,off48,r4
VPERM(v18,v18,v18,byteswap)
VPERM(v19,v19,v19,byteswap)
lvx v20,off64,r4
lvx v21,off80,r4
VPERM(v20,v20,v20,byteswap)
VPERM(v21,v21,v21,byteswap)
lvx v22,off96,r4
lvx v23,off112,r4
VPERM(v22,v22,v22,byteswap)
VPERM(v23,v23,v23,byteswap)
addi r4,r4,8*16
/* xor in initial value */
vxor v16,v16,v8
2: bdz .Lfirst_warm_up_done
addi r3,r3,16
lvx const2,0,r3
/* Second warm up pass */
VPMSUMD(v8,v16,const1)
lvx v16,0,r4
VPERM(v16,v16,v16,byteswap)
ori r2,r2,0
VPMSUMD(v9,v17,const1)
lvx v17,off16,r4
VPERM(v17,v17,v17,byteswap)
ori r2,r2,0
VPMSUMD(v10,v18,const1)
lvx v18,off32,r4
VPERM(v18,v18,v18,byteswap)
ori r2,r2,0
VPMSUMD(v11,v19,const1)
lvx v19,off48,r4
VPERM(v19,v19,v19,byteswap)
ori r2,r2,0
VPMSUMD(v12,v20,const1)
lvx v20,off64,r4
VPERM(v20,v20,v20,byteswap)
ori r2,r2,0
VPMSUMD(v13,v21,const1)
lvx v21,off80,r4
VPERM(v21,v21,v21,byteswap)
ori r2,r2,0
VPMSUMD(v14,v22,const1)
lvx v22,off96,r4
VPERM(v22,v22,v22,byteswap)
ori r2,r2,0
VPMSUMD(v15,v23,const1)
lvx v23,off112,r4
VPERM(v23,v23,v23,byteswap)
addi r4,r4,8*16
bdz .Lfirst_cool_down
/*
* main loop. We modulo schedule it such that it takes three iterations
* to complete - first iteration load, second iteration vpmsum, third
* iteration xor.
*/
.balign 16
4: lvx const1,0,r3
addi r3,r3,16
ori r2,r2,0
vxor v0,v0,v8
VPMSUMD(v8,v16,const2)
lvx v16,0,r4
VPERM(v16,v16,v16,byteswap)
ori r2,r2,0
vxor v1,v1,v9
VPMSUMD(v9,v17,const2)
lvx v17,off16,r4
VPERM(v17,v17,v17,byteswap)
ori r2,r2,0
vxor v2,v2,v10
VPMSUMD(v10,v18,const2)
lvx v18,off32,r4
VPERM(v18,v18,v18,byteswap)
ori r2,r2,0
vxor v3,v3,v11
VPMSUMD(v11,v19,const2)
lvx v19,off48,r4
VPERM(v19,v19,v19,byteswap)
lvx const2,0,r3
ori r2,r2,0
vxor v4,v4,v12
VPMSUMD(v12,v20,const1)
lvx v20,off64,r4
VPERM(v20,v20,v20,byteswap)
ori r2,r2,0
vxor v5,v5,v13
VPMSUMD(v13,v21,const1)
lvx v21,off80,r4
VPERM(v21,v21,v21,byteswap)
ori r2,r2,0
vxor v6,v6,v14
VPMSUMD(v14,v22,const1)
lvx v22,off96,r4
VPERM(v22,v22,v22,byteswap)
ori r2,r2,0
vxor v7,v7,v15
VPMSUMD(v15,v23,const1)
lvx v23,off112,r4
VPERM(v23,v23,v23,byteswap)
addi r4,r4,8*16
bdnz 4b
.Lfirst_cool_down:
/* First cool down pass */
lvx const1,0,r3
addi r3,r3,16
vxor v0,v0,v8
VPMSUMD(v8,v16,const1)
ori r2,r2,0
vxor v1,v1,v9
VPMSUMD(v9,v17,const1)
ori r2,r2,0
vxor v2,v2,v10
VPMSUMD(v10,v18,const1)
ori r2,r2,0
vxor v3,v3,v11
VPMSUMD(v11,v19,const1)
ori r2,r2,0
vxor v4,v4,v12
VPMSUMD(v12,v20,const1)
ori r2,r2,0
vxor v5,v5,v13
VPMSUMD(v13,v21,const1)
ori r2,r2,0
vxor v6,v6,v14
VPMSUMD(v14,v22,const1)
ori r2,r2,0
vxor v7,v7,v15
VPMSUMD(v15,v23,const1)
ori r2,r2,0
.Lsecond_cool_down:
/* Second cool down pass */
vxor v0,v0,v8
vxor v1,v1,v9
vxor v2,v2,v10
vxor v3,v3,v11
vxor v4,v4,v12
vxor v5,v5,v13
vxor v6,v6,v14
vxor v7,v7,v15
#ifdef REFLECT
/*
* vpmsumd produces a 96 bit result in the least significant bits
* of the register. Since we are bit reflected we have to shift it
* left 32 bits so it occupies the least significant bits in the
* bit reflected domain.
*/
vsldoi v0,v0,zeroes,4
vsldoi v1,v1,zeroes,4
vsldoi v2,v2,zeroes,4
vsldoi v3,v3,zeroes,4
vsldoi v4,v4,zeroes,4
vsldoi v5,v5,zeroes,4
vsldoi v6,v6,zeroes,4
vsldoi v7,v7,zeroes,4
#endif
/* xor with last 1024 bits */
lvx v8,0,r4
lvx v9,off16,r4
VPERM(v8,v8,v8,byteswap)
VPERM(v9,v9,v9,byteswap)
lvx v10,off32,r4
lvx v11,off48,r4
VPERM(v10,v10,v10,byteswap)
VPERM(v11,v11,v11,byteswap)
lvx v12,off64,r4
lvx v13,off80,r4
VPERM(v12,v12,v12,byteswap)
VPERM(v13,v13,v13,byteswap)
lvx v14,off96,r4
lvx v15,off112,r4
VPERM(v14,v14,v14,byteswap)
VPERM(v15,v15,v15,byteswap)
addi r4,r4,8*16
vxor v16,v0,v8
vxor v17,v1,v9
vxor v18,v2,v10
vxor v19,v3,v11
vxor v20,v4,v12
vxor v21,v5,v13
vxor v22,v6,v14
vxor v23,v7,v15
li r0,1
cmpdi r6,0
addi r6,r6,128
bne 1b
/* Work out how many bytes we have left */
andi. r5,r5,127
/* Calculate where in the constant table we need to start */
subfic r6,r5,128
add r3,r3,r6
/* How many 16 byte chunks are in the tail */
srdi r7,r5,4
mtctr r7
/*
* Reduce the previously calculated 1024 bits to 64 bits, shifting
* 32 bits to include the trailing 32 bits of zeros
*/
lvx v0,0,r3
lvx v1,off16,r3
lvx v2,off32,r3
lvx v3,off48,r3
lvx v4,off64,r3
lvx v5,off80,r3
lvx v6,off96,r3
lvx v7,off112,r3
addi r3,r3,8*16
VPMSUMW(v0,v16,v0)
VPMSUMW(v1,v17,v1)
VPMSUMW(v2,v18,v2)
VPMSUMW(v3,v19,v3)
VPMSUMW(v4,v20,v4)
VPMSUMW(v5,v21,v5)
VPMSUMW(v6,v22,v6)
VPMSUMW(v7,v23,v7)
/* Now reduce the tail (0 - 112 bytes) */
cmpdi r7,0
beq 1f
lvx v16,0,r4
lvx v17,0,r3
VPERM(v16,v16,v16,byteswap)
VPMSUMW(v16,v16,v17)
vxor v0,v0,v16
bdz 1f
lvx v16,off16,r4
lvx v17,off16,r3
VPERM(v16,v16,v16,byteswap)
VPMSUMW(v16,v16,v17)
vxor v0,v0,v16
bdz 1f
lvx v16,off32,r4
lvx v17,off32,r3
VPERM(v16,v16,v16,byteswap)
VPMSUMW(v16,v16,v17)
vxor v0,v0,v16
bdz 1f
lvx v16,off48,r4
lvx v17,off48,r3
VPERM(v16,v16,v16,byteswap)
VPMSUMW(v16,v16,v17)
vxor v0,v0,v16
bdz 1f
lvx v16,off64,r4
lvx v17,off64,r3
VPERM(v16,v16,v16,byteswap)
VPMSUMW(v16,v16,v17)
vxor v0,v0,v16
bdz 1f
lvx v16,off80,r4
lvx v17,off80,r3
VPERM(v16,v16,v16,byteswap)
VPMSUMW(v16,v16,v17)
vxor v0,v0,v16
bdz 1f
lvx v16,off96,r4
lvx v17,off96,r3
VPERM(v16,v16,v16,byteswap)
VPMSUMW(v16,v16,v17)
vxor v0,v0,v16
/* Now xor all the parallel chunks together */
1: vxor v0,v0,v1
vxor v2,v2,v3
vxor v4,v4,v5
vxor v6,v6,v7
vxor v0,v0,v2
vxor v4,v4,v6
vxor v0,v0,v4
.Lbarrett_reduction:
/* Barrett constants */
addis r3,r2,.barrett_constants@toc@ha
addi r3,r3,.barrett_constants@toc@l
lvx const1,0,r3
lvx const2,off16,r3
vsldoi v1,v0,v0,8
vxor v0,v0,v1 /* xor two 64 bit results together */
#ifdef REFLECT
/* shift left one bit */
vspltisb v1,1
vsl v0,v0,v1
#endif
vand v0,v0,mask_64bit
#ifndef REFLECT
/*
* Now for the Barrett reduction algorithm. The idea is to calculate q,
* the multiple of our polynomial that we need to subtract. By
* doing the computation 2x bits higher (ie 64 bits) and shifting the
* result back down 2x bits, we round down to the nearest multiple.
*/
VPMSUMD(v1,v0,const1) /* ma */
vsldoi v1,zeroes,v1,8 /* q = floor(ma/(2^64)) */
VPMSUMD(v1,v1,const2) /* qn */
vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */
/*
* Get the result into r3. We need to shift it left 8 bytes:
* V0 [ 0 1 2 X ]
* V0 [ 0 X 2 3 ]
*/
vsldoi v0,v0,zeroes,8 /* shift result into top 64 bits */
#else
/*
* The reflected version of Barrett reduction. Instead of bit
* reflecting our data (which is expensive to do), we bit reflect our
* constants and our algorithm, which means the intermediate data in
* our vector registers goes from 0-63 instead of 63-0. We can reflect
* the algorithm because we don't carry in mod 2 arithmetic.
*/
vand v1,v0,mask_32bit /* bottom 32 bits of a */
VPMSUMD(v1,v1,const1) /* ma */
vand v1,v1,mask_32bit /* bottom 32bits of ma */
VPMSUMD(v1,v1,const2) /* qn */
vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */
/*
* Since we are bit reflected, the result (ie the low 32 bits) is in
* the high 32 bits. We just need to shift it left 4 bytes
* V0 [ 0 1 X 3 ]
* V0 [ 0 X 2 3 ]
*/
vsldoi v0,v0,zeroes,4 /* shift result into top 64 bits of */
#endif
/* Get it into r3 */
MFVRD(R3, v0)
.Lout:
subi r6,r1,56+10*16
subi r7,r1,56+2*16
lvx v20,0,r6
lvx v21,off16,r6
lvx v22,off32,r6
lvx v23,off48,r6
lvx v24,off64,r6
lvx v25,off80,r6
lvx v26,off96,r6
lvx v27,off112,r6
lvx v28,0,r7
lvx v29,off16,r7
ld r31,-8(r1)
ld r30,-16(r1)
ld r29,-24(r1)
ld r28,-32(r1)
ld r27,-40(r1)
ld r26,-48(r1)
ld r25,-56(r1)
blr
.Lfirst_warm_up_done:
lvx const1,0,r3
addi r3,r3,16
VPMSUMD(v8,v16,const1)
VPMSUMD(v9,v17,const1)
VPMSUMD(v10,v18,const1)
VPMSUMD(v11,v19,const1)
VPMSUMD(v12,v20,const1)
VPMSUMD(v13,v21,const1)
VPMSUMD(v14,v22,const1)
VPMSUMD(v15,v23,const1)
b .Lsecond_cool_down
.Lshort:
cmpdi r5,0
beq .Lzero
addis r3,r2,.short_constants@toc@ha
addi r3,r3,.short_constants@toc@l
/* Calculate where in the constant table we need to start */
subfic r6,r5,256
add r3,r3,r6
/* How many 16 byte chunks? */
srdi r7,r5,4
mtctr r7
vxor v19,v19,v19
vxor v20,v20,v20
lvx v0,0,r4
lvx v16,0,r3
VPERM(v0,v0,v16,byteswap)
vxor v0,v0,v8 /* xor in initial value */
VPMSUMW(v0,v0,v16)
bdz .Lv0
lvx v1,off16,r4
lvx v17,off16,r3
VPERM(v1,v1,v17,byteswap)
VPMSUMW(v1,v1,v17)
bdz .Lv1
lvx v2,off32,r4
lvx v16,off32,r3
VPERM(v2,v2,v16,byteswap)
VPMSUMW(v2,v2,v16)
bdz .Lv2
lvx v3,off48,r4
lvx v17,off48,r3
VPERM(v3,v3,v17,byteswap)
VPMSUMW(v3,v3,v17)
bdz .Lv3
lvx v4,off64,r4
lvx v16,off64,r3
VPERM(v4,v4,v16,byteswap)
VPMSUMW(v4,v4,v16)
bdz .Lv4
lvx v5,off80,r4
lvx v17,off80,r3
VPERM(v5,v5,v17,byteswap)
VPMSUMW(v5,v5,v17)
bdz .Lv5
lvx v6,off96,r4
lvx v16,off96,r3
VPERM(v6,v6,v16,byteswap)
VPMSUMW(v6,v6,v16)
bdz .Lv6
lvx v7,off112,r4
lvx v17,off112,r3
VPERM(v7,v7,v17,byteswap)
VPMSUMW(v7,v7,v17)
bdz .Lv7
addi r3,r3,128
addi r4,r4,128
lvx v8,0,r4
lvx v16,0,r3
VPERM(v8,v8,v16,byteswap)
VPMSUMW(v8,v8,v16)
bdz .Lv8
lvx v9,off16,r4
lvx v17,off16,r3
VPERM(v9,v9,v17,byteswap)
VPMSUMW(v9,v9,v17)
bdz .Lv9
lvx v10,off32,r4
lvx v16,off32,r3
VPERM(v10,v10,v16,byteswap)
VPMSUMW(v10,v10,v16)
bdz .Lv10
lvx v11,off48,r4
lvx v17,off48,r3
VPERM(v11,v11,v17,byteswap)
VPMSUMW(v11,v11,v17)
bdz .Lv11
lvx v12,off64,r4
lvx v16,off64,r3
VPERM(v12,v12,v16,byteswap)
VPMSUMW(v12,v12,v16)
bdz .Lv12
lvx v13,off80,r4
lvx v17,off80,r3
VPERM(v13,v13,v17,byteswap)
VPMSUMW(v13,v13,v17)
bdz .Lv13
lvx v14,off96,r4
lvx v16,off96,r3
VPERM(v14,v14,v16,byteswap)
VPMSUMW(v14,v14,v16)
bdz .Lv14
lvx v15,off112,r4
lvx v17,off112,r3
VPERM(v15,v15,v17,byteswap)
VPMSUMW(v15,v15,v17)
.Lv15: vxor v19,v19,v15
.Lv14: vxor v20,v20,v14
.Lv13: vxor v19,v19,v13
.Lv12: vxor v20,v20,v12
.Lv11: vxor v19,v19,v11
.Lv10: vxor v20,v20,v10
.Lv9: vxor v19,v19,v9
.Lv8: vxor v20,v20,v8
.Lv7: vxor v19,v19,v7
.Lv6: vxor v20,v20,v6
.Lv5: vxor v19,v19,v5
.Lv4: vxor v20,v20,v4
.Lv3: vxor v19,v19,v3
.Lv2: vxor v20,v20,v2
.Lv1: vxor v19,v19,v1
.Lv0: vxor v20,v20,v0
vxor v0,v19,v20
b .Lbarrett_reduction
.Lzero:
mr r3,r10
b .Lout
FUNC_END(CRC_FUNCTION_NAME)


@@ -1,20 +1,5 @@
/*
* Calculate the checksum of data that is 16 byte aligned and a multiple of
* 16 bytes.
*
* The first step is to reduce it to 1024 bits. We do this in 8 parallel
* chunks in order to mask the latency of the vpmsum instructions. If we
* have more than 32 kB of data to checksum we repeat this step multiple
* times, passing in the previous 1024 bits.
*
* The next step is to reduce the 1024 bits to 64 bits. This step adds
* 32 bits of 0s to the end - this matches what a CRC does. We just
* calculate constants that land the data in this 32 bits.
*
* We then use fixed point Barrett reduction to compute a mod n over GF(2)
* for n = CRC using POWER8 instructions. We use x = 32.
*
* http://en.wikipedia.org/wiki/Barrett_reduction
* Calculate a crc32c with vpmsum acceleration
*
* Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM
*
@@ -23,9 +8,6 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
.section .rodata
.balign 16
@@ -33,7 +15,6 @@
/* byte reverse permute constant */
.octa 0x0F0E0D0C0B0A09080706050403020100
#define MAX_SIZE 32768
.constants:
/* Reduce 262144 kbits to 1024 bits */
@@ -860,694 +841,6 @@
/* 33 bit reflected Barrett constant n */
.octa 0x00000000000000000000000105ec76f1
.text
#if defined(__BIG_ENDIAN__)
#define BYTESWAP_DATA
#else
#undef BYTESWAP_DATA
#endif
#define off16 r25
#define off32 r26
#define off48 r27
#define off64 r28
#define off80 r29
#define off96 r30
#define off112 r31
#define const1 v24
#define const2 v25
#define byteswap v26
#define mask_32bit v27
#define mask_64bit v28
#define zeroes v29
#ifdef BYTESWAP_DATA
#define VPERM(A, B, C, D) vperm A, B, C, D
#else
#define VPERM(A, B, C, D)
#endif
/* unsigned int __crc32c_vpmsum(unsigned int crc, void *p, unsigned long len) */
FUNC_START(__crc32c_vpmsum)
std r31,-8(r1)
std r30,-16(r1)
std r29,-24(r1)
std r28,-32(r1)
std r27,-40(r1)
std r26,-48(r1)
std r25,-56(r1)
li off16,16
li off32,32
li off48,48
li off64,64
li off80,80
li off96,96
li off112,112
li r0,0
/* Enough room for saving 10 non volatile VMX registers */
subi r6,r1,56+10*16
subi r7,r1,56+2*16
stvx v20,0,r6
stvx v21,off16,r6
stvx v22,off32,r6
stvx v23,off48,r6
stvx v24,off64,r6
stvx v25,off80,r6
stvx v26,off96,r6
stvx v27,off112,r6
stvx v28,0,r7
stvx v29,off16,r7
mr r10,r3
vxor zeroes,zeroes,zeroes
vspltisw v0,-1
vsldoi mask_32bit,zeroes,v0,4
vsldoi mask_64bit,zeroes,v0,8
/* Get the initial value into v8 */
vxor v8,v8,v8
MTVRD(v8, R3)
vsldoi v8,zeroes,v8,8 /* shift into bottom 32 bits */
#ifdef BYTESWAP_DATA
addis r3,r2,.byteswap_constant@toc@ha
addi r3,r3,.byteswap_constant@toc@l
lvx byteswap,0,r3
addi r3,r3,16
#endif
cmpdi r5,256
blt .Lshort
rldicr r6,r5,0,56
/* Checksum in blocks of MAX_SIZE */
1: lis r7,MAX_SIZE@h
ori r7,r7,MAX_SIZE@l
mr r9,r7
cmpd r6,r7
bgt 2f
mr r7,r6
2: subf r6,r7,r6
/* our main loop does 128 bytes at a time */
srdi r7,r7,7
/*
* Work out the offset into the constants table to start at. Each
* constant is 16 bytes, and it is used against 128 bytes of input
* data - 128 / 16 = 8
*/
sldi r8,r7,4
srdi r9,r9,3
subf r8,r8,r9
/* We reduce our final 128 bytes in a separate step */
addi r7,r7,-1
mtctr r7
addis r3,r2,.constants@toc@ha
addi r3,r3,.constants@toc@l
/* Find the start of our constants */
add r3,r3,r8
/* zero v0-v7 which will contain our checksums */
vxor v0,v0,v0
vxor v1,v1,v1
vxor v2,v2,v2
vxor v3,v3,v3
vxor v4,v4,v4
vxor v5,v5,v5
vxor v6,v6,v6
vxor v7,v7,v7
lvx const1,0,r3
/*
* If we are looping back to consume more data we use the values
* already in v16-v23.
*/
cmpdi r0,1
beq 2f
/* First warm up pass */
lvx v16,0,r4
lvx v17,off16,r4
VPERM(v16,v16,v16,byteswap)
VPERM(v17,v17,v17,byteswap)
lvx v18,off32,r4
lvx v19,off48,r4
VPERM(v18,v18,v18,byteswap)
VPERM(v19,v19,v19,byteswap)
lvx v20,off64,r4
lvx v21,off80,r4
VPERM(v20,v20,v20,byteswap)
VPERM(v21,v21,v21,byteswap)
lvx v22,off96,r4
lvx v23,off112,r4
VPERM(v22,v22,v22,byteswap)
VPERM(v23,v23,v23,byteswap)
addi r4,r4,8*16
/* xor in initial value */
vxor v16,v16,v8
2: bdz .Lfirst_warm_up_done
addi r3,r3,16
lvx const2,0,r3
/* Second warm up pass */
VPMSUMD(v8,v16,const1)
lvx v16,0,r4
VPERM(v16,v16,v16,byteswap)
ori r2,r2,0
VPMSUMD(v9,v17,const1)
lvx v17,off16,r4
VPERM(v17,v17,v17,byteswap)
ori r2,r2,0
VPMSUMD(v10,v18,const1)
lvx v18,off32,r4
VPERM(v18,v18,v18,byteswap)
ori r2,r2,0
VPMSUMD(v11,v19,const1)
lvx v19,off48,r4
VPERM(v19,v19,v19,byteswap)
ori r2,r2,0
VPMSUMD(v12,v20,const1)
lvx v20,off64,r4
VPERM(v20,v20,v20,byteswap)
ori r2,r2,0
VPMSUMD(v13,v21,const1)
lvx v21,off80,r4
VPERM(v21,v21,v21,byteswap)
ori r2,r2,0
VPMSUMD(v14,v22,const1)
lvx v22,off96,r4
VPERM(v22,v22,v22,byteswap)
ori r2,r2,0
VPMSUMD(v15,v23,const1)
lvx v23,off112,r4
VPERM(v23,v23,v23,byteswap)
addi r4,r4,8*16
bdz .Lfirst_cool_down
/*
* main loop. We modulo schedule it such that it takes three iterations
* to complete - first iteration load, second iteration vpmsum, third
* iteration xor.
*/
.balign 16
4: lvx const1,0,r3
addi r3,r3,16
ori r2,r2,0
vxor v0,v0,v8
VPMSUMD(v8,v16,const2)
lvx v16,0,r4
VPERM(v16,v16,v16,byteswap)
ori r2,r2,0
vxor v1,v1,v9
VPMSUMD(v9,v17,const2)
lvx v17,off16,r4
VPERM(v17,v17,v17,byteswap)
ori r2,r2,0
vxor v2,v2,v10
VPMSUMD(v10,v18,const2)
lvx v18,off32,r4
VPERM(v18,v18,v18,byteswap)
ori r2,r2,0
vxor v3,v3,v11
VPMSUMD(v11,v19,const2)
lvx v19,off48,r4
VPERM(v19,v19,v19,byteswap)
lvx const2,0,r3
ori r2,r2,0
vxor v4,v4,v12
VPMSUMD(v12,v20,const1)
lvx v20,off64,r4
VPERM(v20,v20,v20,byteswap)
ori r2,r2,0
vxor v5,v5,v13
VPMSUMD(v13,v21,const1)
lvx v21,off80,r4
VPERM(v21,v21,v21,byteswap)
ori r2,r2,0
vxor v6,v6,v14
VPMSUMD(v14,v22,const1)
lvx v22,off96,r4
VPERM(v22,v22,v22,byteswap)
ori r2,r2,0
vxor v7,v7,v15
VPMSUMD(v15,v23,const1)
lvx v23,off112,r4
VPERM(v23,v23,v23,byteswap)
addi r4,r4,8*16
bdnz 4b
.Lfirst_cool_down:
/* First cool down pass */
lvx const1,0,r3
addi r3,r3,16
vxor v0,v0,v8
VPMSUMD(v8,v16,const1)
ori r2,r2,0
vxor v1,v1,v9
VPMSUMD(v9,v17,const1)
ori r2,r2,0
vxor v2,v2,v10
VPMSUMD(v10,v18,const1)
ori r2,r2,0
vxor v3,v3,v11
VPMSUMD(v11,v19,const1)
ori r2,r2,0
vxor v4,v4,v12
VPMSUMD(v12,v20,const1)
ori r2,r2,0
vxor v5,v5,v13
VPMSUMD(v13,v21,const1)
ori r2,r2,0
vxor v6,v6,v14
VPMSUMD(v14,v22,const1)
ori r2,r2,0
vxor v7,v7,v15
VPMSUMD(v15,v23,const1)
ori r2,r2,0
.Lsecond_cool_down:
/* Second cool down pass */
vxor v0,v0,v8
vxor v1,v1,v9
vxor v2,v2,v10
vxor v3,v3,v11
vxor v4,v4,v12
vxor v5,v5,v13
vxor v6,v6,v14
vxor v7,v7,v15
/*
* vpmsumd produces a 96 bit result in the least significant bits
* of the register. Since we are bit reflected we have to shift it
* left 32 bits so it occupies the least significant bits in the
* bit reflected domain.
*/
vsldoi v0,v0,zeroes,4
vsldoi v1,v1,zeroes,4
vsldoi v2,v2,zeroes,4
vsldoi v3,v3,zeroes,4
vsldoi v4,v4,zeroes,4
vsldoi v5,v5,zeroes,4
vsldoi v6,v6,zeroes,4
vsldoi v7,v7,zeroes,4
/* xor with last 1024 bits */
lvx v8,0,r4
lvx v9,off16,r4
VPERM(v8,v8,v8,byteswap)
VPERM(v9,v9,v9,byteswap)
lvx v10,off32,r4
lvx v11,off48,r4
VPERM(v10,v10,v10,byteswap)
VPERM(v11,v11,v11,byteswap)
lvx v12,off64,r4
lvx v13,off80,r4
VPERM(v12,v12,v12,byteswap)
VPERM(v13,v13,v13,byteswap)
lvx v14,off96,r4
lvx v15,off112,r4
VPERM(v14,v14,v14,byteswap)
VPERM(v15,v15,v15,byteswap)
addi r4,r4,8*16
vxor v16,v0,v8
vxor v17,v1,v9
vxor v18,v2,v10
vxor v19,v3,v11
vxor v20,v4,v12
vxor v21,v5,v13
vxor v22,v6,v14
vxor v23,v7,v15
li r0,1
cmpdi r6,0
addi r6,r6,128
bne 1b
/* Work out how many bytes we have left */
andi. r5,r5,127
/* Calculate where in the constant table we need to start */
subfic r6,r5,128
add r3,r3,r6
/* How many 16 byte chunks are in the tail */
srdi r7,r5,4
mtctr r7
/*
* Reduce the previously calculated 1024 bits to 64 bits, shifting
* 32 bits to include the trailing 32 bits of zeros
*/
lvx v0,0,r3
lvx v1,off16,r3
lvx v2,off32,r3
lvx v3,off48,r3
lvx v4,off64,r3
lvx v5,off80,r3
lvx v6,off96,r3
lvx v7,off112,r3
addi r3,r3,8*16
VPMSUMW(v0,v16,v0)
VPMSUMW(v1,v17,v1)
VPMSUMW(v2,v18,v2)
VPMSUMW(v3,v19,v3)
VPMSUMW(v4,v20,v4)
VPMSUMW(v5,v21,v5)
VPMSUMW(v6,v22,v6)
VPMSUMW(v7,v23,v7)
/* Now reduce the tail (0 - 112 bytes) */
cmpdi r7,0
beq 1f
lvx v16,0,r4
lvx v17,0,r3
VPERM(v16,v16,v16,byteswap)
VPMSUMW(v16,v16,v17)
vxor v0,v0,v16
bdz 1f
lvx v16,off16,r4
lvx v17,off16,r3
VPERM(v16,v16,v16,byteswap)
VPMSUMW(v16,v16,v17)
vxor v0,v0,v16
bdz 1f
lvx v16,off32,r4
lvx v17,off32,r3
VPERM(v16,v16,v16,byteswap)
VPMSUMW(v16,v16,v17)
vxor v0,v0,v16
bdz 1f
lvx v16,off48,r4
lvx v17,off48,r3
VPERM(v16,v16,v16,byteswap)
VPMSUMW(v16,v16,v17)
vxor v0,v0,v16
bdz 1f
lvx v16,off64,r4
lvx v17,off64,r3
VPERM(v16,v16,v16,byteswap)
VPMSUMW(v16,v16,v17)
vxor v0,v0,v16
bdz 1f
lvx v16,off80,r4
lvx v17,off80,r3
VPERM(v16,v16,v16,byteswap)
VPMSUMW(v16,v16,v17)
vxor v0,v0,v16
bdz 1f
lvx v16,off96,r4
lvx v17,off96,r3
VPERM(v16,v16,v16,byteswap)
VPMSUMW(v16,v16,v17)
vxor v0,v0,v16
/* Now xor all the parallel chunks together */
1: vxor v0,v0,v1
vxor v2,v2,v3
vxor v4,v4,v5
vxor v6,v6,v7
vxor v0,v0,v2
vxor v4,v4,v6
vxor v0,v0,v4
.Lbarrett_reduction:
/* Barrett constants */
addis r3,r2,.barrett_constants@toc@ha
addi r3,r3,.barrett_constants@toc@l
lvx const1,0,r3
lvx const2,off16,r3
vsldoi v1,v0,v0,8
vxor v0,v0,v1 /* xor two 64 bit results together */
/* shift left one bit */
vspltisb v1,1
vsl v0,v0,v1
vand v0,v0,mask_64bit
/*
* The reflected version of Barrett reduction. Instead of bit
* reflecting our data (which is expensive to do), we bit reflect our
* constants and our algorithm, which means the intermediate data in
* our vector registers goes from 0-63 instead of 63-0. We can reflect
* the algorithm because we don't carry in mod 2 arithmetic.
*/
vand v1,v0,mask_32bit /* bottom 32 bits of a */
VPMSUMD(v1,v1,const1) /* ma */
vand v1,v1,mask_32bit /* bottom 32bits of ma */
VPMSUMD(v1,v1,const2) /* qn */
vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */
/*
* Since we are bit reflected, the result (ie the low 32 bits) is in
* the high 32 bits. We just need to shift it left 4 bytes
* V0 [ 0 1 X 3 ]
* V0 [ 0 X 2 3 ]
*/
vsldoi v0,v0,zeroes,4 /* shift result into top 64 bits of */
/* Get it into r3 */
MFVRD(R3, v0)
.Lout:
subi r6,r1,56+10*16
subi r7,r1,56+2*16
lvx v20,0,r6
lvx v21,off16,r6
lvx v22,off32,r6
lvx v23,off48,r6
lvx v24,off64,r6
lvx v25,off80,r6
lvx v26,off96,r6
lvx v27,off112,r6
lvx v28,0,r7
lvx v29,off16,r7
ld r31,-8(r1)
ld r30,-16(r1)
ld r29,-24(r1)
ld r28,-32(r1)
ld r27,-40(r1)
ld r26,-48(r1)
ld r25,-56(r1)
blr
.Lfirst_warm_up_done:
lvx const1,0,r3
addi r3,r3,16
VPMSUMD(v8,v16,const1)
VPMSUMD(v9,v17,const1)
VPMSUMD(v10,v18,const1)
VPMSUMD(v11,v19,const1)
VPMSUMD(v12,v20,const1)
VPMSUMD(v13,v21,const1)
VPMSUMD(v14,v22,const1)
VPMSUMD(v15,v23,const1)
b .Lsecond_cool_down
.Lshort:
cmpdi r5,0
beq .Lzero
addis r3,r2,.short_constants@toc@ha
addi r3,r3,.short_constants@toc@l
/* Calculate where in the constant table we need to start */
subfic r6,r5,256
add r3,r3,r6
/* How many 16 byte chunks? */
srdi r7,r5,4
mtctr r7
vxor v19,v19,v19
vxor v20,v20,v20
lvx v0,0,r4
lvx v16,0,r3
VPERM(v0,v0,v16,byteswap)
vxor v0,v0,v8 /* xor in initial value */
VPMSUMW(v0,v0,v16)
bdz .Lv0
lvx v1,off16,r4
lvx v17,off16,r3
VPERM(v1,v1,v17,byteswap)
VPMSUMW(v1,v1,v17)
bdz .Lv1
lvx v2,off32,r4
lvx v16,off32,r3
VPERM(v2,v2,v16,byteswap)
VPMSUMW(v2,v2,v16)
bdz .Lv2
lvx v3,off48,r4
lvx v17,off48,r3
VPERM(v3,v3,v17,byteswap)
VPMSUMW(v3,v3,v17)
bdz .Lv3
lvx v4,off64,r4
lvx v16,off64,r3
VPERM(v4,v4,v16,byteswap)
VPMSUMW(v4,v4,v16)
bdz .Lv4
lvx v5,off80,r4
lvx v17,off80,r3
VPERM(v5,v5,v17,byteswap)
VPMSUMW(v5,v5,v17)
bdz .Lv5
lvx v6,off96,r4
lvx v16,off96,r3
VPERM(v6,v6,v16,byteswap)
VPMSUMW(v6,v6,v16)
bdz .Lv6
lvx v7,off112,r4
lvx v17,off112,r3
VPERM(v7,v7,v17,byteswap)
VPMSUMW(v7,v7,v17)
bdz .Lv7
addi r3,r3,128
addi r4,r4,128
lvx v8,0,r4
lvx v16,0,r3
VPERM(v8,v8,v16,byteswap)
VPMSUMW(v8,v8,v16)
bdz .Lv8
lvx v9,off16,r4
lvx v17,off16,r3
VPERM(v9,v9,v17,byteswap)
VPMSUMW(v9,v9,v17)
bdz .Lv9
lvx v10,off32,r4
lvx v16,off32,r3
VPERM(v10,v10,v16,byteswap)
VPMSUMW(v10,v10,v16)
bdz .Lv10
lvx v11,off48,r4
lvx v17,off48,r3
VPERM(v11,v11,v17,byteswap)
VPMSUMW(v11,v11,v17)
bdz .Lv11
lvx v12,off64,r4
lvx v16,off64,r3
VPERM(v12,v12,v16,byteswap)
VPMSUMW(v12,v12,v16)
bdz .Lv12
lvx v13,off80,r4
lvx v17,off80,r3
VPERM(v13,v13,v17,byteswap)
VPMSUMW(v13,v13,v17)
bdz .Lv13
lvx v14,off96,r4
lvx v16,off96,r3
VPERM(v14,v14,v16,byteswap)
VPMSUMW(v14,v14,v16)
bdz .Lv14
lvx v15,off112,r4
lvx v17,off112,r3
VPERM(v15,v15,v17,byteswap)
VPMSUMW(v15,v15,v17)
.Lv15: vxor v19,v19,v15
.Lv14: vxor v20,v20,v14
.Lv13: vxor v19,v19,v13
.Lv12: vxor v20,v20,v12
.Lv11: vxor v19,v19,v11
.Lv10: vxor v20,v20,v10
.Lv9: vxor v19,v19,v9
.Lv8: vxor v20,v20,v8
.Lv7: vxor v19,v19,v7
.Lv6: vxor v20,v20,v6
.Lv5: vxor v19,v19,v5
.Lv4: vxor v20,v20,v4
.Lv3: vxor v19,v19,v3
.Lv2: vxor v20,v20,v2
.Lv1: vxor v19,v19,v1
.Lv0: vxor v20,v20,v0
vxor v0,v19,v20
b .Lbarrett_reduction
.Lzero:
mr r3,r10
b .Lout
FUNC_END(__crc32_vpmsum)
#define CRC_FUNCTION_NAME __crc32c_vpmsum
#define REFLECT
#include "crc32-vpmsum_core.S"


@@ -0,0 +1,850 @@
/*
* Calculate a CRC T10DIF with vpmsum acceleration
*
* Constants generated by crc32-vpmsum, available at
* https://github.com/antonblanchard/crc32-vpmsum
*
* crc32-vpmsum is
* Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM
* and is available under the GPL v2 or later.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
.section .rodata
.balign 16
.byteswap_constant:
/* byte reverse permute constant */
.octa 0x0F0E0D0C0B0A09080706050403020100
.constants:
/* Reduce 262144 kbits to 1024 bits */
/* x^261184 mod p(x), x^261120 mod p(x) */
.octa 0x0000000056d300000000000052550000
/* x^260160 mod p(x), x^260096 mod p(x) */
.octa 0x00000000ee67000000000000a1e40000
/* x^259136 mod p(x), x^259072 mod p(x) */
.octa 0x0000000060830000000000004ad10000
/* x^258112 mod p(x), x^258048 mod p(x) */
.octa 0x000000008cfe0000000000009ab40000
/* x^257088 mod p(x), x^257024 mod p(x) */
.octa 0x000000003e93000000000000fdb50000
/* x^256064 mod p(x), x^256000 mod p(x) */
.octa 0x000000003c2000000000000045480000
/* x^255040 mod p(x), x^254976 mod p(x) */
.octa 0x00000000b1fc0000000000008d690000
/* x^254016 mod p(x), x^253952 mod p(x) */
.octa 0x00000000f82b00000000000024ad0000
/* x^252992 mod p(x), x^252928 mod p(x) */
.octa 0x0000000044420000000000009f1a0000
/* x^251968 mod p(x), x^251904 mod p(x) */
.octa 0x00000000e88c00000000000066ec0000
/* x^250944 mod p(x), x^250880 mod p(x) */
.octa 0x00000000385c000000000000c87d0000
/* x^249920 mod p(x), x^249856 mod p(x) */
.octa 0x000000003227000000000000c8ff0000
/* x^248896 mod p(x), x^248832 mod p(x) */
.octa 0x00000000a9a900000000000033440000
/* x^247872 mod p(x), x^247808 mod p(x) */
.octa 0x00000000abaa00000000000066eb0000
/* x^246848 mod p(x), x^246784 mod p(x) */
.octa 0x000000001ac3000000000000c4ef0000
/* x^245824 mod p(x), x^245760 mod p(x) */
.octa 0x0000000063f000000000000056f30000
/* x^244800 mod p(x), x^244736 mod p(x) */
.octa 0x0000000032cc00000000000002050000
/* x^243776 mod p(x), x^243712 mod p(x) */
.octa 0x00000000f8b5000000000000568e0000
/* x^242752 mod p(x), x^242688 mod p(x) */
.octa 0x000000008db100000000000064290000
/* x^241728 mod p(x), x^241664 mod p(x) */
.octa 0x0000000059ca0000000000006b660000
/* x^240704 mod p(x), x^240640 mod p(x) */
.octa 0x000000005f5c00000000000018f80000
/* x^239680 mod p(x), x^239616 mod p(x) */
.octa 0x0000000061af000000000000b6090000
/* x^238656 mod p(x), x^238592 mod p(x) */
.octa 0x00000000e29e000000000000099a0000
/* x^237632 mod p(x), x^237568 mod p(x) */
.octa 0x000000000975000000000000a8360000
/* x^236608 mod p(x), x^236544 mod p(x) */
.octa 0x0000000043900000000000004f570000
/* x^235584 mod p(x), x^235520 mod p(x) */
.octa 0x00000000f9cd000000000000134c0000
/* x^234560 mod p(x), x^234496 mod p(x) */
.octa 0x000000007c29000000000000ec380000
/* x^233536 mod p(x), x^233472 mod p(x) */
.octa 0x000000004c6a000000000000b0d10000
/* x^232512 mod p(x), x^232448 mod p(x) */
.octa 0x00000000e7290000000000007d3e0000
/* x^231488 mod p(x), x^231424 mod p(x) */
.octa 0x00000000f1ab000000000000f0b20000
/* x^230464 mod p(x), x^230400 mod p(x) */
.octa 0x0000000039db0000000000009c270000
/* x^229440 mod p(x), x^229376 mod p(x) */
.octa 0x000000005e2800000000000092890000
/* x^228416 mod p(x), x^228352 mod p(x) */
.octa 0x00000000d44e000000000000d5ee0000
/* x^227392 mod p(x), x^227328 mod p(x) */
.octa 0x00000000cd0a00000000000041f50000
/* x^226368 mod p(x), x^226304 mod p(x) */
.octa 0x00000000c5b400000000000010520000
/* x^225344 mod p(x), x^225280 mod p(x) */
.octa 0x00000000fd2100000000000042170000
/* x^224320 mod p(x), x^224256 mod p(x) */
.octa 0x000000002f2500000000000095c20000
/* x^223296 mod p(x), x^223232 mod p(x) */
.octa 0x000000001b0100000000000001ce0000
/* x^222272 mod p(x), x^222208 mod p(x) */
.octa 0x000000000d430000000000002aca0000
/* x^221248 mod p(x), x^221184 mod p(x) */
.octa 0x0000000030a6000000000000385e0000
/* x^220224 mod p(x), x^220160 mod p(x) */
.octa 0x00000000e37b0000000000006f7a0000
/* x^219200 mod p(x), x^219136 mod p(x) */
.octa 0x00000000873600000000000024320000
/* x^218176 mod p(x), x^218112 mod p(x) */
.octa 0x00000000e9fb000000000000bd9c0000
/* x^217152 mod p(x), x^217088 mod p(x) */
.octa 0x000000003b9500000000000054bc0000
/* x^216128 mod p(x), x^216064 mod p(x) */
.octa 0x00000000133e000000000000a4660000
/* x^215104 mod p(x), x^215040 mod p(x) */
.octa 0x00000000784500000000000079930000
/* x^214080 mod p(x), x^214016 mod p(x) */
.octa 0x00000000b9800000000000001bb80000
/* x^213056 mod p(x), x^212992 mod p(x) */
.octa 0x00000000687600000000000024400000
/* x^212032 mod p(x), x^211968 mod p(x) */
.octa 0x00000000aff300000000000029e10000
/* x^211008 mod p(x), x^210944 mod p(x) */
.octa 0x0000000024b50000000000005ded0000
/* x^209984 mod p(x), x^209920 mod p(x) */
.octa 0x0000000017e8000000000000b12e0000
/* x^208960 mod p(x), x^208896 mod p(x) */
.octa 0x00000000128400000000000026d20000
/* x^207936 mod p(x), x^207872 mod p(x) */
.octa 0x000000002115000000000000a32a0000
/* x^206912 mod p(x), x^206848 mod p(x) */
.octa 0x000000009595000000000000a1210000
/* x^205888 mod p(x), x^205824 mod p(x) */
.octa 0x00000000281e000000000000ee8b0000
/* x^204864 mod p(x), x^204800 mod p(x) */
.octa 0x0000000006010000000000003d0d0000
/* x^203840 mod p(x), x^203776 mod p(x) */
.octa 0x00000000e2b600000000000034e90000
/* x^202816 mod p(x), x^202752 mod p(x) */
.octa 0x000000001bd40000000000004cdb0000
/* x^201792 mod p(x), x^201728 mod p(x) */
.octa 0x00000000df2800000000000030e90000
/* x^200768 mod p(x), x^200704 mod p(x) */
.octa 0x0000000049c200000000000042590000
/* x^199744 mod p(x), x^199680 mod p(x) */
.octa 0x000000009b97000000000000df950000
/* x^198720 mod p(x), x^198656 mod p(x) */
.octa 0x000000006184000000000000da7b0000
/* x^197696 mod p(x), x^197632 mod p(x) */
.octa 0x00000000461700000000000012510000
/* x^196672 mod p(x), x^196608 mod p(x) */
.octa 0x000000009b40000000000000f37e0000
/* x^195648 mod p(x), x^195584 mod p(x) */
.octa 0x00000000eeb2000000000000ecf10000
/* x^194624 mod p(x), x^194560 mod p(x) */
.octa 0x00000000b2e800000000000050f20000
/* x^193600 mod p(x), x^193536 mod p(x) */
.octa 0x00000000f59a000000000000e0b30000
/* x^192576 mod p(x), x^192512 mod p(x) */
.octa 0x00000000467f0000000000004d5a0000
/* x^191552 mod p(x), x^191488 mod p(x) */
.octa 0x00000000da92000000000000bb010000
/* x^190528 mod p(x), x^190464 mod p(x) */
.octa 0x000000001e1000000000000022a40000
/* x^189504 mod p(x), x^189440 mod p(x) */
.octa 0x0000000058fe000000000000836f0000
/* x^188480 mod p(x), x^188416 mod p(x) */
.octa 0x00000000b9ce000000000000d78d0000
/* x^187456 mod p(x), x^187392 mod p(x) */
.octa 0x0000000022210000000000004f8d0000
/* x^186432 mod p(x), x^186368 mod p(x) */
.octa 0x00000000744600000000000033760000
/* x^185408 mod p(x), x^185344 mod p(x) */
.octa 0x000000001c2e000000000000a1e50000
/* x^184384 mod p(x), x^184320 mod p(x) */
.octa 0x00000000dcc8000000000000a1a40000
/* x^183360 mod p(x), x^183296 mod p(x) */
.octa 0x00000000910f00000000000019a20000
/* x^182336 mod p(x), x^182272 mod p(x) */
.octa 0x0000000055d5000000000000f6ae0000
/* x^181312 mod p(x), x^181248 mod p(x) */
.octa 0x00000000c8ba000000000000a7ac0000
/* x^180288 mod p(x), x^180224 mod p(x) */
.octa 0x0000000031f8000000000000eea20000
/* x^179264 mod p(x), x^179200 mod p(x) */
.octa 0x000000001966000000000000c4d90000
/* x^178240 mod p(x), x^178176 mod p(x) */
.octa 0x00000000b9810000000000002b470000
/* x^177216 mod p(x), x^177152 mod p(x) */
.octa 0x000000008303000000000000f7cf0000
/* x^176192 mod p(x), x^176128 mod p(x) */
.octa 0x000000002ce500000000000035b30000
/* x^175168 mod p(x), x^175104 mod p(x) */
.octa 0x000000002fae0000000000000c7c0000
/* x^174144 mod p(x), x^174080 mod p(x) */
.octa 0x00000000f50c0000000000009edf0000
/* x^173120 mod p(x), x^173056 mod p(x) */
.octa 0x00000000714f00000000000004cd0000
/* x^172096 mod p(x), x^172032 mod p(x) */
.octa 0x00000000c161000000000000541b0000
/* x^171072 mod p(x), x^171008 mod p(x) */
.octa 0x0000000021c8000000000000e2700000
/* x^170048 mod p(x), x^169984 mod p(x) */
.octa 0x00000000b93d00000000000009a60000
/* x^169024 mod p(x), x^168960 mod p(x) */
.octa 0x00000000fbcf000000000000761c0000
/* x^168000 mod p(x), x^167936 mod p(x) */
.octa 0x0000000026350000000000009db30000
/* x^166976 mod p(x), x^166912 mod p(x) */
.octa 0x00000000b64f0000000000003e9f0000
/* x^165952 mod p(x), x^165888 mod p(x) */
.octa 0x00000000bd0e00000000000078590000
/* x^164928 mod p(x), x^164864 mod p(x) */
.octa 0x00000000d9360000000000008bc80000
/* x^163904 mod p(x), x^163840 mod p(x) */
.octa 0x000000002f140000000000008c9f0000
/* x^162880 mod p(x), x^162816 mod p(x) */
.octa 0x000000006a270000000000006af70000
/* x^161856 mod p(x), x^161792 mod p(x) */
.octa 0x000000006685000000000000e5210000
/* x^160832 mod p(x), x^160768 mod p(x) */
.octa 0x0000000062da00000000000008290000
/* x^159808 mod p(x), x^159744 mod p(x) */
.octa 0x00000000bb4b000000000000e4d00000
/* x^158784 mod p(x), x^158720 mod p(x) */
.octa 0x00000000d2490000000000004ae10000
/* x^157760 mod p(x), x^157696 mod p(x) */
.octa 0x00000000c85b00000000000000e70000
/* x^156736 mod p(x), x^156672 mod p(x) */
.octa 0x00000000c37a00000000000015650000
/* x^155712 mod p(x), x^155648 mod p(x) */
.octa 0x0000000018530000000000001c2f0000
/* x^154688 mod p(x), x^154624 mod p(x) */
.octa 0x00000000b46600000000000037bd0000
/* x^153664 mod p(x), x^153600 mod p(x) */
.octa 0x00000000439b00000000000012190000
/* x^152640 mod p(x), x^152576 mod p(x) */
.octa 0x00000000b1260000000000005ece0000
/* x^151616 mod p(x), x^151552 mod p(x) */
.octa 0x00000000d8110000000000002a5e0000
/* x^150592 mod p(x), x^150528 mod p(x) */
.octa 0x00000000099f00000000000052330000
/* x^149568 mod p(x), x^149504 mod p(x) */
.octa 0x00000000f9f9000000000000f9120000
/* x^148544 mod p(x), x^148480 mod p(x) */
.octa 0x000000005cc00000000000000ddc0000
/* x^147520 mod p(x), x^147456 mod p(x) */
.octa 0x00000000343b00000000000012200000
/* x^146496 mod p(x), x^146432 mod p(x) */
.octa 0x000000009222000000000000d12b0000
/* x^145472 mod p(x), x^145408 mod p(x) */
.octa 0x00000000d781000000000000eb2d0000
/* x^144448 mod p(x), x^144384 mod p(x) */
.octa 0x000000000bf400000000000058970000
/* x^143424 mod p(x), x^143360 mod p(x) */
.octa 0x00000000094200000000000013690000
/* x^142400 mod p(x), x^142336 mod p(x) */
.octa 0x00000000d55100000000000051950000
/* x^141376 mod p(x), x^141312 mod p(x) */
.octa 0x000000008f11000000000000954b0000
/* x^140352 mod p(x), x^140288 mod p(x) */
.octa 0x00000000140f000000000000b29e0000
/* x^139328 mod p(x), x^139264 mod p(x) */
.octa 0x00000000c6db000000000000db5d0000
/* x^138304 mod p(x), x^138240 mod p(x) */
.octa 0x00000000715b000000000000dfaf0000
/* x^137280 mod p(x), x^137216 mod p(x) */
.octa 0x000000000dea000000000000e3b60000
/* x^136256 mod p(x), x^136192 mod p(x) */
.octa 0x000000006f94000000000000ddaf0000
/* x^135232 mod p(x), x^135168 mod p(x) */
.octa 0x0000000024e1000000000000e4f70000
/* x^134208 mod p(x), x^134144 mod p(x) */
.octa 0x000000008810000000000000aa110000
/* x^133184 mod p(x), x^133120 mod p(x) */
.octa 0x0000000030c2000000000000a8e60000
/* x^132160 mod p(x), x^132096 mod p(x) */
.octa 0x00000000e6d0000000000000ccf30000
/* x^131136 mod p(x), x^131072 mod p(x) */
.octa 0x000000004da000000000000079bf0000
/* x^130112 mod p(x), x^130048 mod p(x) */
.octa 0x000000007759000000000000b3a30000
/* x^129088 mod p(x), x^129024 mod p(x) */
.octa 0x00000000597400000000000028790000
/* x^128064 mod p(x), x^128000 mod p(x) */
.octa 0x000000007acd000000000000b5820000
/* x^127040 mod p(x), x^126976 mod p(x) */
.octa 0x00000000e6e400000000000026ad0000
/* x^126016 mod p(x), x^125952 mod p(x) */
.octa 0x000000006d49000000000000985b0000
/* x^124992 mod p(x), x^124928 mod p(x) */
.octa 0x000000000f0800000000000011520000
/* x^123968 mod p(x), x^123904 mod p(x) */
.octa 0x000000002c7f000000000000846c0000
/* x^122944 mod p(x), x^122880 mod p(x) */
.octa 0x000000005ce7000000000000ae1d0000
/* x^121920 mod p(x), x^121856 mod p(x) */
.octa 0x00000000d4cb000000000000e21d0000
/* x^120896 mod p(x), x^120832 mod p(x) */
.octa 0x000000003a2300000000000019bb0000
/* x^119872 mod p(x), x^119808 mod p(x) */
.octa 0x000000000e1700000000000095290000
/* x^118848 mod p(x), x^118784 mod p(x) */
.octa 0x000000006e6400000000000050d20000
/* x^117824 mod p(x), x^117760 mod p(x) */
.octa 0x000000008d5c0000000000000cd10000
/* x^116800 mod p(x), x^116736 mod p(x) */
.octa 0x00000000ef310000000000007b570000
/* x^115776 mod p(x), x^115712 mod p(x) */
.octa 0x00000000645d00000000000053d60000
/* x^114752 mod p(x), x^114688 mod p(x) */
.octa 0x0000000018fc00000000000077510000
/* x^113728 mod p(x), x^113664 mod p(x) */
.octa 0x000000000cb3000000000000a7b70000
/* x^112704 mod p(x), x^112640 mod p(x) */
.octa 0x00000000991b000000000000d0780000
/* x^111680 mod p(x), x^111616 mod p(x) */
.octa 0x00000000845a000000000000be3c0000
/* x^110656 mod p(x), x^110592 mod p(x) */
.octa 0x00000000d3a9000000000000df020000
/* x^109632 mod p(x), x^109568 mod p(x) */
.octa 0x0000000017d7000000000000063e0000
/* x^108608 mod p(x), x^108544 mod p(x) */
.octa 0x000000007a860000000000008ab40000
/* x^107584 mod p(x), x^107520 mod p(x) */
.octa 0x00000000fd7c000000000000c7bd0000
/* x^106560 mod p(x), x^106496 mod p(x) */
.octa 0x00000000a56b000000000000efd60000
/* x^105536 mod p(x), x^105472 mod p(x) */
.octa 0x0000000010e400000000000071380000
/* x^104512 mod p(x), x^104448 mod p(x) */
.octa 0x00000000994500000000000004d30000
/* x^103488 mod p(x), x^103424 mod p(x) */
.octa 0x00000000b83c0000000000003b0e0000
/* x^102464 mod p(x), x^102400 mod p(x) */
.octa 0x00000000d6c10000000000008b020000
/* x^101440 mod p(x), x^101376 mod p(x) */
.octa 0x000000009efc000000000000da940000
/* x^100416 mod p(x), x^100352 mod p(x) */
.octa 0x000000005e87000000000000f9f70000
/* x^99392 mod p(x), x^99328 mod p(x) */
.octa 0x000000006c9b00000000000045e40000
/* x^98368 mod p(x), x^98304 mod p(x) */
.octa 0x00000000178a00000000000083940000
/* x^97344 mod p(x), x^97280 mod p(x) */
.octa 0x00000000f0c8000000000000f0a00000
/* x^96320 mod p(x), x^96256 mod p(x) */
.octa 0x00000000f699000000000000b74b0000
/* x^95296 mod p(x), x^95232 mod p(x) */
.octa 0x00000000316d000000000000c1cf0000
/* x^94272 mod p(x), x^94208 mod p(x) */
.octa 0x00000000987e00000000000072680000
/* x^93248 mod p(x), x^93184 mod p(x) */
.octa 0x00000000acff000000000000e0ab0000
/* x^92224 mod p(x), x^92160 mod p(x) */
.octa 0x00000000a1f6000000000000c5a80000
/* x^91200 mod p(x), x^91136 mod p(x) */
.octa 0x0000000061bd000000000000cf690000
/* x^90176 mod p(x), x^90112 mod p(x) */
.octa 0x00000000c9f2000000000000cbcc0000
/* x^89152 mod p(x), x^89088 mod p(x) */
.octa 0x000000005a33000000000000de050000
/* x^88128 mod p(x), x^88064 mod p(x) */
.octa 0x00000000e416000000000000ccd70000
/* x^87104 mod p(x), x^87040 mod p(x) */
.octa 0x0000000058930000000000002f670000
/* x^86080 mod p(x), x^86016 mod p(x) */
.octa 0x00000000a9d3000000000000152f0000
/* x^85056 mod p(x), x^84992 mod p(x) */
.octa 0x00000000c114000000000000ecc20000
/* x^84032 mod p(x), x^83968 mod p(x) */
.octa 0x00000000b9270000000000007c890000
/* x^83008 mod p(x), x^82944 mod p(x) */
.octa 0x000000002e6000000000000006ee0000
/* x^81984 mod p(x), x^81920 mod p(x) */
.octa 0x00000000dfc600000000000009100000
/* x^80960 mod p(x), x^80896 mod p(x) */
.octa 0x000000004911000000000000ad4e0000
/* x^79936 mod p(x), x^79872 mod p(x) */
.octa 0x00000000ae1b000000000000b04d0000
/* x^78912 mod p(x), x^78848 mod p(x) */
.octa 0x0000000005fa000000000000e9900000
/* x^77888 mod p(x), x^77824 mod p(x) */
.octa 0x0000000004a1000000000000cc6f0000
/* x^76864 mod p(x), x^76800 mod p(x) */
.octa 0x00000000af73000000000000ed110000
/* x^75840 mod p(x), x^75776 mod p(x) */
.octa 0x0000000082530000000000008f7e0000
/* x^74816 mod p(x), x^74752 mod p(x) */
.octa 0x00000000cfdc000000000000594f0000
/* x^73792 mod p(x), x^73728 mod p(x) */
.octa 0x00000000a6b6000000000000a8750000
/* x^72768 mod p(x), x^72704 mod p(x) */
.octa 0x00000000fd76000000000000aa0c0000
/* x^71744 mod p(x), x^71680 mod p(x) */
.octa 0x0000000006f500000000000071db0000
/* x^70720 mod p(x), x^70656 mod p(x) */
.octa 0x0000000037ca000000000000ab0c0000
/* x^69696 mod p(x), x^69632 mod p(x) */
.octa 0x00000000d7ab000000000000b7a00000
/* x^68672 mod p(x), x^68608 mod p(x) */
.octa 0x00000000440800000000000090d30000
/* x^67648 mod p(x), x^67584 mod p(x) */
.octa 0x00000000186100000000000054730000
/* x^66624 mod p(x), x^66560 mod p(x) */
.octa 0x000000007368000000000000a3a20000
/* x^65600 mod p(x), x^65536 mod p(x) */
.octa 0x0000000026d0000000000000f9040000
/* x^64576 mod p(x), x^64512 mod p(x) */
.octa 0x00000000fe770000000000009c0a0000
/* x^63552 mod p(x), x^63488 mod p(x) */
.octa 0x000000002cba000000000000d1e70000
/* x^62528 mod p(x), x^62464 mod p(x) */
.octa 0x00000000f8bd0000000000005ac10000
/* x^61504 mod p(x), x^61440 mod p(x) */
.octa 0x000000007372000000000000d68d0000
/* x^60480 mod p(x), x^60416 mod p(x) */
.octa 0x00000000f37f00000000000089f60000
/* x^59456 mod p(x), x^59392 mod p(x) */
.octa 0x00000000078400000000000008a90000
/* x^58432 mod p(x), x^58368 mod p(x) */
.octa 0x00000000d3e400000000000042360000
/* x^57408 mod p(x), x^57344 mod p(x) */
.octa 0x00000000eba800000000000092d50000
/* x^56384 mod p(x), x^56320 mod p(x) */
.octa 0x00000000afbe000000000000b4d50000
/* x^55360 mod p(x), x^55296 mod p(x) */
.octa 0x00000000d8ca000000000000c9060000
/* x^54336 mod p(x), x^54272 mod p(x) */
.octa 0x00000000c2d00000000000008f4f0000
/* x^53312 mod p(x), x^53248 mod p(x) */
.octa 0x00000000373200000000000028690000
/* x^52288 mod p(x), x^52224 mod p(x) */
.octa 0x0000000046ae000000000000c3b30000
/* x^51264 mod p(x), x^51200 mod p(x) */
.octa 0x00000000b243000000000000f8700000
/* x^50240 mod p(x), x^50176 mod p(x) */
.octa 0x00000000f7f500000000000029eb0000
/* x^49216 mod p(x), x^49152 mod p(x) */
.octa 0x000000000c7e000000000000fe730000
/* x^48192 mod p(x), x^48128 mod p(x) */
.octa 0x00000000c38200000000000096000000
/* x^47168 mod p(x), x^47104 mod p(x) */
.octa 0x000000008956000000000000683c0000
/* x^46144 mod p(x), x^46080 mod p(x) */
.octa 0x00000000422d0000000000005f1e0000
/* x^45120 mod p(x), x^45056 mod p(x) */
.octa 0x00000000ac0f0000000000006f810000
/* x^44096 mod p(x), x^44032 mod p(x) */
.octa 0x00000000ce30000000000000031f0000
/* x^43072 mod p(x), x^43008 mod p(x) */
.octa 0x000000003d43000000000000455a0000
/* x^42048 mod p(x), x^41984 mod p(x) */
.octa 0x000000007ebe000000000000a6050000
/* x^41024 mod p(x), x^40960 mod p(x) */
.octa 0x00000000976e00000000000077eb0000
/* x^40000 mod p(x), x^39936 mod p(x) */
.octa 0x000000000872000000000000389c0000
/* x^38976 mod p(x), x^38912 mod p(x) */
.octa 0x000000008979000000000000c7b20000
/* x^37952 mod p(x), x^37888 mod p(x) */
.octa 0x000000005c1e0000000000001d870000
/* x^36928 mod p(x), x^36864 mod p(x) */
.octa 0x00000000aebb00000000000045810000
/* x^35904 mod p(x), x^35840 mod p(x) */
.octa 0x000000004f7e0000000000006d4a0000
/* x^34880 mod p(x), x^34816 mod p(x) */
.octa 0x00000000ea98000000000000b9200000
/* x^33856 mod p(x), x^33792 mod p(x) */
.octa 0x00000000f39600000000000022f20000
/* x^32832 mod p(x), x^32768 mod p(x) */
.octa 0x000000000bc500000000000041ca0000
/* x^31808 mod p(x), x^31744 mod p(x) */
.octa 0x00000000786400000000000078500000
/* x^30784 mod p(x), x^30720 mod p(x) */
.octa 0x00000000be970000000000009e7e0000
/* x^29760 mod p(x), x^29696 mod p(x) */
.octa 0x00000000dd6d000000000000a53c0000
/* x^28736 mod p(x), x^28672 mod p(x) */
.octa 0x000000004c3f00000000000039340000
/* x^27712 mod p(x), x^27648 mod p(x) */
.octa 0x0000000093a4000000000000b58e0000
/* x^26688 mod p(x), x^26624 mod p(x) */
.octa 0x0000000050fb00000000000062d40000
/* x^25664 mod p(x), x^25600 mod p(x) */
.octa 0x00000000f505000000000000a26f0000
/* x^24640 mod p(x), x^24576 mod p(x) */
.octa 0x0000000064f900000000000065e60000
/* x^23616 mod p(x), x^23552 mod p(x) */
.octa 0x00000000e8c2000000000000aad90000
/* x^22592 mod p(x), x^22528 mod p(x) */
.octa 0x00000000720b000000000000a3b00000
/* x^21568 mod p(x), x^21504 mod p(x) */
.octa 0x00000000e992000000000000d2680000
/* x^20544 mod p(x), x^20480 mod p(x) */
.octa 0x000000009132000000000000cf4c0000
/* x^19520 mod p(x), x^19456 mod p(x) */
.octa 0x00000000608a00000000000076610000
/* x^18496 mod p(x), x^18432 mod p(x) */
.octa 0x000000009948000000000000fb9f0000
/* x^17472 mod p(x), x^17408 mod p(x) */
.octa 0x00000000173000000000000003770000
/* x^16448 mod p(x), x^16384 mod p(x) */
.octa 0x000000006fe300000000000004880000
/* x^15424 mod p(x), x^15360 mod p(x) */
.octa 0x00000000e15300000000000056a70000
/* x^14400 mod p(x), x^14336 mod p(x) */
.octa 0x0000000092d60000000000009dfd0000
/* x^13376 mod p(x), x^13312 mod p(x) */
.octa 0x0000000002fd00000000000074c80000
/* x^12352 mod p(x), x^12288 mod p(x) */
.octa 0x00000000c78b000000000000a3ec0000
/* x^11328 mod p(x), x^11264 mod p(x) */
.octa 0x000000009262000000000000b3530000
/* x^10304 mod p(x), x^10240 mod p(x) */
.octa 0x0000000084f200000000000047bf0000
/* x^9280 mod p(x), x^9216 mod p(x) */
.octa 0x0000000067ee000000000000e97c0000
/* x^8256 mod p(x), x^8192 mod p(x) */
.octa 0x00000000535b00000000000091e10000
/* x^7232 mod p(x), x^7168 mod p(x) */
.octa 0x000000007ebb00000000000055060000
/* x^6208 mod p(x), x^6144 mod p(x) */
.octa 0x00000000c6a1000000000000fd360000
/* x^5184 mod p(x), x^5120 mod p(x) */
.octa 0x000000001be500000000000055860000
/* x^4160 mod p(x), x^4096 mod p(x) */
.octa 0x00000000ae0e0000000000005bd00000
/* x^3136 mod p(x), x^3072 mod p(x) */
.octa 0x0000000022040000000000008db20000
/* x^2112 mod p(x), x^2048 mod p(x) */
.octa 0x00000000c9eb000000000000efe20000
/* x^1088 mod p(x), x^1024 mod p(x) */
.octa 0x0000000039b400000000000051d10000
.short_constants:
/* Reduce final 1024-2048 bits to 64 bits, shifting 32 bits to include the trailing 32 bits of zeros */
/* x^2048 mod p(x), x^2016 mod p(x), x^1984 mod p(x), x^1952 mod p(x) */
.octa 0xefe20000dccf00009440000033590000
/* x^1920 mod p(x), x^1888 mod p(x), x^1856 mod p(x), x^1824 mod p(x) */
.octa 0xee6300002f3f000062180000e0ed0000
/* x^1792 mod p(x), x^1760 mod p(x), x^1728 mod p(x), x^1696 mod p(x) */
.octa 0xcf5f000017ef0000ccbe000023d30000
/* x^1664 mod p(x), x^1632 mod p(x), x^1600 mod p(x), x^1568 mod p(x) */
.octa 0x6d0c0000a30e00000920000042630000
/* x^1536 mod p(x), x^1504 mod p(x), x^1472 mod p(x), x^1440 mod p(x) */
.octa 0x21d30000932b0000a7a00000efcc0000
/* x^1408 mod p(x), x^1376 mod p(x), x^1344 mod p(x), x^1312 mod p(x) */
.octa 0x10be00000b310000666f00000d1c0000
/* x^1280 mod p(x), x^1248 mod p(x), x^1216 mod p(x), x^1184 mod p(x) */
.octa 0x1f240000ce9e0000caad0000589e0000
/* x^1152 mod p(x), x^1120 mod p(x), x^1088 mod p(x), x^1056 mod p(x) */
.octa 0x29610000d02b000039b400007cf50000
/* x^1024 mod p(x), x^992 mod p(x), x^960 mod p(x), x^928 mod p(x) */
.octa 0x51d100009d9d00003c0e0000bfd60000
/* x^896 mod p(x), x^864 mod p(x), x^832 mod p(x), x^800 mod p(x) */
.octa 0xda390000ceae000013830000713c0000
/* x^768 mod p(x), x^736 mod p(x), x^704 mod p(x), x^672 mod p(x) */
.octa 0xb67800001e16000085c0000080a60000
/* x^640 mod p(x), x^608 mod p(x), x^576 mod p(x), x^544 mod p(x) */
.octa 0x0db40000f7f90000371d0000e6580000
/* x^512 mod p(x), x^480 mod p(x), x^448 mod p(x), x^416 mod p(x) */
.octa 0x87e70000044c0000aadb0000a4970000
/* x^384 mod p(x), x^352 mod p(x), x^320 mod p(x), x^288 mod p(x) */
.octa 0x1f990000ad180000d8b30000e7b50000
/* x^256 mod p(x), x^224 mod p(x), x^192 mod p(x), x^160 mod p(x) */
.octa 0xbe6c00006ee300004c1a000006df0000
/* x^128 mod p(x), x^96 mod p(x), x^64 mod p(x), x^32 mod p(x) */
.octa 0xfb0b00002d560000136800008bb70000
.barrett_constants:
/* Barrett constant m - (4^32)/n */
.octa 0x000000000000000000000001f65a57f8 /* x^64 div p(x) */
/* Barrett constant n */
.octa 0x0000000000000000000000018bb70000
#define CRC_FUNCTION_NAME __crct10dif_vpmsum
#include "crc32-vpmsum_core.S"


@ -0,0 +1,128 @@
/*
* Calculate a CRC T10-DIF with vpmsum acceleration
*
* Copyright 2017, Daniel Axtens, IBM Corporation.
* [based on crc32c-vpmsum_glue.c]
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*/
#include <linux/crc-t10dif.h>
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/cpufeature.h>
#include <asm/switch_to.h>
#define VMX_ALIGN 16
#define VMX_ALIGN_MASK (VMX_ALIGN-1)
#define VECTOR_BREAKPOINT 64
u32 __crct10dif_vpmsum(u32 crc, unsigned char const *p, size_t len);
static u16 crct10dif_vpmsum(u16 crci, unsigned char const *p, size_t len)
{
unsigned int prealign;
unsigned int tail;
u32 crc = crci;
if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || in_interrupt())
return crc_t10dif_generic(crc, p, len);
if ((unsigned long)p & VMX_ALIGN_MASK) {
prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK);
crc = crc_t10dif_generic(crc, p, prealign);
len -= prealign;
p += prealign;
}
if (len & ~VMX_ALIGN_MASK) {
crc <<= 16;
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
crc = __crct10dif_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
disable_kernel_altivec();
pagefault_enable();
preempt_enable();
crc >>= 16;
}
tail = len & VMX_ALIGN_MASK;
if (tail) {
p += len & ~VMX_ALIGN_MASK;
crc = crc_t10dif_generic(crc, p, tail);
}
return crc & 0xffff;
}
static int crct10dif_vpmsum_init(struct shash_desc *desc)
{
u16 *crc = shash_desc_ctx(desc);
*crc = 0;
return 0;
}
static int crct10dif_vpmsum_update(struct shash_desc *desc, const u8 *data,
unsigned int length)
{
u16 *crc = shash_desc_ctx(desc);
*crc = crct10dif_vpmsum(*crc, data, length);
return 0;
}
static int crct10dif_vpmsum_final(struct shash_desc *desc, u8 *out)
{
u16 *crcp = shash_desc_ctx(desc);
*(u16 *)out = *crcp;
return 0;
}
static struct shash_alg alg = {
.init = crct10dif_vpmsum_init,
.update = crct10dif_vpmsum_update,
.final = crct10dif_vpmsum_final,
.descsize = CRC_T10DIF_DIGEST_SIZE,
.digestsize = CRC_T10DIF_DIGEST_SIZE,
.base = {
.cra_name = "crct10dif",
.cra_driver_name = "crct10dif-vpmsum",
.cra_priority = 200,
.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init crct10dif_vpmsum_mod_init(void)
{
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
return crypto_register_shash(&alg);
}
static void __exit crct10dif_vpmsum_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crct10dif_vpmsum_mod_init);
module_exit(crct10dif_vpmsum_mod_fini);
MODULE_AUTHOR("Daniel Axtens <dja@axtens.net>");
MODULE_DESCRIPTION("CRCT10DIF using vector polynomial multiply-sum instructions");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crct10dif");
MODULE_ALIAS_CRYPTO("crct10dif-vpmsum");


@ -65,7 +65,6 @@
#include <linux/linkage.h>
#include <asm/inst.h>
#define CONCAT(a,b) a##b
#define VMOVDQ vmovdqu
#define xdata0 %xmm0
@ -92,8 +91,6 @@
#define num_bytes %r8
#define tmp %r10
#define DDQ(i) CONCAT(ddq_add_,i)
#define XMM(i) CONCAT(%xmm, i)
#define DDQ_DATA 0
#define XDATA 1
#define KEY_128 1
@ -131,12 +128,12 @@ ddq_add_8:
/* generate a unique variable for ddq_add_x */
.macro setddq n
var_ddq_add = DDQ(\n)
var_ddq_add = ddq_add_\n
.endm
/* generate a unique variable for xmm register */
.macro setxdata n
var_xdata = XMM(\n)
var_xdata = %xmm\n
.endm
/* club the numeric 'id' to the symbol 'name' */


@ -1522,7 +1522,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[2 * 4];
le128 buf[2 * 4];
struct xts_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
@ -1540,7 +1540,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[2 * 4];
le128 buf[2 * 4];
struct xts_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),


@ -27,6 +27,7 @@
#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/skcipher.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
@ -457,7 +458,7 @@ void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
le128 ivblk = *iv;
/* generate next IV */
le128_gf128mul_x_ble(iv, &ivblk);
gf128mul_x_ble(iv, &ivblk);
/* CC <- T xor C */
u128_xor(dst, src, (u128 *)&ivblk);


@ -328,7 +328,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[SERPENT_PARALLEL_BLOCKS];
le128 buf[SERPENT_PARALLEL_BLOCKS];
struct crypt_priv crypt_ctx = {
.ctx = &ctx->crypt_ctx,
.fpu_enabled = false,
@ -355,7 +355,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[SERPENT_PARALLEL_BLOCKS];
le128 buf[SERPENT_PARALLEL_BLOCKS];
struct crypt_priv crypt_ctx = {
.ctx = &ctx->crypt_ctx,
.fpu_enabled = false,


@ -296,7 +296,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[3];
le128 buf[3];
struct xts_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
@ -314,7 +314,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[3];
le128 buf[3];
struct xts_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),


@ -125,16 +125,6 @@ static inline void le128_inc(le128 *i)
i->b = cpu_to_le64(b);
}
static inline void le128_gf128mul_x_ble(le128 *dst, const le128 *src)
{
u64 a = le64_to_cpu(src->a);
u64 b = le64_to_cpu(src->b);
u64 _tt = ((s64)a >> 63) & 0x87;
dst->a = cpu_to_le64((a << 1) ^ (b >> 63));
dst->b = cpu_to_le64((b << 1) ^ _tt);
}
extern int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct scatterlist *dst,


@ -374,7 +374,6 @@ config CRYPTO_XTS
tristate "XTS support"
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER
select CRYPTO_GF128MUL
select CRYPTO_ECB
help
XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain,
@ -513,6 +512,23 @@ config CRYPTO_CRCT10DIF_PCLMUL
'crct10dif-pclmul' module, which is faster when computing the
crct10dif checksum as compared with the generic table implementation.
config CRYPTO_CRCT10DIF_VPMSUM
tristate "CRC32T10DIF powerpc64 hardware acceleration"
depends on PPC64 && ALTIVEC && CRC_T10DIF
select CRYPTO_HASH
help
CRC T10DIF algorithm implemented using vector polynomial
multiply-sum (vpmsum) instructions, introduced in POWER8. Enable on
POWER8 and newer processors for improved performance.
config CRYPTO_VPMSUM_TESTER
tristate "Powerpc64 vpmsum hardware acceleration tester"
depends on CRYPTO_CRCT10DIF_VPMSUM && CRYPTO_CRC32C_VPMSUM
help
Stress test for CRC32c and CRC-T10DIF algorithms implemented with
POWER8 vpmsum instructions.
Unless you are testing these algorithms, you don't need this.
config CRYPTO_GHASH
tristate "GHASH digest algorithm"
select CRYPTO_GF128MUL


@ -166,5 +166,34 @@ int crypto_unregister_acomp(struct acomp_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
int crypto_register_acomps(struct acomp_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_register_acomp(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_acomp(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_acomps);
void crypto_unregister_acomps(struct acomp_alg *algs, int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_unregister_acomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");


@ -160,11 +160,11 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (sock->state == SS_CONNECTED)
return -EINVAL;
if (addr_len != sizeof(*sa))
if (addr_len < sizeof(*sa))
return -EINVAL;
sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
sa->salg_name[sizeof(sa->salg_name) - 1] = 0;
sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0;
type = alg_get_type(sa->salg_type);
if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) {


@ -963,11 +963,11 @@ void crypto_inc(u8 *a, unsigned int size)
u32 c;
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
!((unsigned long)b & (__alignof__(*b) - 1)))
IS_ALIGNED((unsigned long)b, __alignof__(*b)))
for (; size >= 4; size -= 4) {
c = be32_to_cpu(*--b) + 1;
*b = cpu_to_be32(c);
if (c)
if (likely(c))
return;
}


@ -45,6 +45,11 @@ struct aead_async_req {
char iv[];
};
struct aead_tfm {
struct crypto_aead *aead;
bool has_key;
};
struct aead_ctx {
struct aead_sg_list tsgl;
struct aead_async_rsgl first_rsgl;
@ -723,24 +728,146 @@ static struct proto_ops algif_aead_ops = {
.poll = aead_poll,
};
static int aead_check_key(struct socket *sock)
{
int err = 0;
struct sock *psk;
struct alg_sock *pask;
struct aead_tfm *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
lock_sock(sk);
if (ask->refcnt)
goto unlock_child;
psk = ask->parent;
pask = alg_sk(ask->parent);
tfm = pask->private;
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
if (!tfm->has_key)
goto unlock;
if (!pask->refcnt++)
sock_hold(psk);
ask->refcnt = 1;
sock_put(psk);
err = 0;
unlock:
release_sock(psk);
unlock_child:
release_sock(sk);
return err;
}
static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t size)
{
int err;
err = aead_check_key(sock);
if (err)
return err;
return aead_sendmsg(sock, msg, size);
}
static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
int offset, size_t size, int flags)
{
int err;
err = aead_check_key(sock);
if (err)
return err;
return aead_sendpage(sock, page, offset, size, flags);
}
static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
int err;
err = aead_check_key(sock);
if (err)
return err;
return aead_recvmsg(sock, msg, ignored, flags);
}
static struct proto_ops algif_aead_ops_nokey = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.getsockopt = sock_no_getsockopt,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.accept = sock_no_accept,
.setsockopt = sock_no_setsockopt,
.release = af_alg_release,
.sendmsg = aead_sendmsg_nokey,
.sendpage = aead_sendpage_nokey,
.recvmsg = aead_recvmsg_nokey,
.poll = aead_poll,
};
static void *aead_bind(const char *name, u32 type, u32 mask)
{
return crypto_alloc_aead(name, type, mask);
struct aead_tfm *tfm;
struct crypto_aead *aead;
tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
if (!tfm)
return ERR_PTR(-ENOMEM);
aead = crypto_alloc_aead(name, type, mask);
if (IS_ERR(aead)) {
kfree(tfm);
return ERR_CAST(aead);
}
tfm->aead = aead;
return tfm;
}
static void aead_release(void *private)
{
crypto_free_aead(private);
struct aead_tfm *tfm = private;
crypto_free_aead(tfm->aead);
kfree(tfm);
}
static int aead_setauthsize(void *private, unsigned int authsize)
{
return crypto_aead_setauthsize(private, authsize);
struct aead_tfm *tfm = private;
return crypto_aead_setauthsize(tfm->aead, authsize);
}
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
return crypto_aead_setkey(private, key, keylen);
struct aead_tfm *tfm = private;
int err;
err = crypto_aead_setkey(tfm->aead, key, keylen);
tfm->has_key = !err;
return err;
}
static void aead_sock_destruct(struct sock *sk)
@ -757,12 +884,14 @@ static void aead_sock_destruct(struct sock *sk)
af_alg_release_parent(sk);
}
static int aead_accept_parent(void *private, struct sock *sk)
static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
struct aead_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
unsigned int ivlen = crypto_aead_ivsize(private);
struct aead_tfm *tfm = private;
struct crypto_aead *aead = tfm->aead;
unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead);
unsigned int ivlen = crypto_aead_ivsize(aead);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@ -789,7 +918,7 @@ static int aead_accept_parent(void *private, struct sock *sk)
ask->private = ctx;
aead_request_set_tfm(&ctx->aead_req, private);
aead_request_set_tfm(&ctx->aead_req, aead);
aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);
@ -798,13 +927,25 @@ static int aead_accept_parent(void *private, struct sock *sk)
return 0;
}
static int aead_accept_parent(void *private, struct sock *sk)
{
struct aead_tfm *tfm = private;
if (!tfm->has_key)
return -ENOKEY;
return aead_accept_parent_nokey(private, sk);
}
static const struct af_alg_type algif_type_aead = {
.bind = aead_bind,
.release = aead_release,
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.accept = aead_accept_parent,
.accept_nokey = aead_accept_parent_nokey,
.ops = &algif_aead_ops,
.ops_nokey = &algif_aead_ops_nokey,
.name = "aead",
.owner = THIS_MODULE
};
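With this change, accept(2) on a bound "aead" socket fails with ENOKEY until a key has been set (aead_accept_parent() checks tfm->has_key), and keyless sockets are routed through the *_nokey ops. A hedged userspace sketch of the ordering this now enforces; the algorithm name and the all-zero key are purely illustrative:

#include <stdint.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

/* Hypothetical helper: returns an operation fd for AEAD requests, or -1.
 * Error-path cleanup of tfmfd is omitted for brevity. */
static int open_aead_op_fd(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "aead",
                .salg_name   = "gcm(aes)",      /* illustrative algorithm */
        };
        static const uint8_t key[16] = { 0 };   /* illustrative key */
        int tfmfd, opfd;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        if (tfmfd < 0)
                return -1;
        if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
                return -1;

        /* The key must be set before accept(); with this change a keyless
         * accept() on an aead socket fails with ENOKEY. */
        if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key)) < 0)
                return -1;

        opfd = accept(tfmfd, NULL, NULL);
        return opfd;    /* caller still sets IV/authsize and sends data */
}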


@ -10,6 +10,7 @@
*
*/
#include <crypto/algapi.h>
#include <crypto/cbc.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
@ -108,8 +109,10 @@ static void crypto_cbc_free(struct skcipher_instance *inst)
static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
struct crypto_attr_type *algt;
struct crypto_spawn *spawn;
struct crypto_alg *alg;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
@ -120,8 +123,16 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
if (!inst)
return -ENOMEM;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
algt = crypto_get_attr_type(tb);
err = PTR_ERR(algt);
if (IS_ERR(algt))
goto err_free_inst;
mask = CRYPTO_ALG_TYPE_MASK |
crypto_requires_off(algt->type, algt->mask,
CRYPTO_ALG_NEED_FALLBACK);
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
err = PTR_ERR(alg);
if (IS_ERR(alg))
goto err_free_inst;


@ -83,7 +83,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_cipher rcipher;
strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
rcipher.blocksize = alg->cra_blocksize;
rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
@ -102,7 +102,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rcomp;
strncpy(rcomp.type, "compression", sizeof(rcomp.type));
strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
sizeof(struct crypto_report_comp), &rcomp))
goto nla_put_failure;
@ -116,7 +116,7 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_acomp racomp;
strncpy(racomp.type, "acomp", sizeof(racomp.type));
strlcpy(racomp.type, "acomp", sizeof(racomp.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
sizeof(struct crypto_report_acomp), &racomp))
@ -131,7 +131,7 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_akcipher rakcipher;
strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
strlcpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
sizeof(struct crypto_report_akcipher), &rakcipher))
@ -146,7 +146,7 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_kpp rkpp;
strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
sizeof(struct crypto_report_kpp), &rkpp))
@ -160,10 +160,10 @@ nla_put_failure:
static int crypto_report_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg, struct sk_buff *skb)
{
strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
strncpy(ualg->cru_driver_name, alg->cra_driver_name,
strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
sizeof(ualg->cru_driver_name));
strncpy(ualg->cru_module_name, module_name(alg->cra_module),
strlcpy(ualg->cru_module_name, module_name(alg->cra_module),
sizeof(ualg->cru_module_name));
ualg->cru_type = 0;
@ -176,7 +176,7 @@ static int crypto_report_one(struct crypto_alg *alg,
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
struct crypto_report_larval rl;
strncpy(rl.type, "larval", sizeof(rl.type));
strlcpy(rl.type, "larval", sizeof(rl.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
sizeof(struct crypto_report_larval), &rl))
goto nla_put_failure;


@ -181,15 +181,24 @@ static void crypto_ctr_exit_tfm(struct crypto_tfm *tfm)
static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
{
struct crypto_instance *inst;
struct crypto_attr_type *algt;
struct crypto_alg *alg;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
if (err)
return ERR_PTR(err);
alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return ERR_CAST(algt);
mask = CRYPTO_ALG_TYPE_MASK |
crypto_requires_off(algt->type, algt->mask,
CRYPTO_ALG_NEED_FALLBACK);
alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, mask);
if (IS_ERR(alg))
return ERR_CAST(alg);
@ -350,6 +359,8 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
struct skcipher_alg *alg;
struct crypto_skcipher_spawn *spawn;
const char *cipher_name;
u32 mask;
int err;
algt = crypto_get_attr_type(tb);
@ -367,12 +378,14 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
if (!inst)
return -ENOMEM;
mask = crypto_requires_sync(algt->type, algt->mask) |
crypto_requires_off(algt->type, algt->mask,
CRYPTO_ALG_NEED_FALLBACK);
spawn = skcipher_instance_ctx(inst);
crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
err = crypto_grab_skcipher(spawn, cipher_name, 0,
crypto_requires_sync(algt->type,
algt->mask));
err = crypto_grab_skcipher(spawn, cipher_name, 0, mask);
if (err)
goto err_free_inst;


@ -43,20 +43,24 @@ struct deflate_ctx {
struct z_stream_s decomp_stream;
};
static int deflate_comp_init(struct deflate_ctx *ctx)
static int deflate_comp_init(struct deflate_ctx *ctx, int format)
{
int ret = 0;
struct z_stream_s *stream = &ctx->comp_stream;
stream->workspace = vzalloc(zlib_deflate_workspacesize(
-DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL));
MAX_WBITS, MAX_MEM_LEVEL));
if (!stream->workspace) {
ret = -ENOMEM;
goto out;
}
ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
-DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
Z_DEFAULT_STRATEGY);
if (format)
ret = zlib_deflateInit(stream, 3);
else
ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
-DEFLATE_DEF_WINBITS,
DEFLATE_DEF_MEMLEVEL,
Z_DEFAULT_STRATEGY);
if (ret != Z_OK) {
ret = -EINVAL;
goto out_free;
@ -68,7 +72,7 @@ out_free:
goto out;
}
static int deflate_decomp_init(struct deflate_ctx *ctx)
static int deflate_decomp_init(struct deflate_ctx *ctx, int format)
{
int ret = 0;
struct z_stream_s *stream = &ctx->decomp_stream;
@ -78,7 +82,10 @@ static int deflate_decomp_init(struct deflate_ctx *ctx)
ret = -ENOMEM;
goto out;
}
ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS);
if (format)
ret = zlib_inflateInit(stream);
else
ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS);
if (ret != Z_OK) {
ret = -EINVAL;
goto out_free;
@ -102,21 +109,21 @@ static void deflate_decomp_exit(struct deflate_ctx *ctx)
vfree(ctx->decomp_stream.workspace);
}
static int __deflate_init(void *ctx)
static int __deflate_init(void *ctx, int format)
{
int ret;
ret = deflate_comp_init(ctx);
ret = deflate_comp_init(ctx, format);
if (ret)
goto out;
ret = deflate_decomp_init(ctx);
ret = deflate_decomp_init(ctx, format);
if (ret)
deflate_comp_exit(ctx);
out:
return ret;
}
static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
static void *gen_deflate_alloc_ctx(struct crypto_scomp *tfm, int format)
{
struct deflate_ctx *ctx;
int ret;
@ -125,7 +132,7 @@ static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
if (!ctx)
return ERR_PTR(-ENOMEM);
ret = __deflate_init(ctx);
ret = __deflate_init(ctx, format);
if (ret) {
kfree(ctx);
return ERR_PTR(ret);
@ -134,11 +141,21 @@ static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
{
return gen_deflate_alloc_ctx(tfm, 0);
}
static void *zlib_deflate_alloc_ctx(struct crypto_scomp *tfm)
{
return gen_deflate_alloc_ctx(tfm, 1);
}
static int deflate_init(struct crypto_tfm *tfm)
{
struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
return __deflate_init(ctx);
return __deflate_init(ctx, 0);
}
static void __deflate_exit(void *ctx)
@ -272,7 +289,7 @@ static struct crypto_alg alg = {
.coa_decompress = deflate_decompress } }
};
static struct scomp_alg scomp = {
static struct scomp_alg scomp[] = { {
.alloc_ctx = deflate_alloc_ctx,
.free_ctx = deflate_free_ctx,
.compress = deflate_scompress,
@ -282,7 +299,17 @@ static struct scomp_alg scomp = {
.cra_driver_name = "deflate-scomp",
.cra_module = THIS_MODULE,
}
};
}, {
.alloc_ctx = zlib_deflate_alloc_ctx,
.free_ctx = deflate_free_ctx,
.compress = deflate_scompress,
.decompress = deflate_sdecompress,
.base = {
.cra_name = "zlib-deflate",
.cra_driver_name = "zlib-deflate-scomp",
.cra_module = THIS_MODULE,
}
} };
static int __init deflate_mod_init(void)
{
@ -292,7 +319,7 @@ static int __init deflate_mod_init(void)
if (ret)
return ret;
ret = crypto_register_scomp(&scomp);
ret = crypto_register_scomps(scomp, ARRAY_SIZE(scomp));
if (ret) {
crypto_unregister_alg(&alg);
return ret;
@ -304,7 +331,7 @@ static int __init deflate_mod_init(void)
static void __exit deflate_mod_fini(void)
{
crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
crypto_unregister_scomps(scomp, ARRAY_SIZE(scomp));
}
module_init(deflate_mod_init);
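With the second scomp entry registered, the rfc1950 (zlib-wrapped) format becomes reachable under the algorithm name "zlib-deflate", while the raw format stays available as "deflate". Because the new entry is an scomp, consumers reach it through the acomp interface. A hedged consumer-side sketch; the helper name, linear buffers and the synchronous-completion assumption are illustrative:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/acompress.h>

/* Hypothetical one-shot compression of a linear buffer with "zlib-deflate".
 * Truly asynchronous backends return -EINPROGRESS and need a completion
 * wait (as in the testmgr changes later in this series); scomp-backed
 * algorithms complete synchronously. */
static int example_zlib_compress(const void *in, unsigned int ilen,
                                 void *out, unsigned int *olen)
{
        struct crypto_acomp *tfm;
        struct acomp_req *req;
        struct scatterlist src, dst;
        int ret;

        tfm = crypto_alloc_acomp("zlib-deflate", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = acomp_request_alloc(tfm);
        if (!req) {
                crypto_free_acomp(tfm);
                return -ENOMEM;
        }

        sg_init_one(&src, in, ilen);
        sg_init_one(&dst, out, *olen);
        acomp_request_set_params(req, &src, &dst, ilen, *olen);

        ret = crypto_acomp_compress(req);
        if (!ret)
                *olen = req->dlen;

        acomp_request_free(req);
        crypto_free_acomp(tfm);
        return ret;
}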


@ -79,7 +79,8 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
return 0;
}
static int dh_set_secret(struct crypto_kpp *tfm, void *buf, unsigned int len)
static int dh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct dh_ctx *ctx = dh_get_ctx(tfm);
struct dh params;


@ -1749,17 +1749,16 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inlen,
u8 *outbuf, u32 outlen)
{
struct scatterlist sg_in;
struct scatterlist sg_in, sg_out;
int ret;
sg_init_one(&sg_in, inbuf, inlen);
sg_init_one(&sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
while (outlen) {
u32 cryptlen = min3(inlen, outlen, (u32)DRBG_OUTSCRATCHLEN);
struct scatterlist sg_out;
/* Output buffer may not be valid for SGL, use scratchpad */
sg_init_one(&sg_out, drbg->outscratchpad, cryptlen);
skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
cryptlen, drbg->V);
ret = crypto_skcipher_encrypt(drbg->ctr_req);


@ -38,7 +38,8 @@ static unsigned int ecdh_supported_curve(unsigned int curve_id)
}
}
static int ecdh_set_secret(struct crypto_kpp *tfm, void *buf, unsigned int len)
static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
struct ecdh params;


@ -44,7 +44,7 @@
---------------------------------------------------------------------------
Issue 31/01/2006
This file provides fast multiplication in GF(128) as required by several
This file provides fast multiplication in GF(2^128) as required by several
cryptographic authentication modes
*/
@ -88,76 +88,59 @@
q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \
}
/* Given the value i in 0..255 as the byte overflow when a field element
in GHASH is multiplied by x^8, this function will return the values that
are generated in the lo 16-bit word of the field value by applying the
modular polynomial. The values lo_byte and hi_byte are returned via the
macro xp_fun(lo_byte, hi_byte) so that the values can be assembled into
memory as required by a suitable definition of this macro operating on
the table above
*/
#define xx(p, q) 0x##p##q
#define xda_bbe(i) ( \
(i & 0x80 ? xx(43, 80) : 0) ^ (i & 0x40 ? xx(21, c0) : 0) ^ \
(i & 0x20 ? xx(10, e0) : 0) ^ (i & 0x10 ? xx(08, 70) : 0) ^ \
(i & 0x08 ? xx(04, 38) : 0) ^ (i & 0x04 ? xx(02, 1c) : 0) ^ \
(i & 0x02 ? xx(01, 0e) : 0) ^ (i & 0x01 ? xx(00, 87) : 0) \
)
#define xda_lle(i) ( \
(i & 0x80 ? xx(e1, 00) : 0) ^ (i & 0x40 ? xx(70, 80) : 0) ^ \
(i & 0x20 ? xx(38, 40) : 0) ^ (i & 0x10 ? xx(1c, 20) : 0) ^ \
(i & 0x08 ? xx(0e, 10) : 0) ^ (i & 0x04 ? xx(07, 08) : 0) ^ \
(i & 0x02 ? xx(03, 84) : 0) ^ (i & 0x01 ? xx(01, c2) : 0) \
)
static const u16 gf128mul_table_lle[256] = gf128mul_dat(xda_lle);
static const u16 gf128mul_table_bbe[256] = gf128mul_dat(xda_bbe);
/* These functions multiply a field element by x, by x^4 and by x^8
* in the polynomial field representation. It uses 32-bit word operations
* to gain speed but compensates for machine endianess and hence works
* correctly on both styles of machine.
/*
* Given a value i in 0..255 as the byte overflow when a field element
* in GF(2^128) is multiplied by x^8, the following macro returns the
* 16-bit value that must be XOR-ed into the low-degree end of the
* product to reduce it modulo the polynomial x^128 + x^7 + x^2 + x + 1.
*
* There are two versions of the macro, and hence two tables: one for
* the "be" convention where the highest-order bit is the coefficient of
* the highest-degree polynomial term, and one for the "le" convention
* where the highest-order bit is the coefficient of the lowest-degree
* polynomial term. In both cases the values are stored in CPU byte
* endianness such that the coefficients are ordered consistently across
* bytes, i.e. in the "be" table bits 15..0 of the stored value
* correspond to the coefficients of x^15..x^0, and in the "le" table
* bits 15..0 correspond to the coefficients of x^0..x^15.
*
* Therefore, provided that the appropriate byte endianness conversions
* are done by the multiplication functions (and these must be in place
* anyway to support both little endian and big endian CPUs), the "be"
* table can be used for multiplications of both "bbe" and "ble"
* elements, and the "le" table can be used for multiplications of both
* "lle" and "lbe" elements.
*/
static void gf128mul_x_lle(be128 *r, const be128 *x)
{
u64 a = be64_to_cpu(x->a);
u64 b = be64_to_cpu(x->b);
u64 _tt = gf128mul_table_lle[(b << 7) & 0xff];
#define xda_be(i) ( \
(i & 0x80 ? 0x4380 : 0) ^ (i & 0x40 ? 0x21c0 : 0) ^ \
(i & 0x20 ? 0x10e0 : 0) ^ (i & 0x10 ? 0x0870 : 0) ^ \
(i & 0x08 ? 0x0438 : 0) ^ (i & 0x04 ? 0x021c : 0) ^ \
(i & 0x02 ? 0x010e : 0) ^ (i & 0x01 ? 0x0087 : 0) \
)
r->b = cpu_to_be64((b >> 1) | (a << 63));
r->a = cpu_to_be64((a >> 1) ^ (_tt << 48));
}
#define xda_le(i) ( \
(i & 0x80 ? 0xe100 : 0) ^ (i & 0x40 ? 0x7080 : 0) ^ \
(i & 0x20 ? 0x3840 : 0) ^ (i & 0x10 ? 0x1c20 : 0) ^ \
(i & 0x08 ? 0x0e10 : 0) ^ (i & 0x04 ? 0x0708 : 0) ^ \
(i & 0x02 ? 0x0384 : 0) ^ (i & 0x01 ? 0x01c2 : 0) \
)
static void gf128mul_x_bbe(be128 *r, const be128 *x)
{
u64 a = be64_to_cpu(x->a);
u64 b = be64_to_cpu(x->b);
u64 _tt = gf128mul_table_bbe[a >> 63];
static const u16 gf128mul_table_le[256] = gf128mul_dat(xda_le);
static const u16 gf128mul_table_be[256] = gf128mul_dat(xda_be);
r->a = cpu_to_be64((a << 1) | (b >> 63));
r->b = cpu_to_be64((b << 1) ^ _tt);
}
void gf128mul_x_ble(be128 *r, const be128 *x)
{
u64 a = le64_to_cpu(x->a);
u64 b = le64_to_cpu(x->b);
u64 _tt = gf128mul_table_bbe[b >> 63];
r->a = cpu_to_le64((a << 1) ^ _tt);
r->b = cpu_to_le64((b << 1) | (a >> 63));
}
EXPORT_SYMBOL(gf128mul_x_ble);
/*
* The following functions multiply a field element by x^8 in
* the polynomial field representation. They use 64-bit word operations
* to gain speed but compensate for machine endianness and hence work
* correctly on both styles of machine.
*/
static void gf128mul_x8_lle(be128 *x)
{
u64 a = be64_to_cpu(x->a);
u64 b = be64_to_cpu(x->b);
u64 _tt = gf128mul_table_lle[b & 0xff];
u64 _tt = gf128mul_table_le[b & 0xff];
x->b = cpu_to_be64((b >> 8) | (a << 56));
x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));
@ -167,7 +150,7 @@ static void gf128mul_x8_bbe(be128 *x)
{
u64 a = be64_to_cpu(x->a);
u64 b = be64_to_cpu(x->b);
u64 _tt = gf128mul_table_bbe[a >> 56];
u64 _tt = gf128mul_table_be[a >> 56];
x->a = cpu_to_be64((a << 8) | (b >> 56));
x->b = cpu_to_be64((b << 8) ^ _tt);
@ -251,7 +234,7 @@ EXPORT_SYMBOL(gf128mul_bbe);
/* This version uses 64k bytes of table space.
A 16 byte buffer has to be multiplied by a 16 byte key
value in GF(128). If we consider a GF(128) value in
value in GF(2^128). If we consider a GF(2^128) value in
the buffer's lowest byte, we can construct a table of
the 256 16 byte values that result from the 256 values
of this byte. This requires 4096 bytes. But we also
@ -315,7 +298,7 @@ void gf128mul_free_64k(struct gf128mul_64k *t)
}
EXPORT_SYMBOL(gf128mul_free_64k);
void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t)
void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t)
{
u8 *ap = (u8 *)a;
be128 r[1];
@ -330,7 +313,7 @@ EXPORT_SYMBOL(gf128mul_64k_bbe);
/* This version uses 4k bytes of table space.
A 16 byte buffer has to be multiplied by a 16 byte key
value in GF(128). If we consider a GF(128) value in a
value in GF(2^128). If we consider a GF(2^128) value in a
single byte, we can construct a table of the 256 16 byte
values that result from the 256 values of this byte.
This requires 4096 bytes. If we take the highest byte in
@ -388,7 +371,7 @@ out:
}
EXPORT_SYMBOL(gf128mul_init_4k_bbe);
void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t)
void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
{
u8 *ap = (u8 *)a;
be128 r[1];
@ -403,7 +386,7 @@ void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t)
}
EXPORT_SYMBOL(gf128mul_4k_lle);
void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t)
void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t)
{
u8 *ap = (u8 *)a;
be128 r[1];

Просмотреть файл

@ -97,7 +97,7 @@ static int __lz4_decompress_crypto(const u8 *src, unsigned int slen,
int out_len = LZ4_decompress_safe(src, dst, slen, *dlen);
if (out_len < 0)
return out_len;
return -EINVAL;
*dlen = out_len;
return 0;


@ -98,7 +98,7 @@ static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen,
int out_len = LZ4_decompress_safe(src, dst, slen, *dlen);
if (out_len < 0)
return out_len;
return -EINVAL;
*dlen = out_len;
return 0;


@ -21,9 +21,11 @@
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cryptohash.h>
#include <asm/byteorder.h>
#define MD5_DIGEST_WORDS 4
#define MD5_MESSAGE_BYTES 64
const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
@ -47,6 +49,97 @@ static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
}
}
#define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z)
#define F4(x, y, z) (y ^ (x | ~z))
#define MD5STEP(f, w, x, y, z, in, s) \
(w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
static void md5_transform(__u32 *hash, __u32 const *in)
{
u32 a, b, c, d;
a = hash[0];
b = hash[1];
c = hash[2];
d = hash[3];
MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
hash[0] += a;
hash[1] += b;
hash[2] += c;
hash[3] += d;
}
static inline void md5_transform_helper(struct md5_state *ctx)
{
le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));


@ -353,5 +353,34 @@ int crypto_unregister_scomp(struct scomp_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
int crypto_register_scomps(struct scomp_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_register_scomp(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_scomp(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);
void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");


@ -83,47 +83,47 @@ struct tcrypt_result {
struct aead_test_suite {
struct {
struct aead_testvec *vecs;
const struct aead_testvec *vecs;
unsigned int count;
} enc, dec;
};
struct cipher_test_suite {
struct {
struct cipher_testvec *vecs;
const struct cipher_testvec *vecs;
unsigned int count;
} enc, dec;
};
struct comp_test_suite {
struct {
struct comp_testvec *vecs;
const struct comp_testvec *vecs;
unsigned int count;
} comp, decomp;
};
struct hash_test_suite {
struct hash_testvec *vecs;
const struct hash_testvec *vecs;
unsigned int count;
};
struct cprng_test_suite {
struct cprng_testvec *vecs;
const struct cprng_testvec *vecs;
unsigned int count;
};
struct drbg_test_suite {
struct drbg_testvec *vecs;
const struct drbg_testvec *vecs;
unsigned int count;
};
struct akcipher_test_suite {
struct akcipher_testvec *vecs;
const struct akcipher_testvec *vecs;
unsigned int count;
};
struct kpp_test_suite {
struct kpp_testvec *vecs;
const struct kpp_testvec *vecs;
unsigned int count;
};
@ -145,7 +145,8 @@ struct alg_test_desc {
} suite;
};
static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
static const unsigned int IDX[8] = {
IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
static void hexdump(unsigned char *buf, unsigned int len)
{
@ -203,7 +204,7 @@ static int wait_async_op(struct tcrypt_result *tr, int ret)
}
static int ahash_partial_update(struct ahash_request **preq,
struct crypto_ahash *tfm, struct hash_testvec *template,
struct crypto_ahash *tfm, const struct hash_testvec *template,
void *hash_buff, int k, int temp, struct scatterlist *sg,
const char *algo, char *result, struct tcrypt_result *tresult)
{
@ -260,9 +261,9 @@ out_nostate:
return ret;
}
static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
unsigned int tcount, bool use_digest,
const int align_offset)
static int __test_hash(struct crypto_ahash *tfm,
const struct hash_testvec *template, unsigned int tcount,
bool use_digest, const int align_offset)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
size_t digest_size = crypto_ahash_digestsize(tfm);
@ -538,7 +539,8 @@ out_nobuf:
return ret;
}
static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
static int test_hash(struct crypto_ahash *tfm,
const struct hash_testvec *template,
unsigned int tcount, bool use_digest)
{
unsigned int alignmask;
@ -566,7 +568,7 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
}
static int __test_aead(struct crypto_aead *tfm, int enc,
struct aead_testvec *template, unsigned int tcount,
const struct aead_testvec *template, unsigned int tcount,
const bool diff_dst, const int align_offset)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
@ -957,7 +959,7 @@ out_noxbuf:
}
static int test_aead(struct crypto_aead *tfm, int enc,
struct aead_testvec *template, unsigned int tcount)
const struct aead_testvec *template, unsigned int tcount)
{
unsigned int alignmask;
int ret;
@ -990,7 +992,8 @@ static int test_aead(struct crypto_aead *tfm, int enc,
}
static int test_cipher(struct crypto_cipher *tfm, int enc,
struct cipher_testvec *template, unsigned int tcount)
const struct cipher_testvec *template,
unsigned int tcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
unsigned int i, j, k;
@ -1068,7 +1071,8 @@ out_nobuf:
}
static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
struct cipher_testvec *template, unsigned int tcount,
const struct cipher_testvec *template,
unsigned int tcount,
const bool diff_dst, const int align_offset)
{
const char *algo =
@ -1332,7 +1336,8 @@ out_nobuf:
}
static int test_skcipher(struct crypto_skcipher *tfm, int enc,
struct cipher_testvec *template, unsigned int tcount)
const struct cipher_testvec *template,
unsigned int tcount)
{
unsigned int alignmask;
int ret;
@ -1364,8 +1369,10 @@ static int test_skcipher(struct crypto_skcipher *tfm, int enc,
return 0;
}
static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
struct comp_testvec *dtemplate, int ctcount, int dtcount)
static int test_comp(struct crypto_comp *tfm,
const struct comp_testvec *ctemplate,
const struct comp_testvec *dtemplate,
int ctcount, int dtcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
unsigned int i;
@ -1444,12 +1451,14 @@ out:
return ret;
}
static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
struct comp_testvec *dtemplate, int ctcount, int dtcount)
static int test_acomp(struct crypto_acomp *tfm,
const struct comp_testvec *ctemplate,
const struct comp_testvec *dtemplate,
int ctcount, int dtcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
unsigned int i;
char *output;
char *output, *decomp_out;
int ret;
struct scatterlist src, dst;
struct acomp_req *req;
@ -1459,6 +1468,12 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
if (!output)
return -ENOMEM;
decomp_out = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
if (!decomp_out) {
kfree(output);
return -ENOMEM;
}
for (i = 0; i < ctcount; i++) {
unsigned int dlen = COMP_BUF_SIZE;
int ilen = ctemplate[i].inlen;
@ -1497,7 +1512,23 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
goto out;
}
if (req->dlen != ctemplate[i].outlen) {
ilen = req->dlen;
dlen = COMP_BUF_SIZE;
sg_init_one(&src, output, ilen);
sg_init_one(&dst, decomp_out, dlen);
init_completion(&result.completion);
acomp_request_set_params(req, &src, &dst, ilen, dlen);
ret = wait_async_op(&result, crypto_acomp_decompress(req));
if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
kfree(input_vec);
acomp_request_free(req);
goto out;
}
if (req->dlen != ctemplate[i].inlen) {
pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
i + 1, algo, req->dlen);
ret = -EINVAL;
@ -1506,7 +1537,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
goto out;
}
if (memcmp(output, ctemplate[i].output, req->dlen)) {
if (memcmp(input_vec, decomp_out, req->dlen)) {
pr_err("alg: acomp: Compression test %d failed for %s\n",
i + 1, algo);
hexdump(output, req->dlen);
@ -1584,11 +1615,13 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
ret = 0;
out:
kfree(decomp_out);
kfree(output);
return ret;
}
static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
static int test_cprng(struct crypto_rng *tfm,
const struct cprng_testvec *template,
unsigned int tcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
@ -1865,7 +1898,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
}
static int drbg_cavs_test(struct drbg_testvec *test, int pr,
static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
const char *driver, u32 type, u32 mask)
{
int ret = -EAGAIN;
@ -1939,7 +1972,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
int err = 0;
int pr = 0;
int i = 0;
struct drbg_testvec *template = desc->suite.drbg.vecs;
const struct drbg_testvec *template = desc->suite.drbg.vecs;
unsigned int tcount = desc->suite.drbg.count;
if (0 == memcmp(driver, "drbg_pr_", 8))
@ -1958,7 +1991,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
}
static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
const char *alg)
{
struct kpp_request *req;
@ -2050,7 +2083,7 @@ free_req:
}
static int test_kpp(struct crypto_kpp *tfm, const char *alg,
struct kpp_testvec *vecs, unsigned int tcount)
const struct kpp_testvec *vecs, unsigned int tcount)
{
int ret, i;
@ -2086,7 +2119,7 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
}
static int test_akcipher_one(struct crypto_akcipher *tfm,
struct akcipher_testvec *vecs)
const struct akcipher_testvec *vecs)
{
char *xbuf[XBUFSIZE];
struct akcipher_request *req;
@ -2206,7 +2239,8 @@ free_xbuf:
}
static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
struct akcipher_testvec *vecs, unsigned int tcount)
const struct akcipher_testvec *vecs,
unsigned int tcount)
{
const char *algo =
crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
@ -2634,6 +2668,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "ctr(des3_ede)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = {
.enc = __VECS(des3_ede_ctr_enc_tv_template),
@ -2875,6 +2910,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "ecb(cipher_null)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "ecb(des)",
.test = alg_test_skcipher,
@ -3477,6 +3513,16 @@ static const struct alg_test_desc alg_test_descs[] = {
.dec = __VECS(tf_xts_dec_tv_template)
}
}
}, {
.alg = "zlib-deflate",
.test = alg_test_comp,
.fips_allowed = 1,
.suite = {
.comp = {
.comp = __VECS(zlib_deflate_comp_tv_template),
.decomp = __VECS(zlib_deflate_decomp_tv_template)
}
}
}
};

The diff for this file is not shown because it is too large.


@ -39,11 +39,11 @@ struct xts_instance_ctx {
};
struct rctx {
be128 buf[XTS_BUFFER_SIZE / sizeof(be128)];
le128 buf[XTS_BUFFER_SIZE / sizeof(le128)];
be128 t;
le128 t;
be128 *ext;
le128 *ext;
struct scatterlist srcbuf[2];
struct scatterlist dstbuf[2];
@ -99,7 +99,7 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
static int post_crypt(struct skcipher_request *req)
{
struct rctx *rctx = skcipher_request_ctx(req);
be128 *buf = rctx->ext ?: rctx->buf;
le128 *buf = rctx->ext ?: rctx->buf;
struct skcipher_request *subreq;
const int bs = XTS_BLOCK_SIZE;
struct skcipher_walk w;
@ -112,12 +112,12 @@ static int post_crypt(struct skcipher_request *req)
while (w.nbytes) {
unsigned int avail = w.nbytes;
be128 *wdst;
le128 *wdst;
wdst = w.dst.virt.addr;
do {
be128_xor(wdst, buf++, wdst);
le128_xor(wdst, buf++, wdst);
wdst++;
} while ((avail -= bs) >= bs);
@ -150,7 +150,7 @@ out:
static int pre_crypt(struct skcipher_request *req)
{
struct rctx *rctx = skcipher_request_ctx(req);
be128 *buf = rctx->ext ?: rctx->buf;
le128 *buf = rctx->ext ?: rctx->buf;
struct skcipher_request *subreq;
const int bs = XTS_BLOCK_SIZE;
struct skcipher_walk w;
@ -174,15 +174,15 @@ static int pre_crypt(struct skcipher_request *req)
while (w.nbytes) {
unsigned int avail = w.nbytes;
be128 *wsrc;
be128 *wdst;
le128 *wsrc;
le128 *wdst;
wsrc = w.src.virt.addr;
wdst = w.dst.virt.addr;
do {
*buf++ = rctx->t;
be128_xor(wdst++, &rctx->t, wsrc++);
le128_xor(wdst++, &rctx->t, wsrc++);
gf128mul_x_ble(&rctx->t, &rctx->t);
} while ((avail -= bs) >= bs);
@ -369,8 +369,8 @@ int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
const unsigned int max_blks = req->tbuflen / bsize;
struct blkcipher_walk walk;
unsigned int nblocks;
be128 *src, *dst, *t;
be128 *t_buf = req->tbuf;
le128 *src, *dst, *t;
le128 *t_buf = req->tbuf;
int err, i;
BUG_ON(max_blks < 1);
@ -383,8 +383,8 @@ int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
return err;
nblocks = min(nbytes / bsize, max_blks);
src = (be128 *)walk.src.virt.addr;
dst = (be128 *)walk.dst.virt.addr;
src = (le128 *)walk.src.virt.addr;
dst = (le128 *)walk.dst.virt.addr;
/* calculate first value of T */
req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);
@ -400,7 +400,7 @@ first:
t = &t_buf[i];
/* PP <- T xor P */
be128_xor(dst + i, t, src + i);
le128_xor(dst + i, t, src + i);
}
/* CC <- E(Key2,PP) */
@ -409,7 +409,7 @@ first:
/* C <- T xor CC */
for (i = 0; i < nblocks; i++)
be128_xor(dst + i, dst + i, &t_buf[i]);
le128_xor(dst + i, dst + i, &t_buf[i]);
src += nblocks;
dst += nblocks;
@ -417,7 +417,7 @@ first:
nblocks = min(nbytes / bsize, max_blks);
} while (nblocks > 0);
*(be128 *)walk.iv = *t;
*(le128 *)walk.iv = *t;
err = blkcipher_walk_done(desc, &walk, nbytes);
nbytes = walk.nbytes;
@ -425,8 +425,8 @@ first:
break;
nblocks = min(nbytes / bsize, max_blks);
src = (be128 *)walk.src.virt.addr;
dst = (be128 *)walk.dst.virt.addr;
src = (le128 *)walk.src.virt.addr;
dst = (le128 *)walk.dst.virt.addr;
}
return err;
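
The switch from be128 to le128 above reflects that the XTS tweak is multiplied by x in GF(2^128) using the little-endian ("ble") convention. A minimal sketch of that doubling step, written against an assumed two-by-u64 layout (low word first) rather than the kernel's actual le128/gf128mul_x_ble() implementation, looks like this:

    /* Double an XTS tweak in GF(2^128), little-endian convention.
     * Illustrative only: the struct layout and names are assumptions. */
    #include <stdint.h>

    struct tweak128 {
            uint64_t lo;    /* least-significant 64 bits */
            uint64_t hi;    /* most-significant 64 bits */
    };

    static void tweak_double(struct tweak128 *r, const struct tweak128 *x)
    {
            uint64_t carry = x->hi >> 63;           /* bit shifted out of the top */

            r->hi = (x->hi << 1) | (x->lo >> 63);
            r->lo = (x->lo << 1) ^ (carry * 0x87);  /* reduce mod x^128 + x^7 + x^2 + x + 1 */
    }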

View file

@ -294,20 +294,6 @@ config HW_RANDOM_POWERNV
If unsure, say Y.
config HW_RANDOM_EXYNOS
tristate "EXYNOS HW random number generator support"
depends on ARCH_EXYNOS || COMPILE_TEST
depends on HAS_IOMEM
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on EXYNOS SOCs.
To compile this driver as a module, choose M here: the
module will be called exynos-rng.
If unsure, say Y.
config HW_RANDOM_TPM
tristate "TPM HW Random Number Generator support"
depends on TCG_TPM
@ -423,6 +409,20 @@ config HW_RANDOM_CAVIUM
If unsure, say Y.
config HW_RANDOM_MTK
tristate "Mediatek Random Number Generator support"
depends on HW_RANDOM
depends on ARCH_MEDIATEK || COMPILE_TEST
default y
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on Mediatek SoCs.
To compile this driver as a module, choose M here: the
module will be called mtk-rng.
If unsure, say Y.
config HW_RANDOM_S390
tristate "S390 True Random Number Generator support"
depends on S390

View file

@ -24,7 +24,6 @@ obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
@ -36,4 +35,5 @@ obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o
obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o

View file

@ -1,231 +0,0 @@
/*
* exynos-rng.c - Random Number Generator driver for the exynos
*
* Copyright (C) 2012 Samsung Electronics
* Jonghwa Lee <jonghwa3.lee@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#define EXYNOS_PRNG_STATUS_OFFSET 0x10
#define EXYNOS_PRNG_SEED_OFFSET 0x140
#define EXYNOS_PRNG_OUT1_OFFSET 0x160
#define SEED_SETTING_DONE BIT(1)
#define PRNG_START 0x18
#define PRNG_DONE BIT(5)
#define EXYNOS_AUTOSUSPEND_DELAY 100
struct exynos_rng {
struct device *dev;
struct hwrng rng;
void __iomem *mem;
struct clk *clk;
};
static u32 exynos_rng_readl(struct exynos_rng *rng, u32 offset)
{
return readl_relaxed(rng->mem + offset);
}
static void exynos_rng_writel(struct exynos_rng *rng, u32 val, u32 offset)
{
writel_relaxed(val, rng->mem + offset);
}
static int exynos_rng_configure(struct exynos_rng *exynos_rng)
{
int i;
int ret = 0;
for (i = 0 ; i < 5 ; i++)
exynos_rng_writel(exynos_rng, jiffies,
EXYNOS_PRNG_SEED_OFFSET + 4*i);
if (!(exynos_rng_readl(exynos_rng, EXYNOS_PRNG_STATUS_OFFSET)
& SEED_SETTING_DONE))
ret = -EIO;
return ret;
}
static int exynos_init(struct hwrng *rng)
{
struct exynos_rng *exynos_rng = container_of(rng,
struct exynos_rng, rng);
int ret = 0;
pm_runtime_get_sync(exynos_rng->dev);
ret = exynos_rng_configure(exynos_rng);
pm_runtime_mark_last_busy(exynos_rng->dev);
pm_runtime_put_autosuspend(exynos_rng->dev);
return ret;
}
static int exynos_read(struct hwrng *rng, void *buf,
size_t max, bool wait)
{
struct exynos_rng *exynos_rng = container_of(rng,
struct exynos_rng, rng);
u32 *data = buf;
int retry = 100;
int ret = 4;
pm_runtime_get_sync(exynos_rng->dev);
exynos_rng_writel(exynos_rng, PRNG_START, 0);
while (!(exynos_rng_readl(exynos_rng,
EXYNOS_PRNG_STATUS_OFFSET) & PRNG_DONE) && --retry)
cpu_relax();
if (!retry) {
ret = -ETIMEDOUT;
goto out;
}
exynos_rng_writel(exynos_rng, PRNG_DONE, EXYNOS_PRNG_STATUS_OFFSET);
*data = exynos_rng_readl(exynos_rng, EXYNOS_PRNG_OUT1_OFFSET);
out:
pm_runtime_mark_last_busy(exynos_rng->dev);
pm_runtime_put_sync_autosuspend(exynos_rng->dev);
return ret;
}
static int exynos_rng_probe(struct platform_device *pdev)
{
struct exynos_rng *exynos_rng;
struct resource *res;
int ret;
exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
GFP_KERNEL);
if (!exynos_rng)
return -ENOMEM;
exynos_rng->dev = &pdev->dev;
exynos_rng->rng.name = "exynos";
exynos_rng->rng.init = exynos_init;
exynos_rng->rng.read = exynos_read;
exynos_rng->clk = devm_clk_get(&pdev->dev, "secss");
if (IS_ERR(exynos_rng->clk)) {
dev_err(&pdev->dev, "Couldn't get clock.\n");
return -ENOENT;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
exynos_rng->mem = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(exynos_rng->mem))
return PTR_ERR(exynos_rng->mem);
platform_set_drvdata(pdev, exynos_rng);
pm_runtime_set_autosuspend_delay(&pdev->dev, EXYNOS_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
if (ret) {
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
return ret;
}
static int exynos_rng_remove(struct platform_device *pdev)
{
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
static int __maybe_unused exynos_rng_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
clk_disable_unprepare(exynos_rng->clk);
return 0;
}
static int __maybe_unused exynos_rng_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
return clk_prepare_enable(exynos_rng->clk);
}
static int __maybe_unused exynos_rng_suspend(struct device *dev)
{
return pm_runtime_force_suspend(dev);
}
static int __maybe_unused exynos_rng_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
int ret;
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
return exynos_rng_configure(exynos_rng);
}
static const struct dev_pm_ops exynos_rng_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(exynos_rng_suspend, exynos_rng_resume)
SET_RUNTIME_PM_OPS(exynos_rng_runtime_suspend,
exynos_rng_runtime_resume, NULL)
};
static const struct of_device_id exynos_rng_dt_match[] = {
{
.compatible = "samsung,exynos4-rng",
},
{ },
};
MODULE_DEVICE_TABLE(of, exynos_rng_dt_match);
static struct platform_driver exynos_rng_driver = {
.driver = {
.name = "exynos-rng",
.pm = &exynos_rng_pm_ops,
.of_match_table = exynos_rng_dt_match,
},
.probe = exynos_rng_probe,
.remove = exynos_rng_remove,
};
module_platform_driver(exynos_rng_driver);
MODULE_DESCRIPTION("EXYNOS 4 H/W Random Number Generator driver");
MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>");
MODULE_LICENSE("GPL");

View file

@ -62,6 +62,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/clk.h>
#define RNG_DATA 0x00
@ -69,6 +70,7 @@ struct meson_rng_data {
void __iomem *base;
struct platform_device *pdev;
struct hwrng rng;
struct clk *core_clk;
};
static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@ -81,11 +83,17 @@ static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
return sizeof(u32);
}
static void meson_rng_clk_disable(void *data)
{
clk_disable_unprepare(data);
}
static int meson_rng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_rng_data *data;
struct resource *res;
int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@ -98,6 +106,20 @@ static int meson_rng_probe(struct platform_device *pdev)
if (IS_ERR(data->base))
return PTR_ERR(data->base);
data->core_clk = devm_clk_get(dev, "core");
if (IS_ERR(data->core_clk))
data->core_clk = NULL;
if (data->core_clk) {
ret = clk_prepare_enable(data->core_clk);
if (ret)
return ret;
ret = devm_add_action_or_reset(dev, meson_rng_clk_disable,
data->core_clk);
if (ret)
return ret;
}
data->rng.name = pdev->name;
data->rng.read = meson_rng_read;
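
The clock handling added above follows the usual devm pattern: enable the clock, then register a disable action so it is released automatically on probe failure or driver unbind. A condensed sketch of that idiom (names prefixed example_ are placeholders, assuming the clk was obtained with devm_clk_get()):

    #include <linux/clk.h>
    #include <linux/device.h>

    static void example_clk_disable(void *data)
    {
            clk_disable_unprepare(data);
    }

    static int example_clk_enable(struct device *dev, struct clk *clk)
    {
            int ret;

            ret = clk_prepare_enable(clk);
            if (ret)
                    return ret;

            /* disable the clock automatically when the device goes away
             * or if a later probe step fails */
            return devm_add_action_or_reset(dev, example_clk_disable, clk);
    }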

View file

@ -0,0 +1,168 @@
/*
* Driver for Mediatek Hardware Random Number Generator
*
* Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define MTK_RNG_DEV KBUILD_MODNAME
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#define USEC_POLL 2
#define TIMEOUT_POLL 20
#define RNG_CTRL 0x00
#define RNG_EN BIT(0)
#define RNG_READY BIT(31)
#define RNG_DATA 0x08
#define to_mtk_rng(p) container_of(p, struct mtk_rng, rng)
struct mtk_rng {
void __iomem *base;
struct clk *clk;
struct hwrng rng;
};
static int mtk_rng_init(struct hwrng *rng)
{
struct mtk_rng *priv = to_mtk_rng(rng);
u32 val;
int err;
err = clk_prepare_enable(priv->clk);
if (err)
return err;
val = readl(priv->base + RNG_CTRL);
val |= RNG_EN;
writel(val, priv->base + RNG_CTRL);
return 0;
}
static void mtk_rng_cleanup(struct hwrng *rng)
{
struct mtk_rng *priv = to_mtk_rng(rng);
u32 val;
val = readl(priv->base + RNG_CTRL);
val &= ~RNG_EN;
writel(val, priv->base + RNG_CTRL);
clk_disable_unprepare(priv->clk);
}
static bool mtk_rng_wait_ready(struct hwrng *rng, bool wait)
{
struct mtk_rng *priv = to_mtk_rng(rng);
int ready;
ready = readl(priv->base + RNG_CTRL) & RNG_READY;
if (!ready && wait)
readl_poll_timeout_atomic(priv->base + RNG_CTRL, ready,
ready & RNG_READY, USEC_POLL,
TIMEOUT_POLL);
return !!ready;
}
static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
struct mtk_rng *priv = to_mtk_rng(rng);
int retval = 0;
while (max >= sizeof(u32)) {
if (!mtk_rng_wait_ready(rng, wait))
break;
*(u32 *)buf = readl(priv->base + RNG_DATA);
retval += sizeof(u32);
buf += sizeof(u32);
max -= sizeof(u32);
}
return retval || !wait ? retval : -EIO;
}
static int mtk_rng_probe(struct platform_device *pdev)
{
struct resource *res;
int ret;
struct mtk_rng *priv;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "no iomem resource\n");
return -ENXIO;
}
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->rng.name = pdev->name;
priv->rng.init = mtk_rng_init;
priv->rng.cleanup = mtk_rng_cleanup;
priv->rng.read = mtk_rng_read;
priv->clk = devm_clk_get(&pdev->dev, "rng");
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
dev_err(&pdev->dev, "no clock for device: %d\n", ret);
return ret;
}
priv->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
ret = devm_hwrng_register(&pdev->dev, &priv->rng);
if (ret) {
dev_err(&pdev->dev, "failed to register rng device: %d\n",
ret);
return ret;
}
dev_info(&pdev->dev, "registered RNG driver\n");
return 0;
}
static const struct of_device_id mtk_rng_match[] = {
{ .compatible = "mediatek,mt7623-rng" },
{},
};
MODULE_DEVICE_TABLE(of, mtk_rng_match);
static struct platform_driver mtk_rng_driver = {
.probe = mtk_rng_probe,
.driver = {
.name = MTK_RNG_DEV,
.of_match_table = mtk_rng_match,
},
};
module_platform_driver(mtk_rng_driver);
MODULE_DESCRIPTION("Mediatek Random Number Generator Driver");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_LICENSE("GPL");
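
The read() implementation above follows the hwrng contract: return the number of bytes written, 0 when no data is available and the caller did not ask to wait, or a negative error code. A minimal registration skeleton, with all example_ names assumed, would be:

    #include <linux/hw_random.h>

    static int example_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
    {
            /* fill up to max bytes; return bytes written, or 0 if !wait and empty */
            return 0;
    }

    static struct hwrng example_rng = {
            .name = "example",
            .read = example_rng_read,
    };

    /* in probe(): devm_hwrng_register(dev, &example_rng); */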

View file

@ -748,9 +748,7 @@ static int n2rng_probe(struct platform_device *op)
dev_info(&op->dev, "Registered RNG HVAPI major %lu minor %lu\n",
np->hvapi_major, np->hvapi_minor);
np->units = devm_kzalloc(&op->dev,
sizeof(struct n2rng_unit) * np->num_units,
np->units = devm_kcalloc(&op->dev, np->num_units, sizeof(*np->units),
GFP_KERNEL);
err = -ENOMEM;
if (!np->units)
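
The devm_kcalloc() conversion above is not purely cosmetic: kcalloc-style allocators perform the count-times-size multiplication with overflow checking, so an unexpectedly large unit count cannot silently wrap the allocation size. The general shape of the idiom (placeholder names):

    /* Overflow-safe array allocation tied to the device's lifetime. */
    struct example_unit *units;

    units = devm_kcalloc(dev, num_units, sizeof(*units), GFP_KERNEL);
    if (!units)
            return -ENOMEM;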

View file

@ -398,16 +398,6 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
return err;
}
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (!IS_ERR(priv->clk)) {
err = clk_prepare_enable(priv->clk);
if (err)
dev_err(&pdev->dev, "unable to enable the clk, "
"err = %d\n", err);
}
/*
* On OMAP4, enabling the shutdown_oflo interrupt is
* done in the interrupt mask register. There is no
@ -478,6 +468,18 @@ static int omap_rng_probe(struct platform_device *pdev)
goto err_ioremap;
}
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (!IS_ERR(priv->clk)) {
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(&pdev->dev,
"Unable to enable the clk: %d\n", ret);
goto err_register;
}
}
ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
get_omap_rng_device_details(priv);
if (ret)

View file

@ -20,84 +20,100 @@
* TODO: add support for reading sizes other than 32bits and masking
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/hrtimer.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/timeriomem-rng.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/completion.h>
struct timeriomem_rng_private_data {
struct timeriomem_rng_private {
void __iomem *io_base;
unsigned int expires;
unsigned int period;
ktime_t period;
unsigned int present:1;
struct timer_list timer;
struct hrtimer timer;
struct completion completion;
struct hwrng timeriomem_rng_ops;
struct hwrng rng_ops;
};
#define to_rng_priv(rng) \
((struct timeriomem_rng_private_data *)rng->priv)
/*
* have data return 1, however return 0 if we have nothing
*/
static int timeriomem_rng_data_present(struct hwrng *rng, int wait)
static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
size_t max, bool wait)
{
struct timeriomem_rng_private_data *priv = to_rng_priv(rng);
struct timeriomem_rng_private *priv =
container_of(hwrng, struct timeriomem_rng_private, rng_ops);
int retval = 0;
int period_us = ktime_to_us(priv->period);
if (!wait || priv->present)
return priv->present;
/*
* The RNG provides 32-bits per read. Ensure there is enough space for
* at minimum one read.
*/
if (max < sizeof(u32))
return 0;
/*
* There may not have been enough time for new data to be generated
* since the last request. If the caller doesn't want to wait, let them
* bail out. Otherwise, wait for the completion. If the new data has
* already been generated, the completion should already be available.
*/
if (!wait && !priv->present)
return 0;
wait_for_completion(&priv->completion);
return 1;
}
do {
/*
* After the first read, all additional reads will need to wait
* for the RNG to generate new data. Since the period can have
* a wide range of values (1us to 1s have been observed), allow
* for 1% tolerance in the sleep time rather than a fixed value.
*/
if (retval > 0)
usleep_range(period_us,
period_us + min(1, period_us / 100));
static int timeriomem_rng_data_read(struct hwrng *rng, u32 *data)
{
struct timeriomem_rng_private_data *priv = to_rng_priv(rng);
unsigned long cur;
s32 delay;
*(u32 *)data = readl(priv->io_base);
retval += sizeof(u32);
data += sizeof(u32);
max -= sizeof(u32);
} while (wait && max > sizeof(u32));
*data = readl(priv->io_base);
cur = jiffies;
delay = cur - priv->expires;
delay = priv->period - (delay % priv->period);
priv->expires = cur + delay;
/*
* Block any new callers until the RNG has had time to generate new
* data.
*/
priv->present = 0;
reinit_completion(&priv->completion);
mod_timer(&priv->timer, priv->expires);
hrtimer_forward_now(&priv->timer, priv->period);
hrtimer_restart(&priv->timer);
return 4;
return retval;
}
static void timeriomem_rng_trigger(unsigned long data)
static enum hrtimer_restart timeriomem_rng_trigger(struct hrtimer *timer)
{
struct timeriomem_rng_private_data *priv
= (struct timeriomem_rng_private_data *)data;
struct timeriomem_rng_private *priv
= container_of(timer, struct timeriomem_rng_private, timer);
priv->present = 1;
complete(&priv->completion);
return HRTIMER_NORESTART;
}
static int timeriomem_rng_probe(struct platform_device *pdev)
{
struct timeriomem_rng_data *pdata = pdev->dev.platform_data;
struct timeriomem_rng_private_data *priv;
struct timeriomem_rng_private *priv;
struct resource *res;
int err = 0;
int period;
@ -119,7 +135,7 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
/* Allocate memory for the device structure (and zero it) */
priv = devm_kzalloc(&pdev->dev,
sizeof(struct timeriomem_rng_private_data), GFP_KERNEL);
sizeof(struct timeriomem_rng_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@ -139,54 +155,41 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
period = pdata->period;
}
priv->period = usecs_to_jiffies(period);
if (priv->period < 1) {
dev_err(&pdev->dev, "period is less than one jiffy\n");
return -EINVAL;
}
priv->expires = jiffies;
priv->present = 1;
priv->period = ns_to_ktime(period * NSEC_PER_USEC);
init_completion(&priv->completion);
complete(&priv->completion);
hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
priv->timer.function = timeriomem_rng_trigger;
setup_timer(&priv->timer, timeriomem_rng_trigger, (unsigned long)priv);
priv->timeriomem_rng_ops.name = dev_name(&pdev->dev);
priv->timeriomem_rng_ops.data_present = timeriomem_rng_data_present;
priv->timeriomem_rng_ops.data_read = timeriomem_rng_data_read;
priv->timeriomem_rng_ops.priv = (unsigned long)priv;
priv->rng_ops.name = dev_name(&pdev->dev);
priv->rng_ops.read = timeriomem_rng_read;
priv->io_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->io_base)) {
err = PTR_ERR(priv->io_base);
goto out_timer;
return PTR_ERR(priv->io_base);
}
err = hwrng_register(&priv->timeriomem_rng_ops);
/* Assume random data is already available. */
priv->present = 1;
complete(&priv->completion);
err = hwrng_register(&priv->rng_ops);
if (err) {
dev_err(&pdev->dev, "problem registering\n");
goto out_timer;
return err;
}
dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
priv->io_base, period);
return 0;
out_timer:
del_timer_sync(&priv->timer);
return err;
}
static int timeriomem_rng_remove(struct platform_device *pdev)
{
struct timeriomem_rng_private_data *priv = platform_get_drvdata(pdev);
struct timeriomem_rng_private *priv = platform_get_drvdata(pdev);
hwrng_unregister(&priv->timeriomem_rng_ops);
del_timer_sync(&priv->timer);
hwrng_unregister(&priv->rng_ops);
hrtimer_cancel(&priv->timer);
return 0;
}
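
The conversion above replaces the jiffies-based timer with an hrtimer that gates readers behind a completion: a read consumes the completion, closes the gate, and re-arms the timer, which opens it again once the hardware has had a full period to produce fresh data. A stripped-down sketch of that pattern, with all example_ names assumed:

    #include <linux/completion.h>
    #include <linux/hrtimer.h>
    #include <linux/kernel.h>
    #include <linux/ktime.h>

    struct example_gate {
            struct hrtimer timer;
            struct completion ready;
            ktime_t period;
    };

    static enum hrtimer_restart example_gate_open(struct hrtimer *t)
    {
            struct example_gate *g = container_of(t, struct example_gate, timer);

            complete(&g->ready);            /* new data may now be consumed */
            return HRTIMER_NORESTART;
    }

    static void example_gate_init(struct example_gate *g, unsigned int period_us)
    {
            g->period = ns_to_ktime((u64)period_us * NSEC_PER_USEC);
            init_completion(&g->ready);
            complete(&g->ready);            /* assume data is available at start */
            hrtimer_init(&g->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            g->timer.function = example_gate_open;
    }

    static void example_gate_consume(struct example_gate *g)
    {
            wait_for_completion(&g->ready); /* block until data is ready */
            reinit_completion(&g->ready);   /* close the gate again ... */
            hrtimer_start(&g->timer, g->period, HRTIMER_MODE_REL); /* ... and re-arm */
    }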

View file

@ -193,7 +193,7 @@
/* CLKID_I2C */
/* #define CLKID_SAR_ADC */
#define CLKID_SMART_CARD 24
#define CLKID_RNG0 25
/* CLKID_RNG0 */
#define CLKID_UART0 26
#define CLKID_SDHC 27
#define CLKID_STREAM 28

View file

@ -388,6 +388,21 @@ config CRYPTO_DEV_MXC_SCC
This option enables support for the Security Controller (SCC)
found in Freescale i.MX25 chips.
config CRYPTO_DEV_EXYNOS_RNG
tristate "EXYNOS HW pseudo random number generator support"
depends on ARCH_EXYNOS || COMPILE_TEST
depends on HAS_IOMEM
select CRYPTO_RNG
---help---
This driver provides kernel-side support through the
cryptographic API for the pseudo random number generator hardware
found on Exynos SoCs.
To compile this driver as a module, choose M here: the
module will be called exynos-rng.
If unsure, say Y.
config CRYPTO_DEV_S5P
tristate "Support for Samsung S5PV210/Exynos crypto accelerator"
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
@ -515,6 +530,13 @@ config CRYPTO_DEV_MXS_DCP
source "drivers/crypto/qat/Kconfig"
source "drivers/crypto/cavium/cpt/Kconfig"
config CRYPTO_DEV_CAVIUM_ZIP
tristate "Cavium ZIP driver"
depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
---help---
Select this option if you want to enable compression/decompression
acceleration on Cavium's ARM based SoCs
config CRYPTO_DEV_QCE
tristate "Qualcomm crypto engine accelerator"
depends on (ARCH_QCOM || COMPILE_TEST) && HAS_DMA && HAS_IOMEM
@ -619,4 +641,6 @@ config CRYPTO_DEV_BCM_SPU
Secure Processing Unit (SPU). The SPU driver registers ablkcipher,
ahash, and aead algorithms with the kernel cryptographic API.
source "drivers/crypto/stm32/Kconfig"
endif # CRYPTO_HW

View file

@ -2,9 +2,11 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
@ -30,6 +32,7 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_CRYPTO_DEV_STM32) += stm32/
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/

View file

@ -50,7 +50,7 @@
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
union ce_ring_size ring_size;
union ce_ring_contol ring_ctrl;
union ce_ring_control ring_ctrl;
union ce_part_ring_size part_ring_size;
union ce_io_threshold io_threshold;
u32 rand_num;

View file

@ -180,7 +180,7 @@ union ce_ring_size {
} __attribute__((packed));
#define CRYPTO4XX_RING_CONTROL_OFFSET 0x54
union ce_ring_contol {
union ce_ring_control {
struct {
u32 continuous:1;
u32 rsv:5;

View file

@ -312,7 +312,7 @@ int do_shash(unsigned char *name, unsigned char *result,
}
rc = crypto_shash_final(&sdesc->shash, result);
if (rc)
pr_err("%s: Could not genereate %s hash", __func__, name);
pr_err("%s: Could not generate %s hash", __func__, name);
do_shash_err:
crypto_free_shash(hash);

View file

@ -87,6 +87,23 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
To compile this as a module, choose M here: the module
will be called caamalg.
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
tristate "Queue Interface as Crypto API backend"
depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
default y
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
help
Selecting this will use CAAM Queue Interface (QI) for sending
& receiving crypto jobs to/from CAAM. This gives better performance
than the job ring interface when the number of cores is more than the
number of job rings assigned to the kernel. The number of portals
assigned to the kernel should also be more than the number of
job rings.
To compile this as a module, choose M here: the module
will be called caamalg_qi.
config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
depends on CRYPTO_DEV_FSL_CAAM_JR
@ -136,4 +153,5 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
information in the CAAM driver.
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
def_tristate CRYPTO_DEV_FSL_CAAM_CRYPTO_API
def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)

View file

@ -8,6 +8,7 @@ endif
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
@ -16,3 +17,7 @@ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
caam-objs := ctrl.o
caam_jr-objs := jr.o key_gen.o error.o
caam_pkc-y := caampkc.o pkc_desc.o
ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
ccflags-y += -DCONFIG_CAAM_QI
caam-objs += qi.o
endif

View file

@ -266,8 +266,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize,
is_rfc3686, nonce, ctx1_iv_off);
cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), DMA_TO_DEVICE);
@ -299,7 +300,7 @@ skip_enc:
desc = ctx->sh_desc_dec;
cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, alg->caam.geniv, is_rfc3686,
nonce, ctx1_iv_off);
nonce, ctx1_iv_off, false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), DMA_TO_DEVICE);
@ -333,7 +334,7 @@ skip_enc:
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce,
ctx1_iv_off);
ctx1_iv_off, false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), DMA_TO_DEVICE);

View file

@ -265,17 +265,19 @@ static void init_sh_desc_key_aead(u32 * const desc,
* split key is to be used, the size of the split key itself is
* specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
* SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
*
* Note: Requires an MDHA split key.
*/
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int icvsize,
const bool is_rfc3686, u32 *nonce,
const u32 ctx1_iv_off)
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
{
/* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
@ -284,6 +286,25 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_ENCRYPT);
if (is_qi) {
u32 *wait_load_cmd;
/* REG3 = assoclen */
append_seq_load(desc, 4, LDST_CLASS_DECO |
LDST_SRCDST_WORD_DECO_MATH3 |
(4 << LDST_OFFSET_SHIFT));
wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_CALM | JUMP_COND_NCP |
JUMP_COND_NOP | JUMP_COND_NIP |
JUMP_COND_NIFP);
set_jump_tgt_here(desc, wait_load_cmd);
append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
(ctx1_iv_off << LDST_OFFSET_SHIFT));
}
/* Read and write assoclen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
@ -338,6 +359,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
*
* Note: Requires an MDHA split key.
*/
@ -345,7 +367,7 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce,
const u32 ctx1_iv_off)
const u32 ctx1_iv_off, const bool is_qi)
{
/* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
@ -354,6 +376,26 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_DECRYPT | OP_ALG_ICV_ON);
if (is_qi) {
u32 *wait_load_cmd;
/* REG3 = assoclen */
append_seq_load(desc, 4, LDST_CLASS_DECO |
LDST_SRCDST_WORD_DECO_MATH3 |
(4 << LDST_OFFSET_SHIFT));
wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_CALM | JUMP_COND_NCP |
JUMP_COND_NOP | JUMP_COND_NIP |
JUMP_COND_NIFP);
set_jump_tgt_here(desc, wait_load_cmd);
if (!geniv)
append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
(ctx1_iv_off << LDST_OFFSET_SHIFT));
}
/* Read and write assoclen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
if (geniv)
@ -423,21 +465,44 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
*
* Note: Requires an MDHA split key.
*/
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off)
u32 *nonce, const u32 ctx1_iv_off,
const bool is_qi)
{
u32 geniv, moveiv;
/* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
if (is_rfc3686)
if (is_qi) {
u32 *wait_load_cmd;
/* REG3 = assoclen */
append_seq_load(desc, 4, LDST_CLASS_DECO |
LDST_SRCDST_WORD_DECO_MATH3 |
(4 << LDST_OFFSET_SHIFT));
wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_CALM | JUMP_COND_NCP |
JUMP_COND_NOP | JUMP_COND_NIP |
JUMP_COND_NIFP);
set_jump_tgt_here(desc, wait_load_cmd);
}
if (is_rfc3686) {
if (is_qi)
append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
(ctx1_iv_off << LDST_OFFSET_SHIFT));
goto copy_iv;
}
/* Generate IV */
geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |

View file

@ -12,6 +12,9 @@
#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
/* Note: Nonce is counted in cdata.keylen */
#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
@ -45,20 +48,22 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
unsigned int icvsize);
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int icvsize,
const bool is_rfc3686, u32 *nonce,
const u32 ctx1_iv_off);
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
const bool is_qi);
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce,
const u32 ctx1_iv_off);
const u32 ctx1_iv_off, const bool is_qi);
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off);
u32 *nonce, const u32 ctx1_iv_off,
const bool is_qi);
void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize);

The diff for this file is not shown because of its large size.

View file

@ -18,6 +18,10 @@
bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
#ifdef CONFIG_CAAM_QI
#include "qi.h"
#endif
/*
* i.MX targets tend to have clock control subsystems that can
* enable/disable clocking to our device.
@ -310,6 +314,11 @@ static int caam_remove(struct platform_device *pdev)
/* Remove platform devices under the crypto node */
of_platform_depopulate(ctrldev);
#ifdef CONFIG_CAAM_QI
if (ctrlpriv->qidev)
caam_qi_shutdown(ctrlpriv->qidev);
#endif
/* De-initialize RNG state handles initialized by this driver. */
if (ctrlpriv->rng4_sh_init)
deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
@ -400,23 +409,6 @@ int caam_get_era(void)
}
EXPORT_SYMBOL(caam_get_era);
#ifdef CONFIG_DEBUG_FS
static int caam_debugfs_u64_get(void *data, u64 *val)
{
*val = caam64_to_cpu(*(u64 *)data);
return 0;
}
static int caam_debugfs_u32_get(void *data, u64 *val)
{
*val = caam32_to_cpu(*(u32 *)data);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
#endif
static const struct of_device_id caam_match[] = {
{
.compatible = "fsl,sec-v4.0",
@ -613,6 +605,18 @@ static int caam_probe(struct platform_device *pdev)
goto iounmap_ctrl;
}
#ifdef CONFIG_DEBUG_FS
/*
* FIXME: needs better naming distinction, as some amalgamation of
* "caam" and nprop->full_name. The OF name isn't distinctive,
* but does separate instances
*/
perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
#endif
ring = 0;
for_each_available_child_of_node(nprop, np)
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
@ -637,6 +641,13 @@ static int caam_probe(struct platform_device *pdev)
);
/* This is all that's required to physically enable QI */
wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
/* If QMAN driver is present, init CAAM-QI backend */
#ifdef CONFIG_CAAM_QI
ret = caam_qi_init(pdev);
if (ret)
dev_err(dev, "caam qi i/f init failed: %d\n", ret);
#endif
}
/* If no QI and no rings specified, quit and go home */
@ -724,17 +735,6 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->total_jobrs, ctrlpriv->qi_present);
#ifdef CONFIG_DEBUG_FS
/*
* FIXME: needs better naming distinction, as some amalgamation of
* "caam" and nprop->full_name. The OF name isn't distinctive,
* but does separate instances
*/
perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
/* Controller-level - performance monitor counters */
ctrlpriv->ctl_rq_dequeued =
debugfs_create_file("rq_dequeued",
@ -817,6 +817,9 @@ static int caam_probe(struct platform_device *pdev)
return 0;
caam_remove:
#ifdef CONFIG_DEBUG_FS
debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif
caam_remove(pdev);
return ret;

View file

@ -4,6 +4,9 @@
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
#ifndef DESC_CONSTR_H
#define DESC_CONSTR_H
#include "desc.h"
#include "regs.h"
@ -491,3 +494,5 @@ static inline int desc_inline_query(unsigned int sd_base_len,
return (rem_bytes >= 0) ? 0 : -1;
}
#endif /* DESC_CONSTR_H */

View file

@ -66,6 +66,9 @@ struct caam_drv_private_jr {
struct caam_drv_private {
struct device *dev;
#ifdef CONFIG_CAAM_QI
struct device *qidev;
#endif
struct platform_device *pdev;
/* Physical-presence section */
@ -109,9 +112,30 @@ struct caam_drv_private {
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
#ifdef CONFIG_CAAM_QI
struct dentry *qi_congested;
#endif
#endif
};
void caam_jr_algapi_init(struct device *dev);
void caam_jr_algapi_remove(struct device *dev);
#ifdef CONFIG_DEBUG_FS
static int caam_debugfs_u64_get(void *data, u64 *val)
{
*val = caam64_to_cpu(*(u64 *)data);
return 0;
}
static int caam_debugfs_u32_get(void *data, u64 *val)
{
*val = caam32_to_cpu(*(u32 *)data);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
#endif
#endif /* INTERN_H */

drivers/crypto/caam/qi.c (new file, 805 lines)
View file

@ -0,0 +1,805 @@
/*
* CAAM/SEC 4.x QI transport/backend driver
* Queue Interface backend functionality
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
* Copyright 2016-2017 NXP
*/
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <soc/fsl/qman.h>
#include "regs.h"
#include "qi.h"
#include "desc.h"
#include "intern.h"
#include "desc_constr.h"
#define PREHDR_RSLS_SHIFT 31
/*
* Use a reasonable backlog of frames (per CPU) as congestion threshold,
* so that resources used by the in-flight buffers do not become a memory hog.
*/
#define MAX_RSP_FQ_BACKLOG_PER_CPU 256
/* Length of a single buffer in the QI driver memory cache */
#define CAAM_QI_MEMCACHE_SIZE 512
#define CAAM_QI_ENQUEUE_RETRIES 10000
#define CAAM_NAPI_WEIGHT 63
/*
* caam_napi - struct holding CAAM NAPI-related params
* @irqtask: IRQ task for QI backend
* @p: QMan portal
*/
struct caam_napi {
struct napi_struct irqtask;
struct qman_portal *p;
};
/*
* caam_qi_pcpu_priv - percpu private data structure to main list of pending
* responses expected on each cpu.
* @caam_napi: CAAM NAPI params
* @net_dev: netdev used by NAPI
* @rsp_fq: response FQ from CAAM
*/
struct caam_qi_pcpu_priv {
struct caam_napi caam_napi;
struct net_device net_dev;
struct qman_fq *rsp_fq;
} ____cacheline_aligned;
static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
/*
* caam_qi_priv - CAAM QI backend private params
* @cgr: QMan congestion group
* @qi_pdev: platform device for QI backend
*/
struct caam_qi_priv {
struct qman_cgr cgr;
struct platform_device *qi_pdev;
};
static struct caam_qi_priv qipriv ____cacheline_aligned;
/*
* This is written by only one core - the one that initialized the CGR - and
* read by multiple cores (all the others).
*/
bool caam_congested __read_mostly;
EXPORT_SYMBOL(caam_congested);
#ifdef CONFIG_DEBUG_FS
/*
* This is a counter for the number of times the congestion group (where all
* the request and response queueus are) reached congestion. Incremented
* each time the congestion callback is called with congested == true.
*/
static u64 times_congested;
#endif
/*
* CPU from where the module initialised. This is required because QMan driver
* requires CGRs to be removed from same CPU from where they were originally
* allocated.
*/
static int mod_init_cpu;
/*
* This is a a cache of buffers, from which the users of CAAM QI driver
* can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
* doing malloc on the hotpath.
* NOTE: A more elegant solution would be to have some headroom in the frames
* being processed. This could be added by the dpaa-ethernet driver.
* This would pose a problem for userspace application processing which
* cannot know of this limitation. So for now, this will work.
* NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
*/
static struct kmem_cache *qi_cache;
int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
struct qm_fd fd;
dma_addr_t addr;
int ret;
int num_retries = 0;
qm_fd_clear_fd(&fd);
qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(qidev, addr)) {
dev_err(qidev, "DMA mapping error for QI enqueue request\n");
return -EIO;
}
qm_fd_addr_set64(&fd, addr);
do {
ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
if (likely(!ret))
return 0;
if (ret != -EBUSY)
break;
num_retries++;
} while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
dev_err(qidev, "qman_enqueue failed: %d\n", ret);
return ret;
}
EXPORT_SYMBOL(caam_qi_enqueue);
static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
const union qm_mr_entry *msg)
{
const struct qm_fd *fd;
struct caam_drv_req *drv_req;
struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
fd = &msg->ern.fd;
if (qm_fd_get_format(fd) != qm_fd_compound) {
dev_err(qidev, "Non-compound FD from CAAM\n");
return;
}
drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
if (!drv_req) {
dev_err(qidev,
"Can't find original request for CAAM response\n");
return;
}
dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
drv_req->cbk(drv_req, -EIO);
}
static struct qman_fq *create_caam_req_fq(struct device *qidev,
struct qman_fq *rsp_fq,
dma_addr_t hwdesc,
int fq_sched_flag)
{
int ret;
struct qman_fq *req_fq;
struct qm_mcc_initfq opts;
req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
if (!req_fq)
return ERR_PTR(-ENOMEM);
req_fq->cb.ern = caam_fq_ern_cb;
req_fq->cb.fqs = NULL;
ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
if (ret) {
dev_err(qidev, "Failed to create session req FQ\n");
goto create_req_fq_fail;
}
memset(&opts, 0, sizeof(opts));
opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
QM_INITFQ_WE_CONTEXTB |
QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
qm_fqd_context_a_set64(&opts.fqd, hwdesc);
opts.fqd.cgid = qipriv.cgr.cgrid;
ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
if (ret) {
dev_err(qidev, "Failed to init session req FQ\n");
goto init_req_fq_fail;
}
dev_info(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
smp_processor_id());
return req_fq;
init_req_fq_fail:
qman_destroy_fq(req_fq);
create_req_fq_fail:
kfree(req_fq);
return ERR_PTR(ret);
}
static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
{
int ret;
ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
QMAN_VOLATILE_FLAG_FINISH,
QM_VDQCR_PRECEDENCE_VDQCR |
QM_VDQCR_NUMFRAMES_TILLEMPTY);
if (ret) {
dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
return ret;
}
do {
struct qman_portal *p;
p = qman_get_affine_portal(smp_processor_id());
qman_p_poll_dqrr(p, 16);
} while (fq->flags & QMAN_FQ_STATE_NE);
return 0;
}
static int kill_fq(struct device *qidev, struct qman_fq *fq)
{
u32 flags;
int ret;
ret = qman_retire_fq(fq, &flags);
if (ret < 0) {
dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
return ret;
}
if (!ret)
goto empty_fq;
/* Async FQ retirement condition */
if (ret == 1) {
/* Retry till FQ gets in retired state */
do {
msleep(20);
} while (fq->state != qman_fq_state_retired);
WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
}
empty_fq:
if (fq->flags & QMAN_FQ_STATE_NE) {
ret = empty_retired_fq(qidev, fq);
if (ret) {
dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
fq->fqid);
return ret;
}
}
ret = qman_oos_fq(fq);
if (ret)
dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
qman_destroy_fq(fq);
return ret;
}
static int empty_caam_fq(struct qman_fq *fq)
{
int ret;
struct qm_mcr_queryfq_np np;
/* Wait till the older CAAM FQ get empty */
do {
ret = qman_query_fq_np(fq, &np);
if (ret)
return ret;
if (!qm_mcr_np_get(&np, frm_cnt))
break;
msleep(20);
} while (1);
/*
* Give extra time for pending jobs from this FQ in holding tanks
* to get processed
*/
msleep(20);
return 0;
}
int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
{
int ret;
u32 num_words;
struct qman_fq *new_fq, *old_fq;
struct device *qidev = drv_ctx->qidev;
num_words = desc_len(sh_desc);
if (num_words > MAX_SDLEN) {
dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
return -EINVAL;
}
/* Note down older req FQ */
old_fq = drv_ctx->req_fq;
/* Create a new req FQ in parked state */
new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
drv_ctx->context_a, 0);
if (unlikely(IS_ERR_OR_NULL(new_fq))) {
dev_err(qidev, "FQ allocation for shdesc update failed\n");
return PTR_ERR(new_fq);
}
/* Hook up new FQ to context so that new requests keep queuing */
drv_ctx->req_fq = new_fq;
/* Empty and remove the older FQ */
ret = empty_caam_fq(old_fq);
if (ret) {
dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
/* We can revert to older FQ */
drv_ctx->req_fq = old_fq;
if (kill_fq(qidev, new_fq))
dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
new_fq->fqid);
return ret;
}
/*
* Re-initialise pre-header. Set RSLS and SDLEN.
* Update the shared descriptor for driver context.
*/
drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
num_words);
memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
dma_sync_single_for_device(qidev, drv_ctx->context_a,
sizeof(drv_ctx->sh_desc) +
sizeof(drv_ctx->prehdr),
DMA_BIDIRECTIONAL);
/* Put the new FQ in scheduled state */
ret = qman_schedule_fq(new_fq);
if (ret) {
dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
/*
* We can kill new FQ and revert to old FQ.
* Since the desc is already modified, it is success case
*/
drv_ctx->req_fq = old_fq;
if (kill_fq(qidev, new_fq))
dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
new_fq->fqid);
} else if (kill_fq(qidev, old_fq)) {
dev_warn(qidev, "Old CAAM FQ: %u kill failed\n", old_fq->fqid);
}
return 0;
}
EXPORT_SYMBOL(caam_drv_ctx_update);
struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
int *cpu,
u32 *sh_desc)
{
size_t size;
u32 num_words;
dma_addr_t hwdesc;
struct caam_drv_ctx *drv_ctx;
const cpumask_t *cpus = qman_affine_cpus();
static DEFINE_PER_CPU(int, last_cpu);
num_words = desc_len(sh_desc);
if (num_words > MAX_SDLEN) {
dev_err(qidev, "Invalid descriptor len: %d words\n",
num_words);
return ERR_PTR(-EINVAL);
}
drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
if (!drv_ctx)
return ERR_PTR(-ENOMEM);
/*
* Initialise pre-header - set RSLS and SDLEN - and shared descriptor
* and dma-map them.
*/
drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
num_words);
memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(qidev, hwdesc)) {
dev_err(qidev, "DMA map error for preheader + shdesc\n");
kfree(drv_ctx);
return ERR_PTR(-ENOMEM);
}
drv_ctx->context_a = hwdesc;
/* If given CPU does not own the portal, choose another one that does */
if (!cpumask_test_cpu(*cpu, cpus)) {
int *pcpu = &get_cpu_var(last_cpu);
*pcpu = cpumask_next(*pcpu, cpus);
if (*pcpu >= nr_cpu_ids)
*pcpu = cpumask_first(cpus);
*cpu = *pcpu;
put_cpu_var(last_cpu);
}
drv_ctx->cpu = *cpu;
/* Find response FQ hooked with this CPU */
drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
/* Attach request FQ */
drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
QMAN_INITFQ_FLAG_SCHED);
if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
dev_err(qidev, "create_caam_req_fq failed\n");
dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
kfree(drv_ctx);
return ERR_PTR(-ENOMEM);
}
drv_ctx->qidev = qidev;
return drv_ctx;
}
EXPORT_SYMBOL(caam_drv_ctx_init);
void *qi_cache_alloc(gfp_t flags)
{
return kmem_cache_alloc(qi_cache, flags);
}
EXPORT_SYMBOL(qi_cache_alloc);
void qi_cache_free(void *obj)
{
kmem_cache_free(qi_cache, obj);
}
EXPORT_SYMBOL(qi_cache_free);
static int caam_qi_poll(struct napi_struct *napi, int budget)
{
struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
int cleaned = qman_p_poll_dqrr(np->p, budget);
if (cleaned < budget) {
napi_complete(napi);
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
}
return cleaned;
}
void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
{
if (IS_ERR_OR_NULL(drv_ctx))
return;
/* Remove request FQ */
if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
DMA_BIDIRECTIONAL);
kfree(drv_ctx);
}
EXPORT_SYMBOL(caam_drv_ctx_rel);
int caam_qi_shutdown(struct device *qidev)
{
int i, ret;
struct caam_qi_priv *priv = dev_get_drvdata(qidev);
const cpumask_t *cpus = qman_affine_cpus();
struct cpumask old_cpumask = current->cpus_allowed;
for_each_cpu(i, cpus) {
struct napi_struct *irqtask;
irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
napi_disable(irqtask);
netif_napi_del(irqtask);
if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
}
/*
* QMan driver requires CGRs to be deleted from same CPU from where they
* were instantiated. Hence we get the module removal execute from the
* same CPU from where it was originally inserted.
*/
set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
ret = qman_delete_cgr(&priv->cgr);
if (ret)
dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
else
qman_release_cgrid(priv->cgr.cgrid);
kmem_cache_destroy(qi_cache);
/* Now that we're done with the CGRs, restore the cpus allowed mask */
set_cpus_allowed_ptr(current, &old_cpumask);
platform_device_unregister(priv->qi_pdev);
return ret;
}
static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
{
caam_congested = congested;
if (congested) {
#ifdef CONFIG_DEBUG_FS
times_congested++;
#endif
pr_debug_ratelimited("CAAM entered congestion\n");
} else {
pr_debug_ratelimited("CAAM exited congestion\n");
}
}
static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
{
/*
* In case of threaded ISR, for RT kernels in_irq() does not return
* appropriate value, so use in_serving_softirq to distinguish between
* softirq and irq contexts.
*/
if (unlikely(in_irq() || !in_serving_softirq())) {
/* Disable QMan IRQ source and invoke NAPI */
qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
np->p = p;
napi_schedule(&np->irqtask);
return 1;
}
return 0;
}
static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
struct qman_fq *rsp_fq,
const struct qm_dqrr_entry *dqrr)
{
struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
struct caam_drv_req *drv_req;
const struct qm_fd *fd;
struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
u32 status;
if (caam_qi_napi_schedule(p, caam_napi))
return qman_cb_dqrr_stop;
fd = &dqrr->fd;
status = be32_to_cpu(fd->status);
if (unlikely(status))
dev_err(qidev, "Error: %#x in CAAM response FD\n", status);
if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
dev_err(qidev, "Non-compound FD from CAAM\n");
return qman_cb_dqrr_consume;
}
drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
if (unlikely(!drv_req)) {
dev_err(qidev,
"Can't find original request for caam response\n");
return qman_cb_dqrr_consume;
}
dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
drv_req->cbk(drv_req, status);
return qman_cb_dqrr_consume;
}
static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
{
struct qm_mcc_initfq opts;
struct qman_fq *fq;
int ret;
fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
if (!fq)
return -ENOMEM;
fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
if (ret) {
dev_err(qidev, "Rsp FQ create failed\n");
kfree(fq);
return -ENODEV;
}
memset(&opts, 0, sizeof(opts));
opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
QM_INITFQ_WE_CONTEXTB |
QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
opts.fqd.cgid = qipriv.cgr.cgrid;
opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
QM_STASHING_EXCL_DATA;
qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
if (ret) {
dev_err(qidev, "Rsp FQ init failed\n");
kfree(fq);
return -ENODEV;
}
per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
dev_info(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
return 0;
}
static int init_cgr(struct device *qidev)
{
int ret;
struct qm_mcc_initcgr opts;
const u64 cpus = *(u64 *)qman_affine_cpus();
const int num_cpus = hweight64(cpus);
const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
if (ret) {
dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
return ret;
}
qipriv.cgr.cb = cgr_cb;
memset(&opts, 0, sizeof(opts));
opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
QM_CGR_WE_MODE);
opts.cgr.cscn_en = QM_CGR_EN;
opts.cgr.mode = QMAN_CGR_MODE_FRAME;
qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
if (ret) {
dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
qipriv.cgr.cgrid);
return ret;
}
dev_info(qidev, "Congestion threshold set to %llu\n", val);
return 0;
}
static int alloc_rsp_fqs(struct device *qidev)
{
int ret, i;
const cpumask_t *cpus = qman_affine_cpus();
/*Now create response FQs*/
for_each_cpu(i, cpus) {
ret = alloc_rsp_fq_cpu(qidev, i);
if (ret) {
dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i);
return ret;
}
}
return 0;
}
static void free_rsp_fqs(void)
{
int i;
const cpumask_t *cpus = qman_affine_cpus();
for_each_cpu(i, cpus)
kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
}
int caam_qi_init(struct platform_device *caam_pdev)
{
int err, i;
struct platform_device *qi_pdev;
struct device *ctrldev = &caam_pdev->dev, *qidev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
struct cpumask old_cpumask = current->cpus_allowed;
static struct platform_device_info qi_pdev_info = {
.name = "caam_qi",
.id = PLATFORM_DEVID_NONE
};
/*
* QMAN requires CGRs to be removed from same CPU+portal from where it
* was originally allocated. Hence we need to note down the
* initialisation CPU and use the same CPU for module exit.
* We select the first CPU to from the list of portal owning CPUs.
* Then we pin module init to this CPU.
*/
mod_init_cpu = cpumask_first(cpus);
set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
qi_pdev_info.parent = ctrldev;
qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
qi_pdev = platform_device_register_full(&qi_pdev_info);
if (IS_ERR(qi_pdev))
return PTR_ERR(qi_pdev);
ctrlpriv = dev_get_drvdata(ctrldev);
qidev = &qi_pdev->dev;
qipriv.qi_pdev = qi_pdev;
dev_set_drvdata(qidev, &qipriv);
/* Initialize the congestion detection */
err = init_cgr(qidev);
if (err) {
dev_err(qidev, "CGR initialization failed: %d\n", err);
platform_device_unregister(qi_pdev);
return err;
}
/* Initialise response FQs */
err = alloc_rsp_fqs(qidev);
if (err) {
dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
free_rsp_fqs();
platform_device_unregister(qi_pdev);
return err;
}
/*
* Enable the NAPI contexts on each of the core which has an affine
* portal.
*/
for_each_cpu(i, cpus) {
struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
struct caam_napi *caam_napi = &priv->caam_napi;
struct napi_struct *irqtask = &caam_napi->irqtask;
struct net_device *net_dev = &priv->net_dev;
net_dev->dev = *qidev;
INIT_LIST_HEAD(&net_dev->napi_list);
netif_napi_add(net_dev, irqtask, caam_qi_poll,
CAAM_NAPI_WEIGHT);
napi_enable(irqtask);
}
/* Hook up QI device to parent controlling caam device */
ctrlpriv->qidev = qidev;
qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
SLAB_CACHE_DMA, NULL);
if (!qi_cache) {
dev_err(qidev, "Can't allocate CAAM cache\n");
free_rsp_fqs();
platform_device_unregister(qi_pdev);
return -ENOMEM;
}
/* Done with the CGRs; restore the cpus allowed mask */
set_cpus_allowed_ptr(current, &old_cpumask);
#ifdef CONFIG_DEBUG_FS
ctrlpriv->qi_congested = debugfs_create_file("qi_congested", 0444,
ctrlpriv->ctl,
&times_congested,
&caam_fops_u64_ro);
#endif
dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
return 0;
}
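
For orientation, this is roughly how a caam/qi user (such as the caamalg_qi frontend added in this series) is expected to drive the API exported above. The example_ helpers are placeholders, not code from the patch, and the real S/G table setup for fd_sgt[] is omitted:

    #include <linux/err.h>
    #include <linux/smp.h>
    #include "qi.h"

    static void example_done(struct caam_drv_req *req, u32 status)
    {
            /* unmap buffers and complete the crypto request here */
    }

    static int example_submit(struct device *qidev, u32 *sh_desc, void *app_ctx)
    {
            struct caam_drv_ctx *drv_ctx;
            struct caam_drv_req *req;
            int cpu = smp_processor_id();
            int ret;

            /* one driver context per crypto session, wrapping the shared desc */
            drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
            if (IS_ERR(drv_ctx))
                    return PTR_ERR(drv_ctx);

            req = qi_cache_alloc(GFP_ATOMIC);
            if (!req) {
                    caam_drv_ctx_rel(drv_ctx);
                    return -ENOMEM;
            }

            /* req->fd_sgt[0]/[1] would be filled with output/input S/G entries */
            req->drv_ctx = drv_ctx;
            req->cbk = example_done;
            req->app_ctx = app_ctx;

            ret = caam_qi_enqueue(qidev, req);
            if (ret) {
                    qi_cache_free(req);
                    caam_drv_ctx_rel(drv_ctx);
            }
            return ret;
    }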

drivers/crypto/caam/qi.h (new file, 201 lines)
View file

@ -0,0 +1,201 @@
/*
* Public definitions for the CAAM/QI (Queue Interface) backend.
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
* Copyright 2016-2017 NXP
*/
#ifndef __QI_H__
#define __QI_H__
#include <soc/fsl/qman.h>
#include "compat.h"
#include "desc.h"
#include "desc_constr.h"
/*
* CAAM hardware constructs a job descriptor which points to a shared descriptor
* (as pointed by context_a of to-CAAM FQ).
* When the job descriptor is executed by DECO, the whole job descriptor
* together with shared descriptor gets loaded in DECO buffer, which is
* 64 words (each 32-bit) long.
*
* The job descriptor constructed by CAAM hardware has the following layout:
*
* HEADER (1 word)
* Shdesc ptr (1 or 2 words)
* SEQ_OUT_PTR (1 word)
* Out ptr (1 or 2 words)
* Out length (1 word)
* SEQ_IN_PTR (1 word)
* In ptr (1 or 2 words)
* In length (1 word)
*
* The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
*
* Apart from the shdesc contents, the total number of words that get loaded
* into the DECO buffer is 8 or 11 (depending on whether the pointers take 1
* or 2 words each). The remaining words in the DECO buffer can be used for
* storing the shared descriptor.
*/
#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
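As a worked example of the MAX_SDLEN formula above (the numbers below are illustrative assumptions about the usual CAAM descriptor limits, not values taken from this patch):

/*
 * Illustrative arithmetic (assumed values): with a 64-word DECO buffer,
 * CAAM_DESC_BYTES_MAX = 64 * 4 = 256 bytes and CAAM_CMD_SZ = 4 bytes.
 * If DESC_JOB_IO_LEN comes to 44 bytes on a platform with 64-bit pointers
 * (the 11 job descriptor words listed above), then
 * MAX_SDLEN = (256 - 44) / 4 = 53 words are left for the shared descriptor.
 */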
extern bool caam_congested __read_mostly;
/*
* This is the request structure the driver application should fill while
* submitting a job to driver.
*/
struct caam_drv_req;
/*
* caam_qi_cbk - application's callback function invoked by the driver when the
* request has been processed.
* @drv_req: original request that was submitted
* @status: completion status of request (0 - success, non-zero - error code)
*/
typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
enum optype {
ENCRYPT,
DECRYPT,
GIVENCRYPT,
NUM_OP
};
/**
* caam_drv_ctx - CAAM/QI backend driver context
*
* The jobs are processed by the driver against a driver context.
* With every cryptographic context, a driver context is attached.
* The driver context contains data for private use by driver.
* For the applications, this is an opaque structure.
*
* @prehdr: preheader placed before shrd desc
* @sh_desc: shared descriptor
* @context_a: shared descriptor dma address
* @req_fq: to-CAAM request frame queue
* @rsp_fq: from-CAAM response frame queue
* @cpu: cpu on which to receive CAAM response
* @op_type: operation type
* @qidev: device pointer for CAAM/QI backend
*/
struct caam_drv_ctx {
u32 prehdr[2];
u32 sh_desc[MAX_SDLEN];
dma_addr_t context_a;
struct qman_fq *req_fq;
struct qman_fq *rsp_fq;
int cpu;
enum optype op_type;
struct device *qidev;
} ____cacheline_aligned;
/**
* caam_drv_req - The request structure the driver application should fill while
* submitting a job to driver.
* @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1])
* buffers.
* @cbk: callback function to invoke when job is completed
* @app_ctx: arbitrary context attached with request by the application
*
* The field below is not meant to be used by the application; it is for the
* driver's private use.
*
* @drv_ctx: driver context against which this request is submitted
*/
struct caam_drv_req {
struct qm_sg_entry fd_sgt[2];
struct caam_drv_ctx *drv_ctx;
caam_qi_cbk cbk;
void *app_ctx;
} ____cacheline_aligned;
/**
* caam_drv_ctx_init - Initialise a CAAM/QI driver context
*
* A CAAM/QI driver context must be attached with each cryptographic context.
* This function allocates memory for CAAM/QI context and returns a handle to
* the application. This handle must be submitted along with each enqueue
* request to the driver by the application.
*
* @cpu: CPU on which the application prefers the driver to receive CAAM
* responses. The request completion callback will be issued from this
* CPU.
* @sh_desc: shared descriptor pointer to be attached with CAAM/QI driver
* context.
*
* Returns a driver context on success or negative error code on failure.
*/
struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
u32 *sh_desc);
/**
* caam_qi_enqueue - Submit a request to QI backend driver.
*
* The request structure must be properly filled as described above.
*
* @qidev: device pointer for QI backend
* @req: CAAM QI request structure
*
* Returns 0 on success or negative error code on failure.
*/
int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
/**
* caam_drv_ctx_busy - Check if there are too many jobs pending with CAAM
* or too many CAAM responses are pending to be processed.
* @drv_ctx: driver context for which job is to be submitted
*
* Returns the CAAM congestion status (true/false)
*/
bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
/**
* caam_drv_ctx_update - Update QI driver context
*
* Invoked when the shared descriptor in the driver context needs to be changed.
*
* @drv_ctx: driver context to be updated
* @sh_desc: new shared descriptor pointer to be updated in QI driver context
*
* Returns 0 on success or negative error code on failure.
*/
int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
/**
* caam_drv_ctx_rel - Release a QI driver context
* @drv_ctx: context to be released
*/
void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
int caam_qi_init(struct platform_device *pdev);
int caam_qi_shutdown(struct device *dev);
/**
* qi_cache_alloc - Allocate buffers from CAAM-QI cache
*
* Invoked when a user of the CAAM-QI (e.g. caamalg-qi) needs a buffer to be
* allocated on the hot path. Instead of using kmalloc, one can use the
* services of the CAAM QI memory cache (backed by kmem_cache). The buffers
* will have a size of 256B, which is sufficient for hosting 16 SG entries.
*
* @flags: flags that would be used for an equivalent kmalloc() call
*
* Returns a pointer to a retrieved buffer on success or NULL on failure.
*/
void *qi_cache_alloc(gfp_t flags);
/**
* qi_cache_free - Frees buffers allocated from CAAM-QI cache
*
* Invoked when a user of the CAAM-QI (e.g. caamalg-qi) no longer needs
* the buffer previously allocated by a qi_cache_alloc call.
* No checking is done; the call is a pass-through to
* kmem_cache_free(...)
*
* @obj: object previously allocated using qi_cache_alloc()
*/
void qi_cache_free(void *obj);
#endif /* __QI_H__ */
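To make the API above concrete, here is a minimal, hypothetical usage sketch of the CAAM/QI backend calls declared in this header. Only the qi.h declarations and the dma_to_qm_sg_one() helper from the companion sg_sw_qm.h are real; the function names, parameters, GFP choice and error handling are assumptions for illustration, and caamalg-qi is the actual in-tree user of this interface.

/* Hypothetical caller of the CAAM/QI backend API - a sketch, not driver code. */
static void my_done_cb(struct caam_drv_req *drv_req, u32 status)
{
	if (status)
		pr_err("CAAM/QI request failed: %u\n", status);
	/* post-process drv_req->app_ctx here */
}

static int my_submit(struct device *qidev, u32 *sh_desc, dma_addr_t src,
		     u32 src_len, dma_addr_t dst, u32 dst_len)
{
	struct caam_drv_ctx *drv_ctx;
	struct caam_drv_req *req;
	int cpu = 0;	/* preferred CPU for CAAM responses */
	int ret;

	/* Attach a driver context carrying the shared descriptor */
	drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
	if (IS_ERR(drv_ctx))
		return PTR_ERR(drv_ctx);

	/* Back off if CAAM (or the response path) is congested */
	if (caam_drv_ctx_busy(drv_ctx)) {
		ret = -EBUSY;
		goto out_rel;
	}

	req = qi_cache_alloc(GFP_ATOMIC);
	if (!req) {
		ret = -ENOMEM;
		goto out_rel;
	}

	/* fd_sgt[0] describes the output buffer, fd_sgt[1] the input buffer */
	dma_to_qm_sg_one(&req->fd_sgt[0], dst, dst_len, 0);
	dma_to_qm_sg_one(&req->fd_sgt[1], src, src_len, 0);
	req->drv_ctx = drv_ctx;
	req->cbk = my_done_cb;
	req->app_ctx = NULL;

	ret = caam_qi_enqueue(qidev, req);
	if (ret) {
		qi_cache_free(req);
		goto out_rel;
	}
	/*
	 * Completion is reported asynchronously via my_done_cb(); in real use
	 * the driver context would be kept around for further requests.
	 */
	return 0;

out_rel:
	caam_drv_ctx_rel(drv_ctx);
	return ret;
}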


@ -0,0 +1,108 @@
/*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
* Copyright 2016-2017 NXP
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SG_SW_QM_H
#define __SG_SW_QM_H
#include <soc/fsl/qman.h>
#include "regs.h"
static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
u16 offset)
{
qm_sg_entry_set64(qm_sg_ptr, dma);
qm_sg_ptr->__reserved2 = 0;
qm_sg_ptr->bpid = 0;
qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
}
static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
dma_addr_t dma, u32 len, u16 offset)
{
__dma_to_qm_sg(qm_sg_ptr, dma, offset);
qm_sg_entry_set_len(qm_sg_ptr, len);
}
static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
dma_addr_t dma, u32 len, u16 offset)
{
__dma_to_qm_sg(qm_sg_ptr, dma, offset);
qm_sg_entry_set_f(qm_sg_ptr, len);
}
static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
dma_addr_t dma, u32 len, u16 offset)
{
__dma_to_qm_sg(qm_sg_ptr, dma, offset);
qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
}
static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
dma_addr_t dma, u32 len,
u16 offset)
{
__dma_to_qm_sg(qm_sg_ptr, dma, offset);
qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
(len & QM_SG_LEN_MASK));
}
/*
* Convert a scatterlist to the h/w link table format, but do not set the
* final bit; instead, return a pointer to the last entry.
*/
static inline struct qm_sg_entry *
sg_to_qm_sg(struct scatterlist *sg, int sg_count,
struct qm_sg_entry *qm_sg_ptr, u16 offset)
{
while (sg_count && sg) {
dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
sg_dma_len(sg), offset);
qm_sg_ptr++;
sg = sg_next(sg);
sg_count--;
}
return qm_sg_ptr - 1;
}
/*
* Convert a scatterlist to the h/w link table format and set the final bit
* on the last entry; the scatterlist must have been previously DMA mapped.
*/
static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
struct qm_sg_entry *qm_sg_ptr, u16 offset)
{
qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
}
#endif /* __SG_SW_QM_H */
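A brief, hypothetical usage sketch of the helpers above: converting an already-mapped scatterlist into a QMan S/G table. Only sg_to_qm_sg_last() comes from this header; the wrapper function, its name and its parameters are assumptions.

/* Illustrative only: build a QMan S/G table from a DMA-mapped scatterlist. */
static int fill_qm_sg_table(struct device *dev, struct scatterlist *sg,
			    int nents, struct qm_sg_entry *qm_sgt)
{
	int mapped;

	/* The scatterlist must be DMA mapped before conversion */
	mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* Convert all mapped entries and mark the last one as final */
	sg_to_qm_sg_last(sg, mapped, qm_sgt, 0);

	return mapped;
}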


@ -0,0 +1,4 @@
#
# Makefile for Cavium crypto device drivers
#
obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += zip/


@ -0,0 +1,11 @@
#
# Makefile for Cavium's ZIP Driver.
#
obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += thunderx_zip.o
thunderx_zip-y := zip_main.o \
zip_device.o \
zip_crypto.o \
zip_mem.o \
zip_deflate.o \
zip_inflate.o


@ -0,0 +1,202 @@
/***********************license start************************************
* Copyright (c) 2003-2017 Cavium, Inc.
* All rights reserved.
*
* License: one of 'Cavium License' or 'GNU General Public License Version 2'
*
* This file is provided under the terms of the Cavium License (see below)
* or under the terms of GNU General Public License, Version 2, as
* published by the Free Software Foundation. When using or redistributing
* this file, you may do so under either license.
*
* Cavium License: Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Cavium Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* This Software, including technical data, may be subject to U.S. export
* control laws, including the U.S. Export Administration Act and its
* associated regulations, and may be subject to export or import
* regulations in other countries.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
* OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
* RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
* WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
* PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
* ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
* ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
* WITH YOU.
***********************license end**************************************/
#ifndef __COMMON_H__
#define __COMMON_H__
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/version.h>
/* Device specific zlib function definitions */
#include "zip_device.h"
/* ZIP device definitions */
#include "zip_main.h"
/* ZIP memory allocation/deallocation related definitions */
#include "zip_mem.h"
/* Device specific structure definitions */
#include "zip_regs.h"
#define ZIP_ERROR -1
#define ZIP_FLUSH_FINISH 4
#define RAW_FORMAT 0 /* for rawpipe */
#define ZLIB_FORMAT 1 /* for zpipe */
#define GZIP_FORMAT 2 /* for gzpipe */
#define LZS_FORMAT 3 /* for lzspipe */
/* Max number of ZIP devices supported */
#define MAX_ZIP_DEVICES 2
/* Configures the number of zip queues to be used */
#define ZIP_NUM_QUEUES 2
#define DYNAMIC_STOP_EXCESS 1024
/* Maximum buffer sizes in direct mode */
#define MAX_INPUT_BUFFER_SIZE (64 * 1024)
#define MAX_OUTPUT_BUFFER_SIZE (64 * 1024)
/**
* struct zip_operation - common data structure for comp and decomp operations
* @input: Next input byte is read from here
* @output: Next output byte written here
* @ctx_addr: Inflate context buffer address
* @history: Pointer to the history buffer
* @input_len: Number of bytes available at next_in
* @input_total_len: Total number of input bytes read
* @output_len: Remaining free space at next_out
* @output_total_len: Total number of bytes output so far
* @csum: Checksum value of the uncompressed data
* @flush: Flush flag
* @format: Format (depends on stream's wrap)
* @speed: Speed depends on stream's level
* @ccode: Compression code (stream's strategy)
* @lzs_flag: Flag for LZS support
* @begin_file: Beginning of file indication for inflate
* @history_len: Size of the history data
* @end_file: Ending of the file indication for inflate
* @compcode: Completion status of the ZIP invocation
* @bytes_read: Input bytes read in current instruction
* @bits_processed: Total bits processed for entire file
* @sizeofptr: To distinguish between ILP32 and LP64
* @sizeofzops: Optional just for padding
*
* This structure is used to maintain the required meta data for the
* comp and decomp operations.
*/
struct zip_operation {
u8 *input;
u8 *output;
u64 ctx_addr;
u64 history;
u32 input_len;
u32 input_total_len;
u32 output_len;
u32 output_total_len;
u32 csum;
u32 flush;
u32 format;
u32 speed;
u32 ccode;
u32 lzs_flag;
u32 begin_file;
u32 history_len;
u32 end_file;
u32 compcode;
u32 bytes_read;
u32 bits_processed;
u32 sizeofptr;
u32 sizeofzops;
};
/* error messages */
#define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
fmt "\n", __func__, __LINE__, ## args)
#ifdef MSG_ENABLE
/* Enable all messages */
#define zip_msg(fmt, args...) pr_info("ZIP_MSG:" fmt "\n", ## args)
#else
#define zip_msg(fmt, args...)
#endif
#if defined(ZIP_DEBUG_ENABLE) && defined(MSG_ENABLE)
#ifdef DEBUG_LEVEL
#define FILE_NAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : \
strrchr(__FILE__, '\\') ? strrchr(__FILE__, '\\') + 1 : __FILE__)
#if DEBUG_LEVEL >= 4
#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
#elif DEBUG_LEVEL >= 3
#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
#elif DEBUG_LEVEL >= 2
#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s() : %d: " \
fmt "\n", __func__, __LINE__, ## args)
#else
#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
#endif /* DEBUG LEVEL >=4 */
#else
#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
#endif /* DEBUG_LEVEL */
#else
#define zip_dbg(fmt, args...)
#endif /* ZIP_DEBUG_ENABLE && MSG_ENABLE*/
#endif


@ -0,0 +1,313 @@
/***********************license start************************************
* Copyright (c) 2003-2017 Cavium, Inc.
* All rights reserved.
*
* License: one of 'Cavium License' or 'GNU General Public License Version 2'
*
* This file is provided under the terms of the Cavium License (see below)
* or under the terms of GNU General Public License, Version 2, as
* published by the Free Software Foundation. When using or redistributing
* this file, you may do so under either license.
*
* Cavium License: Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Cavium Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* This Software, including technical data, may be subject to U.S. export
* control laws, including the U.S. Export Administration Act and its
* associated regulations, and may be subject to export or import
* regulations in other countries.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
* OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
* RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
* WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
* PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
* ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
* ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
* WITH YOU.
***********************license end**************************************/
#include "zip_crypto.h"
static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
int lzs_flag)
{
zip_ops->flush = ZIP_FLUSH_FINISH;
/* equivalent to level 6 of opensource zlib */
zip_ops->speed = 1;
if (!lzs_flag) {
zip_ops->ccode = 0; /* Auto Huffman */
zip_ops->lzs_flag = 0;
zip_ops->format = ZLIB_FORMAT;
} else {
zip_ops->ccode = 3; /* LZS Encoding */
zip_ops->lzs_flag = 1;
zip_ops->format = LZS_FORMAT;
}
zip_ops->begin_file = 1;
zip_ops->history_len = 0;
zip_ops->end_file = 1;
zip_ops->compcode = 0;
zip_ops->csum = 1; /* Adler checksum desired */
}
int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
{
struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp;
zip_static_init_zip_ops(comp_ctx, lzs_flag);
zip_static_init_zip_ops(decomp_ctx, lzs_flag);
comp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
if (!comp_ctx->input)
return -ENOMEM;
comp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
if (!comp_ctx->output)
goto err_comp_input;
decomp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
if (!decomp_ctx->input)
goto err_comp_output;
decomp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
if (!decomp_ctx->output)
goto err_decomp_input;
return 0;
err_decomp_input:
zip_data_buf_free(decomp_ctx->input, MAX_INPUT_BUFFER_SIZE);
err_comp_output:
zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
err_comp_input:
zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
return -ENOMEM;
}
void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
{
struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
struct zip_operation *dec_ctx = &zip_ctx->zip_decomp;
zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
zip_data_buf_free(dec_ctx->input, MAX_INPUT_BUFFER_SIZE);
zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
}
int zip_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen,
struct zip_kernel_ctx *zip_ctx)
{
struct zip_operation *zip_ops = NULL;
struct zip_state zip_state;
struct zip_device *zip = NULL;
int ret;
if (!zip_ctx || !src || !dst || !dlen)
return -ENOMEM;
zip = zip_get_device(zip_get_node_id());
if (!zip)
return -ENODEV;
memset(&zip_state, 0, sizeof(struct zip_state));
zip_ops = &zip_ctx->zip_comp;
zip_ops->input_len = slen;
zip_ops->output_len = *dlen;
memcpy(zip_ops->input, src, slen);
ret = zip_deflate(zip_ops, &zip_state, zip);
if (!ret) {
*dlen = zip_ops->output_len;
memcpy(dst, zip_ops->output, *dlen);
}
return ret;
}
int zip_decompress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen,
struct zip_kernel_ctx *zip_ctx)
{
struct zip_operation *zip_ops = NULL;
struct zip_state zip_state;
struct zip_device *zip = NULL;
int ret;
if (!zip_ctx || !src || !dst || !dlen)
return -ENOMEM;
zip = zip_get_device(zip_get_node_id());
if (!zip)
return -ENODEV;
memset(&zip_state, 0, sizeof(struct zip_state));
zip_ops = &zip_ctx->zip_decomp;
memcpy(zip_ops->input, src, slen);
/* Workaround for a bug in zlib which sometimes needs an extra byte */
if (zip_ops->ccode != 3) /* Not LZS Encoding */
zip_ops->input[slen++] = 0;
zip_ops->input_len = slen;
zip_ops->output_len = *dlen;
ret = zip_inflate(zip_ops, &zip_state, zip);
if (!ret) {
*dlen = zip_ops->output_len;
memcpy(dst, zip_ops->output, *dlen);
}
return ret;
}
/* Legacy Compress framework start */
int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm)
{
int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
ret = zip_ctx_init(zip_ctx, 0);
return ret;
}
int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm)
{
int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
ret = zip_ctx_init(zip_ctx, 1);
return ret;
}
void zip_free_comp_ctx(struct crypto_tfm *tfm)
{
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
zip_ctx_exit(zip_ctx);
}
int zip_comp_compress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
ret = zip_compress(src, slen, dst, dlen, zip_ctx);
return ret;
}
int zip_comp_decompress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
return ret;
} /* Legacy compress framework end */
/* SCOMP framework start */
void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm)
{
int ret;
struct zip_kernel_ctx *zip_ctx;
zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
if (!zip_ctx)
return ERR_PTR(-ENOMEM);
ret = zip_ctx_init(zip_ctx, 0);
if (ret) {
kzfree(zip_ctx);
return ERR_PTR(ret);
}
return zip_ctx;
}
void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm)
{
int ret;
struct zip_kernel_ctx *zip_ctx;
zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
if (!zip_ctx)
return ERR_PTR(-ENOMEM);
ret = zip_ctx_init(zip_ctx, 1);
if (ret) {
kzfree(zip_ctx);
return ERR_PTR(ret);
}
return zip_ctx;
}
void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *ctx)
{
struct zip_kernel_ctx *zip_ctx = ctx;
zip_ctx_exit(zip_ctx);
kzfree(zip_ctx);
}
int zip_scomp_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
int ret;
struct zip_kernel_ctx *zip_ctx = ctx;
ret = zip_compress(src, slen, dst, dlen, zip_ctx);
return ret;
}
int zip_scomp_decompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
int ret;
struct zip_kernel_ctx *zip_ctx = ctx;
ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
return ret;
} /* SCOMP framework end */
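For context, the SCOMP hooks above are meant to be plugged into the kernel's scomp framework. The sketch below shows one plausible wiring into a struct scomp_alg; the algorithm/driver names, priority and registration site are illustrative assumptions, and the driver's actual tables live in zip_main.c.

/* A sketch of how the hooks above could be wired up (not the driver's table). */
static struct scomp_alg zip_scomp_deflate_example = {
	.alloc_ctx	= zip_alloc_scomp_ctx_deflate,
	.free_ctx	= zip_free_scomp_ctx,
	.compress	= zip_scomp_compress,
	.decompress	= zip_scomp_decompress,
	.base		= {
		.cra_name		= "deflate",
		.cra_driver_name	= "deflate-scomp-example",
		.cra_module		= THIS_MODULE,
		.cra_priority		= 300,
	}
};

/* Registration would then be a single call, e.g. from the probe path:
 *	err = crypto_register_scomp(&zip_scomp_deflate_example);
 */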


@ -0,0 +1,79 @@
/***********************license start************************************
* Copyright (c) 2003-2017 Cavium, Inc.
* All rights reserved.
*
* License: one of 'Cavium License' or 'GNU General Public License Version 2'
*
* This file is provided under the terms of the Cavium License (see below)
* or under the terms of GNU General Public License, Version 2, as
* published by the Free Software Foundation. When using or redistributing
* this file, you may do so under either license.
*
* Cavium License: Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Cavium Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* This Software, including technical data, may be subject to U.S. export
* control laws, including the U.S. Export Administration Act and its
* associated regulations, and may be subject to export or import
* regulations in other countries.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
* OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
* RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
* WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
* PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
* ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
* ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
* WITH YOU.
***********************license end**************************************/
#ifndef __ZIP_CRYPTO_H__
#define __ZIP_CRYPTO_H__
#include <linux/crypto.h>
#include <crypto/internal/scompress.h>
#include "common.h"
#include "zip_deflate.h"
#include "zip_inflate.h"
struct zip_kernel_ctx {
struct zip_operation zip_comp;
struct zip_operation zip_decomp;
};
int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm);
int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm);
void zip_free_comp_ctx(struct crypto_tfm *tfm);
int zip_comp_compress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen);
int zip_comp_decompress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen);
void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm);
void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm);
void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *zip_ctx);
int zip_scomp_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx);
int zip_scomp_decompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx);
#endif


@ -0,0 +1,200 @@
/***********************license start************************************
* Copyright (c) 2003-2017 Cavium, Inc.
* All rights reserved.
*
* License: one of 'Cavium License' or 'GNU General Public License Version 2'
*
* This file is provided under the terms of the Cavium License (see below)
* or under the terms of GNU General Public License, Version 2, as
* published by the Free Software Foundation. When using or redistributing
* this file, you may do so under either license.
*
* Cavium License: Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Cavium Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* This Software, including technical data, may be subject to U.S. export
* control laws, including the U.S. Export Administration Act and its
* associated regulations, and may be subject to export or import
* regulations in other countries.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
* OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
* RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
* WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
* PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
* ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
* ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
* WITH YOU.
***********************license end**************************************/
#include <linux/delay.h>
#include <linux/sched.h>
#include "common.h"
#include "zip_deflate.h"
/* Prepares the deflate zip command */
static int prepare_zip_command(struct zip_operation *zip_ops,
struct zip_state *s, union zip_inst_s *zip_cmd)
{
union zip_zres_s *result_ptr = &s->result;
memset(zip_cmd, 0, sizeof(s->zip_cmd));
memset(result_ptr, 0, sizeof(s->result));
/* IWORD #0 */
/* History gather */
zip_cmd->s.hg = 0;
/* compression enable = 1 for deflate */
zip_cmd->s.ce = 1;
/* sf (sync flush) */
zip_cmd->s.sf = 1;
/* ef (end of file) */
if (zip_ops->flush == ZIP_FLUSH_FINISH) {
zip_cmd->s.ef = 1;
zip_cmd->s.sf = 0;
}
zip_cmd->s.cc = zip_ops->ccode;
/* ss (compression speed/storage) */
zip_cmd->s.ss = zip_ops->speed;
/* IWORD #1 */
/* adler checksum */
zip_cmd->s.adlercrc32 = zip_ops->csum;
zip_cmd->s.historylength = zip_ops->history_len;
zip_cmd->s.dg = 0;
/* IWORD # 6 and 7 - compression input/history pointer */
zip_cmd->s.inp_ptr_addr.s.addr = __pa(zip_ops->input);
zip_cmd->s.inp_ptr_ctl.s.length = (zip_ops->input_len +
zip_ops->history_len);
zip_cmd->s.ds = 0;
/* IWORD # 8 and 9 - Output pointer */
zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
/* maximum number of output-stream bytes that can be written */
zip_cmd->s.totaloutputlength = zip_ops->output_len;
/* IWORD # 10 and 11 - Result pointer */
zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
/* Clearing completion code */
result_ptr->s.compcode = 0;
return 0;
}
/**
* zip_deflate - API to offload deflate operation to hardware
* @zip_ops: Pointer to zip operation structure
* @s: Pointer to the structure representing zip state
* @zip_dev: Pointer to zip device structure
*
* This function prepares the zip deflate command and submits it to the zip
* engine for processing.
*
* Return: 0 if successful or error code
*/
int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
struct zip_device *zip_dev)
{
union zip_inst_s *zip_cmd = &s->zip_cmd;
union zip_zres_s *result_ptr = &s->result;
u32 queue;
/* Prepares zip command based on the input parameters */
prepare_zip_command(zip_ops, s, zip_cmd);
atomic64_add(zip_ops->input_len, &zip_dev->stats.comp_in_bytes);
/* Load the zip command into the command queue and ring the doorbell */
queue = zip_load_instr(zip_cmd, zip_dev);
/* Stats update for compression requests submitted */
atomic64_inc(&zip_dev->stats.comp_req_submit);
while (!result_ptr->s.compcode)
continue;
/* Stats update for compression requests completed */
atomic64_inc(&zip_dev->stats.comp_req_complete);
zip_ops->compcode = result_ptr->s.compcode;
switch (zip_ops->compcode) {
case ZIP_CMD_NOTDONE:
zip_dbg("Zip instruction not yet completed");
return ZIP_ERROR;
case ZIP_CMD_SUCCESS:
zip_dbg("Zip instruction completed successfully");
zip_update_cmd_bufs(zip_dev, queue);
break;
case ZIP_CMD_DTRUNC:
zip_dbg("Output Truncate error");
/* Returning ZIP_ERROR to avoid copy to user */
return ZIP_ERROR;
default:
zip_err("Zip instruction failed. Code:%d", zip_ops->compcode);
return ZIP_ERROR;
}
/* Update the CRC depending on the format */
switch (zip_ops->format) {
case RAW_FORMAT:
zip_dbg("RAW Format: %d ", zip_ops->format);
/* Get checksum from engine, need to feed it again */
zip_ops->csum = result_ptr->s.adler32;
break;
case ZLIB_FORMAT:
zip_dbg("ZLIB Format: %d ", zip_ops->format);
zip_ops->csum = result_ptr->s.adler32;
break;
case GZIP_FORMAT:
zip_dbg("GZIP Format: %d ", zip_ops->format);
zip_ops->csum = result_ptr->s.crc32;
break;
case LZS_FORMAT:
zip_dbg("LZS Format: %d ", zip_ops->format);
break;
default:
zip_err("Unknown Format:%d\n", zip_ops->format);
}
atomic64_add(result_ptr->s.totalbyteswritten,
&zip_dev->stats.comp_out_bytes);
/* Update output_len */
if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
/* Dynamic stop && strm->output_len < zipconstants[onfsize] */
zip_err("output_len (%d) < total bytes written(%d)\n",
zip_ops->output_len, result_ptr->s.totalbyteswritten);
zip_ops->output_len = 0;
} else {
zip_ops->output_len = result_ptr->s.totalbyteswritten;
}
return 0;
}


@ -0,0 +1,62 @@
/***********************license start************************************
* Copyright (c) 2003-2017 Cavium, Inc.
* All rights reserved.
*
* License: one of 'Cavium License' or 'GNU General Public License Version 2'
*
* This file is provided under the terms of the Cavium License (see below)
* or under the terms of GNU General Public License, Version 2, as
* published by the Free Software Foundation. When using or redistributing
* this file, you may do so under either license.
*
* Cavium License: Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Cavium Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* This Software, including technical data, may be subject to U.S. export
* control laws, including the U.S. Export Administration Act and its
* associated regulations, and may be subject to export or import
* regulations in other countries.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
* OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
* RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
* WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
* PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
* ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
* ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
* WITH YOU.
***********************license end**************************************/
#ifndef __ZIP_DEFLATE_H__
#define __ZIP_DEFLATE_H__
/**
* zip_deflate - API to offload deflate operation to hardware
* @zip_ops: Pointer to zip operation structure
* @s: Pointer to the structure representing zip state
* @zip_dev: Pointer to the structure representing zip device
*
* This function prepares the zip deflate command and submits it to the zip
* engine by ringing the doorbell.
*
* Return: 0 if successful or error code
*/
int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
struct zip_device *zip_dev);
#endif


@ -0,0 +1,202 @@
/***********************license start************************************
* Copyright (c) 2003-2017 Cavium, Inc.
* All rights reserved.
*
* License: one of 'Cavium License' or 'GNU General Public License Version 2'
*
* This file is provided under the terms of the Cavium License (see below)
* or under the terms of GNU General Public License, Version 2, as
* published by the Free Software Foundation. When using or redistributing
* this file, you may do so under either license.
*
* Cavium License: Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Cavium Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* This Software, including technical data, may be subject to U.S. export
* control laws, including the U.S. Export Administration Act and its
* associated regulations, and may be subject to export or import
* regulations in other countries.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
* OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
* RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
* WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
* PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
* ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
* ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
* WITH YOU.
***********************license end**************************************/
#include "common.h"
#include "zip_deflate.h"
/**
* zip_cmd_queue_consumed - Calculates the space consumed in the command queue.
*
* @zip_dev: Pointer to zip device structure
* @queue: Queue number
*
* Return: Bytes consumed in the command queue buffer.
*/
static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue)
{
return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *
sizeof(u64 *));
}
/**
* zip_load_instr - Submits the instruction into the ZIP command queue
* @instr: Pointer to the instruction to be submitted
* @zip_dev: Pointer to ZIP device structure to which the instruction is to
* be submitted
*
* This function copies the ZIP instruction to the command queue and rings the
* doorbell to notify the engine of the instruction submission. The command
* queue is maintained in a circular fashion. When there is space for exactly
* one instruction in the queue, next chunk pointer of the queue is made to
* point to the head of the queue, thus maintaining a circular queue.
*
* Return: Queue number to which the instruction was submitted
*/
u32 zip_load_instr(union zip_inst_s *instr,
struct zip_device *zip_dev)
{
union zip_quex_doorbell dbell;
u32 queue = 0;
u32 consumed = 0;
u64 *ncb_ptr = NULL;
union zip_nptr_s ncp;
/*
* Distribute the instructions between the enabled queues based on
* the CPU id.
*/
if (smp_processor_id() % 2 == 0)
queue = 0;
else
queue = 1;
zip_dbg("CPU Core: %d Queue number:%d", smp_processor_id(), queue);
/* Take cmd buffer lock */
spin_lock(&zip_dev->iq[queue].lock);
/*
* Command Queue implementation
* 1. If there is place for new instructions, push the cmd at sw_head.
* 2. If there is place for exactly one instruction, push the new cmd
* at the sw_head. Make sw_head point to the sw_tail to make it
* circular. Write sw_head's physical address to the "Next-Chunk
* Buffer Ptr" to make it cmd_hw_tail.
* 3. Ring the door bell.
*/
zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head);
zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail);
consumed = zip_cmd_queue_consumed(zip_dev, queue);
/* Check if there is space to push just one cmd */
if ((consumed + 128) == (ZIP_CMD_QBUF_SIZE - 8)) {
zip_dbg("Cmd queue space available for single command");
/* Space for one cmd; push it and make the queue circular */
memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
sizeof(union zip_inst_s));
zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
/* Now, point the "Next-Chunk Buffer Ptr" to sw_head */
ncb_ptr = zip_dev->iq[queue].sw_head;
zip_dbg("ncb addr :0x%lx sw_head addr :0x%lx",
ncb_ptr, zip_dev->iq[queue].sw_head - 16);
/* Using Circular command queue */
zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail;
/* Mark this buffer for free */
zip_dev->iq[queue].free_flag = 1;
/* Write new chunk buffer address at "Next-Chunk Buffer Ptr" */
ncp.u_reg64 = 0ull;
ncp.s.addr = __pa(zip_dev->iq[queue].sw_head);
*ncb_ptr = ncp.u_reg64;
zip_dbg("*ncb_ptr :0x%lx sw_head[phys] :0x%lx",
*ncb_ptr, __pa(zip_dev->iq[queue].sw_head));
zip_dev->iq[queue].pend_cnt++;
} else {
zip_dbg("Enough space is available for commands");
/* Push this cmd to cmd queue buffer */
memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
sizeof(union zip_inst_s));
zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
zip_dev->iq[queue].pend_cnt++;
}
zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
zip_dev->iq[queue].hw_tail);
zip_dbg(" Pushed the new cmd : pend_cnt : %d",
zip_dev->iq[queue].pend_cnt);
/* Ring the doorbell */
dbell.u_reg64 = 0ull;
dbell.s.dbell_cnt = 1;
zip_reg_write(dbell.u_reg64,
(zip_dev->reg_base + ZIP_QUEX_DOORBELL(queue)));
/* Unlock cmd buffer lock */
spin_unlock(&zip_dev->iq[queue].lock);
return queue;
}
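To restate the wrap-around handling in zip_load_instr() above (a summary of the code as written, with ZIP_CMD_QBUF_SIZE left symbolic since it is defined elsewhere):

/*
 * Each instruction takes 128 B (16 x 64-bit words) and the last 8 B of the
 * queue buffer are reserved for the "Next-Chunk Buffer Ptr". The special
 * case therefore fires when consumed + 128 == ZIP_CMD_QBUF_SIZE - 8, i.e.
 * when exactly one more instruction fits: the command is copied at sw_head,
 * the slot right after it becomes the next-chunk pointer, sw_head is reset
 * to sw_tail (the start of the buffer), and the physical address of the new
 * sw_head is written into that pointer so the hardware follows it back to
 * the beginning, giving a circular command queue.
 */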
/**
* zip_update_cmd_bufs - Updates the queue statistics after posting the
* instruction
* @zip_dev: Pointer to zip device structure
* @queue: Queue number
*/
void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue)
{
/* Take cmd buffer lock */
spin_lock(&zip_dev->iq[queue].lock);
/* Check if the previous buffer can be freed */
if (zip_dev->iq[queue].free_flag == 1) {
zip_dbg("Free flag. Free cmd buffer, adjust sw head and tail");
/* Reset the free flag */
zip_dev->iq[queue].free_flag = 0;
/* Point the hw_tail to start of the new chunk buffer */
zip_dev->iq[queue].hw_tail = zip_dev->iq[queue].sw_head;
} else {
zip_dbg("Free flag not set. increment hw tail");
zip_dev->iq[queue].hw_tail += 16; /* 16 64_bit words = 128B */
}
zip_dev->iq[queue].done_cnt++;
zip_dev->iq[queue].pend_cnt--;
zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
zip_dev->iq[queue].hw_tail);
zip_dbg(" Got CC : pend_cnt : %d\n", zip_dev->iq[queue].pend_cnt);
spin_unlock(&zip_dev->iq[queue].lock);
}


@ -0,0 +1,108 @@
/***********************license start************************************
* Copyright (c) 2003-2017 Cavium, Inc.
* All rights reserved.
*
* License: one of 'Cavium License' or 'GNU General Public License Version 2'
*
* This file is provided under the terms of the Cavium License (see below)
* or under the terms of GNU General Public License, Version 2, as
* published by the Free Software Foundation. When using or redistributing
* this file, you may do so under either license.
*
* Cavium License: Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Cavium Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* This Software, including technical data, may be subject to U.S. export
* control laws, including the U.S. Export Administration Act and its
* associated regulations, and may be subject to export or import
* regulations in other countries.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
* OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
* RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
* WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
* PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
* ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
* ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
* WITH YOU.
***********************license end**************************************/
#ifndef __ZIP_DEVICE_H__
#define __ZIP_DEVICE_H__
#include <linux/types.h>
#include "zip_main.h"
struct sg_info {
/*
* Pointer to the input data when scatter_gather == 0 and
* pointer to the input gather list buffer when scatter_gather == 1
*/
union zip_zptr_s *gather;
/*
* Pointer to the output data when scatter_gather == 0 and
* pointer to the output scatter list buffer when scatter_gather == 1
*/
union zip_zptr_s *scatter;
/*
* Holds size of the output buffer pointed by scatter list
* when scatter_gather == 1
*/
u64 scatter_buf_size;
/* for gather data */
u64 gather_enable;
/* for scatter data */
u64 scatter_enable;
/* Number of gather list pointers for gather data */
u32 gbuf_cnt;
/* Number of scatter list pointers for scatter data */
u32 sbuf_cnt;
/* Buffers allocation state */
u8 alloc_state;
};
/**
* struct zip_state - Structure representing the required information related
* to a command
* @zip_cmd: Pointer to zip instruction structure
* @result: Pointer to zip result structure
* @ctx: Context pointer for inflate
* @history: Decompression history pointer
* @sginfo: Scatter-gather info structure
*/
struct zip_state {
union zip_inst_s zip_cmd;
union zip_zres_s result;
union zip_zptr_s *ctx;
union zip_zptr_s *history;
struct sg_info sginfo;
};
#define ZIP_CONTEXT_SIZE 2048
#define ZIP_INFLATE_HISTORY_SIZE 32768
#define ZIP_DEFLATE_HISTORY_SIZE 32768
#endif


@ -0,0 +1,223 @@
/***********************license start************************************
* Copyright (c) 2003-2017 Cavium, Inc.
* All rights reserved.
*
* License: one of 'Cavium License' or 'GNU General Public License Version 2'
*
* This file is provided under the terms of the Cavium License (see below)
* or under the terms of GNU General Public License, Version 2, as
* published by the Free Software Foundation. When using or redistributing
* this file, you may do so under either license.
*
* Cavium License: Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Cavium Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* This Software, including technical data, may be subject to U.S. export
* control laws, including the U.S. Export Administration Act and its
* associated regulations, and may be subject to export or import
* regulations in other countries.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
* OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
* RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
* WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
* PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
* ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
* ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
* WITH YOU.
***********************license end**************************************/
#include <linux/delay.h>
#include <linux/sched.h>
#include "common.h"
#include "zip_inflate.h"
static int prepare_inflate_zcmd(struct zip_operation *zip_ops,
struct zip_state *s, union zip_inst_s *zip_cmd)
{
union zip_zres_s *result_ptr = &s->result;
memset(zip_cmd, 0, sizeof(s->zip_cmd));
memset(result_ptr, 0, sizeof(s->result));
/* IWORD#0 */
/* Decompression History Gather list - no gather list */
zip_cmd->s.hg = 0;
/* For decompression, CE must be 0x0. */
zip_cmd->s.ce = 0;
/* For decompression, SS must be 0x0. */
zip_cmd->s.ss = 0;
/* For decompression, SF should always be set. */
zip_cmd->s.sf = 1;
/* Begin File */
if (zip_ops->begin_file == 0)
zip_cmd->s.bf = 0;
else
zip_cmd->s.bf = 1;
zip_cmd->s.ef = 1;
/* 0: for Deflate decompression, 3: for LZS decompression */
zip_cmd->s.cc = zip_ops->ccode;
/* IWORD #1*/
/* adler checksum */
zip_cmd->s.adlercrc32 = zip_ops->csum;
/*
* HISTORYLENGTH must be 0x0 for any ZIP decompress operation.
* History data is added to a decompression operation via IWORD3.
*/
zip_cmd->s.historylength = 0;
zip_cmd->s.ds = 0;
/* IWORD # 8 and 9 - Output pointer */
zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
/* Maximum number of output-stream bytes that can be written */
zip_cmd->s.totaloutputlength = zip_ops->output_len;
zip_dbg("Data Direct Input case ");
/* IWORD # 6 and 7 - input pointer */
zip_cmd->s.dg = 0;
zip_cmd->s.inp_ptr_addr.s.addr = __pa((u8 *)zip_ops->input);
zip_cmd->s.inp_ptr_ctl.s.length = zip_ops->input_len;
/* IWORD # 10 and 11 - Result pointer */
zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
/* Clearing completion code */
result_ptr->s.compcode = 0;
/* Returning 0 for the time being. */
return 0;
}
/**
* zip_inflate - API to offload inflate operation to hardware
* @zip_ops: Pointer to zip operation structure
* @s: Pointer to the structure representing zip state
* @zip_dev: Pointer to zip device structure
*
* This function prepares the zip inflate command and submits it to the zip
* engine for processing.
*
* Return: 0 if successful or error code
*/
int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
struct zip_device *zip_dev)
{
union zip_inst_s *zip_cmd = &s->zip_cmd;
union zip_zres_s *result_ptr = &s->result;
u32 queue;
/* Prepare inflate zip command */
prepare_inflate_zcmd(zip_ops, s, zip_cmd);
atomic64_add(zip_ops->input_len, &zip_dev->stats.decomp_in_bytes);
/* Load inflate command to zip queue and ring the doorbell */
queue = zip_load_instr(zip_cmd, zip_dev);
/* Decompression requests submitted stats update */
atomic64_inc(&zip_dev->stats.decomp_req_submit);
while (!result_ptr->s.compcode)
continue;
/* Decompression requests completed stats update */
atomic64_inc(&zip_dev->stats.decomp_req_complete);
zip_ops->compcode = result_ptr->s.compcode;
switch (zip_ops->compcode) {
case ZIP_CMD_NOTDONE:
zip_dbg("Zip Instruction not yet completed\n");
return ZIP_ERROR;
case ZIP_CMD_SUCCESS:
zip_dbg("Zip Instruction completed successfully\n");
break;
case ZIP_CMD_DYNAMIC_STOP:
zip_dbg(" Dynamic stop Initiated\n");
break;
default:
zip_dbg("Instruction failed. Code = %d\n", zip_ops->compcode);
atomic64_inc(&zip_dev->stats.decomp_bad_reqs);
zip_update_cmd_bufs(zip_dev, queue);
return ZIP_ERROR;
}
zip_update_cmd_bufs(zip_dev, queue);
if ((zip_ops->ccode == 3) && (zip_ops->flush == 4) &&
(zip_ops->compcode != ZIP_CMD_DYNAMIC_STOP))
result_ptr->s.ef = 1;
zip_ops->csum = result_ptr->s.adler32;
atomic64_add(result_ptr->s.totalbyteswritten,
&zip_dev->stats.decomp_out_bytes);
if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
zip_err("output_len (%d) < total bytes written (%d)\n",
zip_ops->output_len, result_ptr->s.totalbyteswritten);
zip_ops->output_len = 0;
} else {
zip_ops->output_len = result_ptr->s.totalbyteswritten;
}
zip_ops->bytes_read = result_ptr->s.totalbytesread;
zip_ops->bits_processed = result_ptr->s.totalbitsprocessed;
zip_ops->end_file = result_ptr->s.ef;
if (zip_ops->end_file) {
switch (zip_ops->format) {
case RAW_FORMAT:
zip_dbg("RAW Format: %d ", zip_ops->format);
/* Get checksum from engine */
zip_ops->csum = result_ptr->s.adler32;
break;
case ZLIB_FORMAT:
zip_dbg("ZLIB Format: %d ", zip_ops->format);
zip_ops->csum = result_ptr->s.adler32;
break;
case GZIP_FORMAT:
zip_dbg("GZIP Format: %d ", zip_ops->format);
zip_ops->csum = result_ptr->s.crc32;
break;
case LZS_FORMAT:
zip_dbg("LZS Format: %d ", zip_ops->format);
break;
default:
zip_err("Format error:%d\n", zip_ops->format);
}
}
return 0;
}


@ -0,0 +1,62 @@
/***********************license start************************************
* Copyright (c) 2003-2017 Cavium, Inc.
* All rights reserved.
*
* License: one of 'Cavium License' or 'GNU General Public License Version 2'
*
* This file is provided under the terms of the Cavium License (see below)
* or under the terms of GNU General Public License, Version 2, as
* published by the Free Software Foundation. When using or redistributing
* this file, you may do so under either license.
*
* Cavium License: Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Cavium Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* This Software, including technical data, may be subject to U.S. export
* control laws, including the U.S. Export Administration Act and its
* associated regulations, and may be subject to export or import
* regulations in other countries.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
* OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
* RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
* WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
* PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
* ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
* ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
* WITH YOU.
***********************license end**************************************/
#ifndef __ZIP_INFLATE_H__
#define __ZIP_INFLATE_H__
/**
* zip_inflate - API to offload inflate operation to hardware
* @zip_ops: Pointer to zip operation structure
* @s: Pointer to the structure representing zip state
* @zip_dev: Pointer to the structure representing zip device
*
* This function prepares the zip inflate command and submits it to the zip
* engine for processing.
*
* Return: 0 if successful or error code
*/
int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
struct zip_device *zip_dev);
#endif


@ -0,0 +1,729 @@
/***********************license start************************************
* Copyright (c) 2003-2017 Cavium, Inc.
* All rights reserved.
*
* License: one of 'Cavium License' or 'GNU General Public License Version 2'
*
* This file is provided under the terms of the Cavium License (see below)
* or under the terms of GNU General Public License, Version 2, as
* published by the Free Software Foundation. When using or redistributing
* this file, you may do so under either license.
*
* Cavium License: Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Cavium Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* This Software, including technical data, may be subject to U.S. export
* control laws, including the U.S. Export Administration Act and its
* associated regulations, and may be subject to export or import
* regulations in other countries.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
* OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
* RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
* WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
* PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
* ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
* ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
* WITH YOU.
***********************license end**************************************/
#include "common.h"
#include "zip_crypto.h"
#define DRV_NAME "ThunderX-ZIP"
static struct zip_device *zip_dev[MAX_ZIP_DEVICES];
static const struct pci_device_id zip_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) },
{ 0, }
};
void zip_reg_write(u64 val, u64 __iomem *addr)
{
writeq(val, addr);
}
u64 zip_reg_read(u64 __iomem *addr)
{
return readq(addr);
}
/*
* Allocates new ZIP device structure
* Returns zip_device pointer or NULL if memory for zip_device cannot be allocated
*/
static struct zip_device *zip_alloc_device(struct pci_dev *pdev)
{
struct zip_device *zip = NULL;
int idx;
for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) {
if (!zip_dev[idx])
break;
}
/* To ensure that the index is within the limit */
if (idx < MAX_ZIP_DEVICES)
zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);
if (!zip)
return NULL;
zip_dev[idx] = zip;
zip->index = idx;
return zip;
}
/**
* zip_get_device - Get ZIP device based on node id of cpu
*
* @node: Node id of the current cpu
* Return: Pointer to Zip device structure
*/
struct zip_device *zip_get_device(int node)
{
if ((node < MAX_ZIP_DEVICES) && (node >= 0))
return zip_dev[node];
zip_err("ZIP device not found for node id %d\n", node);
return NULL;
}
/**
* zip_get_node_id - Get the node id of the current cpu
*
* Return: Node id of the current cpu
*/
int zip_get_node_id(void)
{
return cpu_to_node(smp_processor_id());
}
/* Initializes the ZIP h/w sub-system */
static int zip_init_hw(struct zip_device *zip)
{
union zip_cmd_ctl cmd_ctl;
union zip_constants constants;
union zip_que_ena que_ena;
union zip_quex_map que_map;
union zip_que_pri que_pri;
union zip_quex_sbuf_addr que_sbuf_addr;
union zip_quex_sbuf_ctl que_sbuf_ctl;
int q = 0;
/* Enable the ZIP Engine(Core) Clock */
cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL);
cmd_ctl.s.forceclk = 1;
zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL));
zip_msg("ZIP_CMD_CTL : 0x%016llx",
zip_reg_read(zip->reg_base + ZIP_CMD_CTL));
constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS);
zip->depth = constants.s.depth;
zip->onfsize = constants.s.onfsize;
zip->ctxsize = constants.s.ctxsize;
zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx",
zip->depth, zip->onfsize, zip->ctxsize);
/*
* Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to
* have the correct buffer pointer and size configured for each
* instruction queue.
*/
for (q = 0; q < ZIP_NUM_QUEUES; q++) {
que_sbuf_ctl.u_reg64 = 0ull;
que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64));
que_sbuf_ctl.s.inst_be = 0;
que_sbuf_ctl.s.stream_id = 0;
zip_reg_write(que_sbuf_ctl.u_reg64,
(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q,
zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
}
for (q = 0; q < ZIP_NUM_QUEUES; q++) {
memset(&zip->iq[q], 0x0, sizeof(struct zip_iq));
spin_lock_init(&zip->iq[q].lock);
if (zip_cmd_qbuf_alloc(zip, q)) {
while (q != 0) {
q--;
zip_cmd_qbuf_free(zip, q);
}
return -ENOMEM;
}
/* Initialize tail ptr to head */
zip->iq[q].sw_tail = zip->iq[q].sw_head;
zip->iq[q].hw_tail = zip->iq[q].sw_head;
/* Write the physical addr to register */
que_sbuf_addr.u_reg64 = 0ull;
que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >>
ZIP_128B_ALIGN);
zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q,
(u64)que_sbuf_addr.s.ptr);
zip_reg_write(que_sbuf_addr.u_reg64,
(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q,
zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
zip->iq[q].sw_head, zip->iq[q].sw_tail,
zip->iq[q].hw_tail);
zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr);
}
/*
* Queue-to-ZIP core mapping
* If a queue is not mapped to a particular core, it is equivalent to
* the ZIP core being disabled.
*/
que_ena.u_reg64 = 0x0ull;
/* Enabling queues based on ZIP_NUM_QUEUES */
for (q = 0; q < ZIP_NUM_QUEUES; q++)
que_ena.s.ena |= (0x1 << q);
zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA));
zip_msg("QUE_ENA : 0x%016llx",
zip_reg_read(zip->reg_base + ZIP_QUE_ENA));
for (q = 0; q < ZIP_NUM_QUEUES; q++) {
que_map.u_reg64 = 0ull;
/* Mapping each queue to two ZIP cores */
que_map.s.zce = 0x3;
zip_reg_write(que_map.u_reg64,
(zip->reg_base + ZIP_QUEX_MAP(q)));
zip_msg("QUE_MAP(%d) : 0x%016llx", q,
zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q)));
}
que_pri.u_reg64 = 0ull;
for (q = 0; q < ZIP_NUM_QUEUES; q++)
que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */
zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI));
zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI));
return 0;
}
static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct zip_device *zip = NULL;
int err;
zip = zip_alloc_device(pdev);
if (!zip)
return -ENOMEM;
dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index,
pdev->vendor, pdev->device, dev_to_node(dev));
pci_set_drvdata(pdev, zip);
zip->pdev = pdev;
err = pci_enable_device(pdev);
if (err) {
dev_err(dev, "Failed to enable PCI device");
goto err_free_device;
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x", err);
goto err_disable_device;
}
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "Unable to get usable DMA configuration\n");
goto err_release_regions;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "Unable to get 48-bit DMA for allocations\n");
goto err_release_regions;
}
/* MAP configuration registers */
zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0);
if (!zip->reg_base) {
dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting");
err = -ENOMEM;
goto err_release_regions;
}
/* Initialize ZIP Hardware */
err = zip_init_hw(zip);
if (err)
goto err_release_regions;
return 0;
err_release_regions:
if (zip->reg_base)
iounmap(zip->reg_base);
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
err_free_device:
pci_set_drvdata(pdev, NULL);
/* Remove zip_dev from zip_device list, free the zip_device memory */
zip_dev[zip->index] = NULL;
devm_kfree(dev, zip);
return err;
}
static void zip_remove(struct pci_dev *pdev)
{
struct zip_device *zip = pci_get_drvdata(pdev);
union zip_cmd_ctl cmd_ctl;
int q = 0;
if (!zip)
return;
if (zip->reg_base) {
cmd_ctl.u_reg64 = 0x0ull;
cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */
zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
iounmap(zip->reg_base);
}
pci_release_regions(pdev);
pci_disable_device(pdev);
/*
* Free Command Queue buffers. This free should be called for all
* the enabled Queues.
*/
for (q = 0; q < ZIP_NUM_QUEUES; q++)
zip_cmd_qbuf_free(zip, q);
pci_set_drvdata(pdev, NULL);
/* remove zip device from zip device list */
zip_dev[zip->index] = NULL;
}
/* PCI Sub-System Interface */
static struct pci_driver zip_driver = {
.name = DRV_NAME,
.id_table = zip_id_table,
.probe = zip_probe,
.remove = zip_remove,
};
/* Kernel Crypto Subsystem Interface */
static struct crypto_alg zip_comp_deflate = {
.cra_name = "deflate",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct zip_kernel_ctx),
.cra_priority = 300,
.cra_module = THIS_MODULE,
.cra_init = zip_alloc_comp_ctx_deflate,
.cra_exit = zip_free_comp_ctx,
.cra_u = { .compress = {
.coa_compress = zip_comp_compress,
.coa_decompress = zip_comp_decompress
} }
};
static struct crypto_alg zip_comp_lzs = {
.cra_name = "lzs",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct zip_kernel_ctx),
.cra_priority = 300,
.cra_module = THIS_MODULE,
.cra_init = zip_alloc_comp_ctx_lzs,
.cra_exit = zip_free_comp_ctx,
.cra_u = { .compress = {
.coa_compress = zip_comp_compress,
.coa_decompress = zip_comp_decompress
} }
};
static struct scomp_alg zip_scomp_deflate = {
.alloc_ctx = zip_alloc_scomp_ctx_deflate,
.free_ctx = zip_free_scomp_ctx,
.compress = zip_scomp_compress,
.decompress = zip_scomp_decompress,
.base = {
.cra_name = "deflate",
.cra_driver_name = "deflate-scomp",
.cra_module = THIS_MODULE,
.cra_priority = 300,
}
};
static struct scomp_alg zip_scomp_lzs = {
.alloc_ctx = zip_alloc_scomp_ctx_lzs,
.free_ctx = zip_free_scomp_ctx,
.compress = zip_scomp_compress,
.decompress = zip_scomp_decompress,
.base = {
.cra_name = "lzs",
.cra_driver_name = "lzs-scomp",
.cra_module = THIS_MODULE,
.cra_priority = 300,
}
};
static int zip_register_compression_device(void)
{
int ret;
ret = crypto_register_alg(&zip_comp_deflate);
if (ret < 0) {
zip_err("Deflate algorithm registration failed\n");
return ret;
}
ret = crypto_register_alg(&zip_comp_lzs);
if (ret < 0) {
zip_err("LZS algorithm registration failed\n");
goto err_unregister_alg_deflate;
}
ret = crypto_register_scomp(&zip_scomp_deflate);
if (ret < 0) {
zip_err("Deflate scomp algorithm registration failed\n");
goto err_unregister_alg_lzs;
}
ret = crypto_register_scomp(&zip_scomp_lzs);
if (ret < 0) {
zip_err("LZS scomp algorithm registration failed\n");
goto err_unregister_scomp_deflate;
}
return ret;
err_unregister_scomp_deflate:
crypto_unregister_scomp(&zip_scomp_deflate);
err_unregister_alg_lzs:
crypto_unregister_alg(&zip_comp_lzs);
err_unregister_alg_deflate:
crypto_unregister_alg(&zip_comp_deflate);
return ret;
}
static void zip_unregister_compression_device(void)
{
crypto_unregister_alg(&zip_comp_deflate);
crypto_unregister_alg(&zip_comp_lzs);
crypto_unregister_scomp(&zip_scomp_deflate);
crypto_unregister_scomp(&zip_scomp_lzs);
}
/*
* debugfs functions
*/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
/* Displays ZIP device statistics */
static int zip_show_stats(struct seq_file *s, void *unused)
{
u64 val = 0ull;
u64 avg_chunk = 0ull, avg_cr = 0ull;
u32 q = 0;
int index = 0;
struct zip_device *zip;
struct zip_stats *st;
for (index = 0; index < MAX_ZIP_DEVICES; index++) {
if (zip_dev[index]) {
zip = zip_dev[index];
st = &zip->stats;
/* Get all the pending requests */
for (q = 0; q < ZIP_NUM_QUEUES; q++) {
val = zip_reg_read((zip->reg_base +
ZIP_DBG_COREX_STA(q)));
val = (val >> 32);
val = val & 0xffffff;
atomic64_add(val, &st->pending_req);
}
avg_chunk = (atomic64_read(&st->comp_in_bytes) /
atomic64_read(&st->comp_req_complete));
avg_cr = (atomic64_read(&st->comp_in_bytes) /
atomic64_read(&st->comp_out_bytes));
seq_printf(s, " ZIP Device %d Stats\n"
"-----------------------------------\n"
"Comp Req Submitted : \t%lld\n"
"Comp Req Completed : \t%lld\n"
"Compress In Bytes : \t%lld\n"
"Compressed Out Bytes : \t%lld\n"
"Average Chunk size : \t%llu\n"
"Average Compression ratio : \t%llu\n"
"Decomp Req Submitted : \t%lld\n"
"Decomp Req Completed : \t%lld\n"
"Decompress In Bytes : \t%lld\n"
"Decompressed Out Bytes : \t%lld\n"
"Decompress Bad requests : \t%lld\n"
"Pending Req : \t%lld\n"
"---------------------------------\n",
index,
(u64)atomic64_read(&st->comp_req_submit),
(u64)atomic64_read(&st->comp_req_complete),
(u64)atomic64_read(&st->comp_in_bytes),
(u64)atomic64_read(&st->comp_out_bytes),
avg_chunk,
avg_cr,
(u64)atomic64_read(&st->decomp_req_submit),
(u64)atomic64_read(&st->decomp_req_complete),
(u64)atomic64_read(&st->decomp_in_bytes),
(u64)atomic64_read(&st->decomp_out_bytes),
(u64)atomic64_read(&st->decomp_bad_reqs),
(u64)atomic64_read(&st->pending_req));
/* Reset pending requests count */
atomic64_set(&st->pending_req, 0);
}
}
return 0;
}
/* Clears stats data */
static int zip_clear_stats(struct seq_file *s, void *unused)
{
int index = 0;
for (index = 0; index < MAX_ZIP_DEVICES; index++) {
if (zip_dev[index]) {
memset(&zip_dev[index]->stats, 0,
sizeof(struct zip_stats));
seq_printf(s, "Cleared stats for zip %d\n", index);
}
}
return 0;
}
static struct zip_registers zipregs[64] = {
{"ZIP_CMD_CTL ", 0x0000ull},
{"ZIP_THROTTLE ", 0x0010ull},
{"ZIP_CONSTANTS ", 0x00A0ull},
{"ZIP_QUE0_MAP ", 0x1400ull},
{"ZIP_QUE1_MAP ", 0x1408ull},
{"ZIP_QUE_ENA ", 0x0500ull},
{"ZIP_QUE_PRI ", 0x0508ull},
{"ZIP_QUE0_DONE ", 0x2000ull},
{"ZIP_QUE1_DONE ", 0x2008ull},
{"ZIP_QUE0_DOORBELL ", 0x4000ull},
{"ZIP_QUE1_DOORBELL ", 0x4008ull},
{"ZIP_QUE0_SBUF_ADDR ", 0x1000ull},
{"ZIP_QUE1_SBUF_ADDR ", 0x1008ull},
{"ZIP_QUE0_SBUF_CTL ", 0x1200ull},
{"ZIP_QUE1_SBUF_CTL ", 0x1208ull},
{ NULL, 0}
};
/* Prints registers' contents */
static int zip_print_regs(struct seq_file *s, void *unused)
{
u64 val = 0;
int i = 0, index = 0;
for (index = 0; index < MAX_ZIP_DEVICES; index++) {
if (zip_dev[index]) {
seq_printf(s, "--------------------------------\n"
" ZIP Device %d Registers\n"
"--------------------------------\n",
index);
i = 0;
while (zipregs[i].reg_name) {
val = zip_reg_read((zip_dev[index]->reg_base +
zipregs[i].reg_offset));
seq_printf(s, "%s: 0x%016llx\n",
zipregs[i].reg_name, val);
i++;
}
}
}
return 0;
}
static int zip_stats_open(struct inode *inode, struct file *file)
{
return single_open(file, zip_show_stats, NULL);
}
static const struct file_operations zip_stats_fops = {
.owner = THIS_MODULE,
.open = zip_stats_open,
.read = seq_read,
};
static int zip_clear_open(struct inode *inode, struct file *file)
{
return single_open(file, zip_clear_stats, NULL);
}
static const struct file_operations zip_clear_fops = {
.owner = THIS_MODULE,
.open = zip_clear_open,
.read = seq_read,
};
static int zip_regs_open(struct inode *inode, struct file *file)
{
return single_open(file, zip_print_regs, NULL);
}
static const struct file_operations zip_regs_fops = {
.owner = THIS_MODULE,
.open = zip_regs_open,
.read = seq_read,
};
/* Root directory for thunderx_zip debugfs entry */
static struct dentry *zip_debugfs_root;
static int __init zip_debugfs_init(void)
{
struct dentry *zip_stats, *zip_clear, *zip_regs;
if (!debugfs_initialized())
return -ENODEV;
zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);
if (!zip_debugfs_root)
return -ENOMEM;
/* Creating files for entries inside thunderx_zip directory */
zip_stats = debugfs_create_file("zip_stats", 0444,
zip_debugfs_root,
NULL, &zip_stats_fops);
if (!zip_stats)
goto failed_to_create;
zip_clear = debugfs_create_file("zip_clear", 0444,
zip_debugfs_root,
NULL, &zip_clear_fops);
if (!zip_clear)
goto failed_to_create;
zip_regs = debugfs_create_file("zip_regs", 0444,
zip_debugfs_root,
NULL, &zip_regs_fops);
if (!zip_regs)
goto failed_to_create;
return 0;
failed_to_create:
debugfs_remove_recursive(zip_debugfs_root);
return -ENOENT;
}
static void __exit zip_debugfs_exit(void)
{
debugfs_remove_recursive(zip_debugfs_root);
}
#else
static int __init zip_debugfs_init(void)
{
return 0;
}
static void __exit zip_debugfs_exit(void) { }
#endif
/* debugfs - end */
static int __init zip_init_module(void)
{
int ret;
zip_msg("%s\n", DRV_NAME);
ret = pci_register_driver(&zip_driver);
if (ret < 0) {
zip_err("ZIP: pci_register_driver() failed\n");
return ret;
}
/* Register with the Kernel Crypto Interface */
ret = zip_register_compression_device();
if (ret < 0) {
zip_err("ZIP: Kernel Crypto Registration failed\n");
goto err_pci_unregister;
}
/* comp-decomp statistics are handled with debugfs interface */
ret = zip_debugfs_init();
if (ret < 0) {
zip_err("ZIP: debugfs initialization failed\n");
goto err_crypto_unregister;
}
return ret;
err_crypto_unregister:
zip_unregister_compression_device();
err_pci_unregister:
pci_unregister_driver(&zip_driver);
return ret;
}
static void __exit zip_cleanup_module(void)
{
zip_debugfs_exit();
/* Unregister from the kernel crypto interface */
zip_unregister_compression_device();
/* Unregister this driver for pci zip devices */
pci_unregister_driver(&zip_driver);
}
module_init(zip_init_module);
module_exit(zip_cleanup_module);
MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, zip_id_table);
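For orientation, a minimal sketch (not part of this patch) of how an in-kernel caller could exercise the "deflate" algorithm registered above via the generic crypto_comp API. The function name, output-buffer sizing and error handling are illustrative assumptions; the crypto core chooses the backing implementation by priority, so the request is not guaranteed to hit the ZIP engine.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Illustrative only: compress slen bytes from src with whichever "deflate"
 * implementation the crypto core selects (possibly the ZIP engine above).
 */
static int example_deflate_compress(const u8 *src, unsigned int slen)
{
        struct crypto_comp *tfm;
        unsigned int dlen = 2 * slen;           /* crude worst-case bound */
        u8 *dst;
        int ret;

        tfm = crypto_alloc_comp("deflate", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        dst = kmalloc(dlen, GFP_KERNEL);
        if (!dst) {
                crypto_free_comp(tfm);
                return -ENOMEM;
        }

        ret = crypto_comp_compress(tfm, src, slen, dst, &dlen);
        if (!ret)
                pr_info("deflate: %u bytes in, %u bytes out\n", slen, dlen);

        kfree(dst);
        crypto_free_comp(tfm);
        return ret;
}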


@ -0,0 +1,121 @@
/***********************license start************************************
* Copyright (c) 2003-2017 Cavium, Inc.
* All rights reserved.
*
* License: one of 'Cavium License' or 'GNU General Public License Version 2'
*
* This file is provided under the terms of the Cavium License (see below)
* or under the terms of GNU General Public License, Version 2, as
* published by the Free Software Foundation. When using or redistributing
* this file, you may do so under either license.
*
* Cavium License: Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Cavium Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* This Software, including technical data, may be subject to U.S. export
* control laws, including the U.S. Export Administration Act and its
* associated regulations, and may be subject to export or import
* regulations in other countries.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
* OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
* RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
* WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
* PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
* ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
* ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
* WITH YOU.
***********************license end**************************************/
#ifndef __ZIP_MAIN_H__
#define __ZIP_MAIN_H__
#include "zip_device.h"
#include "zip_regs.h"
/* PCI device IDs */
#define PCI_DEVICE_ID_THUNDERX_ZIP 0xA01A
/* ZIP device BARs */
#define PCI_CFG_ZIP_PF_BAR0 0 /* Base addr for normal regs */
/* Maximum available zip queues */
#define ZIP_MAX_NUM_QUEUES 8
#define ZIP_128B_ALIGN 7
/* Command queue buffer size */
#define ZIP_CMD_QBUF_SIZE (8064 + 8)
struct zip_registers {
char *reg_name;
u64 reg_offset;
};
/* ZIP Compression - Decompression stats */
struct zip_stats {
atomic64_t comp_req_submit;
atomic64_t comp_req_complete;
atomic64_t decomp_req_submit;
atomic64_t decomp_req_complete;
atomic64_t pending_req;
atomic64_t comp_in_bytes;
atomic64_t comp_out_bytes;
atomic64_t decomp_in_bytes;
atomic64_t decomp_out_bytes;
atomic64_t decomp_bad_reqs;
};
/* ZIP Instruction Queue */
struct zip_iq {
u64 *sw_head;
u64 *sw_tail;
u64 *hw_tail;
u64 done_cnt;
u64 pend_cnt;
u64 free_flag;
/* ZIP IQ lock */
spinlock_t lock;
};
/* ZIP Device */
struct zip_device {
u32 index;
void __iomem *reg_base;
struct pci_dev *pdev;
/* Different ZIP Constants */
u64 depth;
u64 onfsize;
u64 ctxsize;
struct zip_iq iq[ZIP_MAX_NUM_QUEUES];
struct zip_stats stats;
};
/* Prototypes */
struct zip_device *zip_get_device(int node_id);
int zip_get_node_id(void);
void zip_reg_write(u64 val, u64 __iomem *addr);
u64 zip_reg_read(u64 __iomem *addr);
void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue);
u32 zip_load_instr(union zip_inst_s *instr, struct zip_device *zip_dev);
#endif /* __ZIP_MAIN_H__ */


@ -0,0 +1,114 @@
/***********************license start************************************
* Copyright (c) 2003-2017 Cavium, Inc.
* All rights reserved.
*
* License: one of 'Cavium License' or 'GNU General Public License Version 2'
*
* This file is provided under the terms of the Cavium License (see below)
* or under the terms of GNU General Public License, Version 2, as
* published by the Free Software Foundation. When using or redistributing
* this file, you may do so under either license.
*
* Cavium License: Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Cavium Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* This Software, including technical data, may be subject to U.S. export
* control laws, including the U.S. Export Administration Act and its
* associated regulations, and may be subject to export or import
* regulations in other countries.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
* OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
* RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
* WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
* PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
* ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
* ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
* WITH YOU.
***********************license end**************************************/
#include <linux/types.h>
#include <linux/vmalloc.h>
#include "common.h"
/**
* zip_cmd_qbuf_alloc - Allocates a cmd buffer for ZIP Instruction Queue
* @zip: Pointer to zip device structure
* @q: Queue number to allocate buffer to
* Return: 0 if successful, -ENOMEM otherwise
*/
int zip_cmd_qbuf_alloc(struct zip_device *zip, int q)
{
zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
get_order(ZIP_CMD_QBUF_SIZE));
if (!zip->iq[q].sw_head)
return -ENOMEM;
memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE);
zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head);
return 0;
}
/**
* zip_cmd_qbuf_free - Frees the cmd Queue buffer
* @zip: Pointer to zip device structure
* @q: Queue number to free buffer of
*/
void zip_cmd_qbuf_free(struct zip_device *zip, int q)
{
zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail);
free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE));
}
/**
* zip_data_buf_alloc - Allocates memory for a data buffer
* @size: Size of the buffer to allocate
* Returns: Pointer to the buffer allocated
*/
u8 *zip_data_buf_alloc(u64 size)
{
u8 *ptr;
ptr = (u8 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
get_order(size));
if (!ptr)
return NULL;
memset(ptr, 0, size);
zip_dbg("Data buffer allocation success\n");
return ptr;
}
/**
* zip_data_buf_free - Frees the memory of a data buffer
* @ptr: Pointer to the buffer
* @size: Buffer size
*/
void zip_data_buf_free(u8 *ptr, u64 size)
{
zip_dbg("Freeing data buffer 0x%lx\n", ptr);
free_pages((u64)ptr, get_order(size));
}


@ -0,0 +1,78 @@
/***********************license start************************************
* Copyright (c) 2003-2017 Cavium, Inc.
* All rights reserved.
*
* License: one of 'Cavium License' or 'GNU General Public License Version 2'
*
* This file is provided under the terms of the Cavium License (see below)
* or under the terms of GNU General Public License, Version 2, as
* published by the Free Software Foundation. When using or redistributing
* this file, you may do so under either license.
*
* Cavium License: Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Cavium Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* This Software, including technical data, may be subject to U.S. export
* control laws, including the U.S. Export Administration Act and its
* associated regulations, and may be subject to export or import
* regulations in other countries.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
* OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
* RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
* WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
* PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
* ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
* ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
* WITH YOU.
***********************license end**************************************/
#ifndef __ZIP_MEM_H__
#define __ZIP_MEM_H__
/**
* zip_cmd_qbuf_free - Frees the cmd Queue buffer
* @zip: Pointer to zip device structure
* @q: Queue number to free buffer of
*/
void zip_cmd_qbuf_free(struct zip_device *zip, int q);
/**
* zip_cmd_qbuf_alloc - Allocates a Chunk/cmd buffer for ZIP Inst(cmd) Queue
* @zip: Pointer to zip device structure
* @q: Queue number to allocate buffer to
* Return: 0 if successful, -ENOMEM otherwise
*/
int zip_cmd_qbuf_alloc(struct zip_device *zip, int q);
/**
* zip_data_buf_alloc - Allocates memory for a data buffer
* @size: Size of the buffer to allocate
* Returns: Pointer to the buffer allocated
*/
u8 *zip_data_buf_alloc(u64 size);
/**
* zip_data_buf_free - Frees the memory of a data buffer
* @ptr: Pointer to the buffer
* @size: Buffer size
*/
void zip_data_buf_free(u8 *ptr, u64 size);
#endif
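A tiny usage sketch for the data-buffer helpers declared above; the 4096-byte size is an arbitrary example and the wrapper function is hypothetical.

#include <linux/types.h>
#include "zip_mem.h"

/* Hypothetical helper showing the intended alloc/free pairing. */
static int example_zip_data_buf(void)
{
        u8 *buf = zip_data_buf_alloc(4096);     /* arbitrary example size */

        if (!buf)
                return -ENOMEM;

        /* ... stage input/output data for the ZIP engine here ... */

        zip_data_buf_free(buf, 4096);
        return 0;
}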

Diff not shown because of its large size.


@ -12,4 +12,6 @@ ccp-crypto-objs := ccp-crypto-main.o \
ccp-crypto-aes.o \
ccp-crypto-aes-cmac.o \
ccp-crypto-aes-xts.o \
ccp-crypto-aes-galois.o \
ccp-crypto-des3.o \
ccp-crypto-sha.o


@ -0,0 +1,252 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES GCM crypto API support
*
* Copyright (C) 2016 Advanced Micro Devices, Inc.
*
* Author: Gary R Hook <gary.hook@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/scatterwalk.h>
#include <linux/delay.h>
#include "ccp-crypto.h"
#define AES_GCM_IVSIZE 12
static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
{
return ret;
}
static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int key_len)
{
struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
switch (key_len) {
case AES_KEYSIZE_128:
ctx->u.aes.type = CCP_AES_TYPE_128;
break;
case AES_KEYSIZE_192:
ctx->u.aes.type = CCP_AES_TYPE_192;
break;
case AES_KEYSIZE_256:
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->u.aes.mode = CCP_AES_MODE_GCM;
ctx->u.aes.key_len = key_len;
memcpy(ctx->u.aes.key, key, key_len);
sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
return 0;
}
static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
return 0;
}
static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
struct ccp_aes_req_ctx *rctx = aead_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
int i;
int ret = 0;
if (!ctx->u.aes.key_len)
return -EINVAL;
if (ctx->u.aes.mode != CCP_AES_MODE_GCM)
return -EINVAL;
if (!req->iv)
return -EINVAL;
/*
* 5 parts:
* plaintext/ciphertext input
* AAD
* key
* IV
* Destination+tag buffer
*/
/* Prepare the IV: 12 bytes + an integer (counter) */
memcpy(rctx->iv, req->iv, AES_GCM_IVSIZE);
for (i = 0; i < 3; i++)
rctx->iv[i + AES_GCM_IVSIZE] = 0;
rctx->iv[AES_BLOCK_SIZE - 1] = 1;
/* Set up a scatterlist for the IV */
iv_sg = &rctx->iv_sg;
iv_len = AES_BLOCK_SIZE;
sg_init_one(iv_sg, rctx->iv, iv_len);
/* The AAD + plaintext are concatenated in the src buffer */
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
INIT_LIST_HEAD(&rctx->cmd.entry);
rctx->cmd.engine = CCP_ENGINE_AES;
rctx->cmd.u.aes.type = ctx->u.aes.type;
rctx->cmd.u.aes.mode = ctx->u.aes.mode;
rctx->cmd.u.aes.action = encrypt;
rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
rctx->cmd.u.aes.iv = iv_sg;
rctx->cmd.u.aes.iv_len = iv_len;
rctx->cmd.u.aes.src = req->src;
rctx->cmd.u.aes.src_len = req->cryptlen;
rctx->cmd.u.aes.aad_len = req->assoclen;
/* The cipher text + the tag are in the dst buffer */
rctx->cmd.u.aes.dst = req->dst;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
return ret;
}
static int ccp_aes_gcm_encrypt(struct aead_request *req)
{
return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_ENCRYPT);
}
static int ccp_aes_gcm_decrypt(struct aead_request *req)
{
return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_DECRYPT);
}
static int ccp_aes_gcm_cra_init(struct crypto_aead *tfm)
{
struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
ctx->complete = ccp_aes_gcm_complete;
ctx->u.aes.key_len = 0;
crypto_aead_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
return 0;
}
static void ccp_aes_gcm_cra_exit(struct crypto_tfm *tfm)
{
}
static struct aead_alg ccp_aes_gcm_defaults = {
.setkey = ccp_aes_gcm_setkey,
.setauthsize = ccp_aes_gcm_setauthsize,
.encrypt = ccp_aes_gcm_encrypt,
.decrypt = ccp_aes_gcm_decrypt,
.init = ccp_aes_gcm_cra_init,
.ivsize = AES_GCM_IVSIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ccp_ctx),
.cra_priority = CCP_CRA_PRIORITY,
.cra_type = &crypto_ablkcipher_type,
.cra_exit = ccp_aes_gcm_cra_exit,
.cra_module = THIS_MODULE,
},
};
struct ccp_aes_aead_def {
enum ccp_aes_mode mode;
unsigned int version;
const char *name;
const char *driver_name;
unsigned int blocksize;
unsigned int ivsize;
struct aead_alg *alg_defaults;
};
static struct ccp_aes_aead_def aes_aead_algs[] = {
{
.mode = CCP_AES_MODE_GHASH,
.version = CCP_VERSION(5, 0),
.name = "gcm(aes)",
.driver_name = "gcm-aes-ccp",
.blocksize = 1,
.ivsize = AES_BLOCK_SIZE,
.alg_defaults = &ccp_aes_gcm_defaults,
},
};
static int ccp_register_aes_aead(struct list_head *head,
const struct ccp_aes_aead_def *def)
{
struct ccp_crypto_aead *ccp_aead;
struct aead_alg *alg;
int ret;
ccp_aead = kzalloc(sizeof(*ccp_aead), GFP_KERNEL);
if (!ccp_aead)
return -ENOMEM;
INIT_LIST_HEAD(&ccp_aead->entry);
ccp_aead->mode = def->mode;
/* Copy the defaults and override as necessary */
alg = &ccp_aead->alg;
*alg = *def->alg_defaults;
snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
alg->base.cra_blocksize = def->blocksize;
alg->base.cra_ablkcipher.ivsize = def->ivsize;
ret = crypto_register_aead(alg);
if (ret) {
pr_err("%s ablkcipher algorithm registration error (%d)\n",
alg->base.cra_name, ret);
kfree(ccp_aead);
return ret;
}
list_add(&ccp_aead->entry, head);
return 0;
}
int ccp_register_aes_aeads(struct list_head *head)
{
int i, ret;
unsigned int ccpversion = ccp_version();
for (i = 0; i < ARRAY_SIZE(aes_aead_algs); i++) {
if (aes_aead_algs[i].version > ccpversion)
continue;
ret = ccp_register_aes_aead(head, &aes_aead_algs[i]);
if (ret)
return ret;
}
return 0;
}


@ -0,0 +1,254 @@
/*
* AMD Cryptographic Coprocessor (CCP) DES3 crypto API support
*
* Copyright (C) 2016 Advanced Micro Devices, Inc.
*
* Author: Gary R Hook <ghook@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
#include "ccp-crypto.h"
static int ccp_des3_complete(struct crypto_async_request *async_req, int ret)
{
struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
if (ret)
return ret;
if (ctx->u.des3.mode != CCP_DES3_MODE_ECB)
memcpy(req->info, rctx->iv, DES3_EDE_BLOCK_SIZE);
return 0;
}
static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
struct ccp_crypto_ablkcipher_alg *alg =
ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
u32 *flags = &tfm->base.crt_flags;
/* From des_generic.c:
*
* RFC2451:
* If the first two or last two independent 64-bit keys are
* equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
* same as DES. Implementers MUST reject keys that exhibit this
* property.
*/
const u32 *K = (const u32 *)key;
if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
!((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
(*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
/* It's not clear that there is any support for a keysize of 112.
* If needed, the caller should make K1 == K3
*/
ctx->u.des3.type = CCP_DES3_TYPE_168;
ctx->u.des3.mode = alg->mode;
ctx->u.des3.key_len = key_len;
memcpy(ctx->u.des3.key, key, key_len);
sg_init_one(&ctx->u.des3.key_sg, ctx->u.des3.key, key_len);
return 0;
}
static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
int ret;
if (!ctx->u.des3.key_len)
return -EINVAL;
if (((ctx->u.des3.mode == CCP_DES3_MODE_ECB) ||
(ctx->u.des3.mode == CCP_DES3_MODE_CBC)) &&
(req->nbytes & (DES3_EDE_BLOCK_SIZE - 1)))
return -EINVAL;
if (ctx->u.des3.mode != CCP_DES3_MODE_ECB) {
if (!req->info)
return -EINVAL;
memcpy(rctx->iv, req->info, DES3_EDE_BLOCK_SIZE);
iv_sg = &rctx->iv_sg;
iv_len = DES3_EDE_BLOCK_SIZE;
sg_init_one(iv_sg, rctx->iv, iv_len);
}
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
INIT_LIST_HEAD(&rctx->cmd.entry);
rctx->cmd.engine = CCP_ENGINE_DES3;
rctx->cmd.u.des3.type = ctx->u.des3.type;
rctx->cmd.u.des3.mode = ctx->u.des3.mode;
rctx->cmd.u.des3.action = (encrypt)
? CCP_DES3_ACTION_ENCRYPT
: CCP_DES3_ACTION_DECRYPT;
rctx->cmd.u.des3.key = &ctx->u.des3.key_sg;
rctx->cmd.u.des3.key_len = ctx->u.des3.key_len;
rctx->cmd.u.des3.iv = iv_sg;
rctx->cmd.u.des3.iv_len = iv_len;
rctx->cmd.u.des3.src = req->src;
rctx->cmd.u.des3.src_len = req->nbytes;
rctx->cmd.u.des3.dst = req->dst;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
return ret;
}
static int ccp_des3_encrypt(struct ablkcipher_request *req)
{
return ccp_des3_crypt(req, true);
}
static int ccp_des3_decrypt(struct ablkcipher_request *req)
{
return ccp_des3_crypt(req, false);
}
static int ccp_des3_cra_init(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->complete = ccp_des3_complete;
ctx->u.des3.key_len = 0;
tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_des3_req_ctx);
return 0;
}
static void ccp_des3_cra_exit(struct crypto_tfm *tfm)
{
}
static struct crypto_alg ccp_des3_defaults = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ccp_ctx),
.cra_priority = CCP_CRA_PRIORITY,
.cra_type = &crypto_ablkcipher_type,
.cra_init = ccp_des3_cra_init,
.cra_exit = ccp_des3_cra_exit,
.cra_module = THIS_MODULE,
.cra_ablkcipher = {
.setkey = ccp_des3_setkey,
.encrypt = ccp_des3_encrypt,
.decrypt = ccp_des3_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
},
};
struct ccp_des3_def {
enum ccp_des3_mode mode;
unsigned int version;
const char *name;
const char *driver_name;
unsigned int blocksize;
unsigned int ivsize;
struct crypto_alg *alg_defaults;
};
static struct ccp_des3_def des3_algs[] = {
{
.mode = CCP_DES3_MODE_ECB,
.version = CCP_VERSION(5, 0),
.name = "ecb(des3_ede)",
.driver_name = "ecb-des3-ccp",
.blocksize = DES3_EDE_BLOCK_SIZE,
.ivsize = 0,
.alg_defaults = &ccp_des3_defaults,
},
{
.mode = CCP_DES3_MODE_CBC,
.version = CCP_VERSION(5, 0),
.name = "cbc(des3_ede)",
.driver_name = "cbc-des3-ccp",
.blocksize = DES3_EDE_BLOCK_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.alg_defaults = &ccp_des3_defaults,
},
};
static int ccp_register_des3_alg(struct list_head *head,
const struct ccp_des3_def *def)
{
struct ccp_crypto_ablkcipher_alg *ccp_alg;
struct crypto_alg *alg;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
if (!ccp_alg)
return -ENOMEM;
INIT_LIST_HEAD(&ccp_alg->entry);
ccp_alg->mode = def->mode;
/* Copy the defaults and override as necessary */
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
alg->cra_blocksize = def->blocksize;
alg->cra_ablkcipher.ivsize = def->ivsize;
ret = crypto_register_alg(alg);
if (ret) {
pr_err("%s ablkcipher algorithm registration error (%d)\n",
alg->cra_name, ret);
kfree(ccp_alg);
return ret;
}
list_add(&ccp_alg->entry, head);
return 0;
}
int ccp_register_des3_algs(struct list_head *head)
{
int i, ret;
unsigned int ccpversion = ccp_version();
for (i = 0; i < ARRAY_SIZE(des3_algs); i++) {
if (des3_algs[i].version > ccpversion)
continue;
ret = ccp_register_des3_alg(head, &des3_algs[i]);
if (ret)
return ret;
}
return 0;
}
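Similarly, a speculative sketch of how the "cbc(des3_ede)" cipher registered above might be used through the symmetric skcipher API; names and lengths are illustrative, and asynchronous completion (-EINPROGRESS/-EBUSY) is not handled here.

#include <crypto/des.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative CBC encrypt of nbytes (a multiple of DES3_EDE_BLOCK_SIZE). */
static int example_des3_cbc_encrypt(struct scatterlist *src,
                                    struct scatterlist *dst,
                                    unsigned int nbytes, u8 *iv, const u8 *key)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        int ret;

        tfm = crypto_alloc_skcipher("cbc(des3_ede)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_skcipher_setkey(tfm, key, DES3_EDE_KEY_SIZE);
        if (ret)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        skcipher_request_set_crypt(req, src, dst, nbytes, iv);
        ret = crypto_skcipher_encrypt(req);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return ret;
}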


@ -33,9 +33,14 @@ static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");
/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);
static LIST_HEAD(aead_algs);
/* For any tfm, requests for that tfm must be returned on the order
* received. With multiple queues available, the CCP can process more
@ -335,6 +340,16 @@ static int ccp_register_algs(void)
ret = ccp_register_aes_xts_algs(&cipher_algs);
if (ret)
return ret;
ret = ccp_register_aes_aeads(&aead_algs);
if (ret)
return ret;
}
if (!des3_disable) {
ret = ccp_register_des3_algs(&cipher_algs);
if (ret)
return ret;
}
if (!sha_disable) {
@ -350,6 +365,7 @@ static void ccp_unregister_algs(void)
{
struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
struct ccp_crypto_aead *aead_alg, *aead_tmp;
list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
crypto_unregister_ahash(&ahash_alg->alg);
@ -362,6 +378,12 @@ static void ccp_unregister_algs(void)
list_del(&ablk_alg->entry);
kfree(ablk_alg);
}
list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
crypto_unregister_aead(&aead_alg->alg);
list_del(&aead_alg->entry);
kfree(aead_alg);
}
}
static int ccp_crypto_init(void)


@ -146,6 +146,12 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
case CCP_SHA_TYPE_256:
rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
break;
case CCP_SHA_TYPE_384:
rctx->cmd.u.sha.ctx_len = SHA384_DIGEST_SIZE;
break;
case CCP_SHA_TYPE_512:
rctx->cmd.u.sha.ctx_len = SHA512_DIGEST_SIZE;
break;
default:
/* Should never get here */
break;
@ -393,6 +399,22 @@ static struct ccp_sha_def sha_algs[] = {
.digest_size = SHA256_DIGEST_SIZE,
.block_size = SHA256_BLOCK_SIZE,
},
{
.version = CCP_VERSION(5, 0),
.name = "sha384",
.drv_name = "sha384-ccp",
.type = CCP_SHA_TYPE_384,
.digest_size = SHA384_DIGEST_SIZE,
.block_size = SHA384_BLOCK_SIZE,
},
{
.version = CCP_VERSION(5, 0),
.name = "sha512",
.drv_name = "sha512-ccp",
.type = CCP_SHA_TYPE_512,
.digest_size = SHA512_DIGEST_SIZE,
.block_size = SHA512_BLOCK_SIZE,
},
};
static int ccp_register_hmac_alg(struct list_head *head,


@ -19,10 +19,14 @@
#include <linux/ccp.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/aead.h>
#include <crypto/aead.h>
#include <crypto/ctr.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#define CCP_LOG_LEVEL KERN_INFO
#define CCP_CRA_PRIORITY 300
struct ccp_crypto_ablkcipher_alg {
@ -33,6 +37,14 @@ struct ccp_crypto_ablkcipher_alg {
struct crypto_alg alg;
};
struct ccp_crypto_aead {
struct list_head entry;
u32 mode;
struct aead_alg alg;
};
struct ccp_crypto_ahash_alg {
struct list_head entry;
@ -95,6 +107,9 @@ struct ccp_aes_req_ctx {
struct scatterlist iv_sg;
u8 iv[AES_BLOCK_SIZE];
struct scatterlist tag_sg;
u8 tag[AES_BLOCK_SIZE];
/* Fields used for RFC3686 requests */
u8 *rfc3686_info;
u8 rfc3686_iv[AES_BLOCK_SIZE];
@ -137,9 +152,29 @@ struct ccp_aes_cmac_exp_ctx {
u8 buf[AES_BLOCK_SIZE];
};
/***** SHA related defines *****/
#define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE
#define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
/***** 3DES related defines *****/
struct ccp_des3_ctx {
enum ccp_engine engine;
enum ccp_des3_type type;
enum ccp_des3_mode mode;
struct scatterlist key_sg;
unsigned int key_len;
u8 key[AES_MAX_KEY_SIZE];
};
struct ccp_des3_req_ctx {
struct scatterlist iv_sg;
u8 iv[AES_BLOCK_SIZE];
struct ccp_cmd cmd;
};
/* SHA-related defines
* These values must be large enough to accommodate any variant
*/
#define MAX_SHA_CONTEXT_SIZE SHA512_DIGEST_SIZE
#define MAX_SHA_BLOCK_SIZE SHA512_BLOCK_SIZE
struct ccp_sha_ctx {
struct scatterlist opad_sg;
@ -199,6 +234,7 @@ struct ccp_ctx {
union {
struct ccp_aes_ctx aes;
struct ccp_sha_ctx sha;
struct ccp_des3_ctx des3;
} u;
};
@ -210,6 +246,8 @@ struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
int ccp_register_aes_algs(struct list_head *head);
int ccp_register_aes_cmac_algs(struct list_head *head);
int ccp_register_aes_xts_algs(struct list_head *head);
int ccp_register_aes_aeads(struct list_head *head);
int ccp_register_sha_algs(struct list_head *head);
int ccp_register_des3_algs(struct list_head *head);
#endif
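With "sha384" and "sha512" now advertised (and the SHA context/block limits above enlarged to match), a minimal ahash sketch for a one-shot SHA-384 digest might look as follows. The buffer handling is an assumption and asynchronous completion is elided; the digest buffer must hold at least SHA384_DIGEST_SIZE bytes.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative one-shot digest; whichever "sha384" implementation has the
 * highest priority (CCP or software) services the request.
 */
static int example_sha384_digest(const u8 *data, unsigned int len, u8 *digest)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        int ret;

        tfm = crypto_alloc_ahash("sha384", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        /* A real caller installs a completion callback for the async case. */
        ahash_request_set_callback(req, 0, NULL, NULL);
        sg_init_one(&sg, data, len);
        ahash_request_set_crypt(req, &sg, digest, len);

        ret = crypto_ahash_digest(req);         /* may be -EINPROGRESS if async */

        ahash_request_free(req);
out_free_tfm:
        crypto_free_ahash(tfm);
        return ret;
}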


@ -315,17 +315,73 @@ static int ccp_perform_ecc(struct ccp_op *op)
return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
{
iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
}
static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
{
iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
}
static void ccp_irq_bh(unsigned long data)
{
struct ccp_device *ccp = (struct ccp_device *)data;
struct ccp_cmd_queue *cmd_q;
u32 q_int, status;
unsigned int i;
status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
q_int = status & (cmd_q->int_ok | cmd_q->int_err);
if (q_int) {
cmd_q->int_status = status;
cmd_q->q_status = ioread32(cmd_q->reg_status);
cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
/* On error, only save the first error value */
if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
cmd_q->int_rcvd = 1;
/* Acknowledge the interrupt and wake the kthread */
iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
wake_up_interruptible(&cmd_q->int_queue);
}
}
ccp_enable_queue_interrupts(ccp);
}
static irqreturn_t ccp_irq_handler(int irq, void *data)
{
struct device *dev = data;
struct ccp_device *ccp = dev_get_drvdata(dev);
ccp_disable_queue_interrupts(ccp);
if (ccp->use_tasklet)
tasklet_schedule(&ccp->irq_tasklet);
else
ccp_irq_bh((unsigned long)ccp);
return IRQ_HANDLED;
}
static int ccp_init(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
struct ccp_cmd_queue *cmd_q;
struct dma_pool *dma_pool;
char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
unsigned int qmr, qim, i;
unsigned int qmr, i;
int ret;
/* Find available queues */
qim = 0;
ccp->qim = 0;
qmr = ioread32(ccp->io_regs + Q_MASK_REG);
for (i = 0; i < MAX_HW_QUEUES; i++) {
if (!(qmr & (1 << i)))
@ -370,7 +426,7 @@ static int ccp_init(struct ccp_device *ccp)
init_waitqueue_head(&cmd_q->int_queue);
/* Build queue interrupt mask (two interrupts per queue) */
qim |= cmd_q->int_ok | cmd_q->int_err;
ccp->qim |= cmd_q->int_ok | cmd_q->int_err;
#ifdef CONFIG_ARM64
/* For arm64 set the recommended queue cache settings */
@ -388,14 +444,14 @@ static int ccp_init(struct ccp_device *ccp)
dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
/* Disable and clear interrupts until ready */
iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
ccp_disable_queue_interrupts(ccp);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
ioread32(cmd_q->reg_int_status);
ioread32(cmd_q->reg_status);
}
iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
/* Request an irq */
ret = ccp->get_irq(ccp);
@ -404,6 +460,11 @@ static int ccp_init(struct ccp_device *ccp)
goto e_pool;
}
/* Initialize the ISR tasklet? */
if (ccp->use_tasklet)
tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
(unsigned long)ccp);
dev_dbg(dev, "Starting threads...\n");
/* Create a kthread for each queue */
for (i = 0; i < ccp->cmd_q_count; i++) {
@ -426,7 +487,7 @@ static int ccp_init(struct ccp_device *ccp)
dev_dbg(dev, "Enabling interrupts...\n");
/* Enable interrupts */
iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
ccp_enable_queue_interrupts(ccp);
dev_dbg(dev, "Registering device...\n");
ccp_add_device(ccp);
@ -463,7 +524,7 @@ static void ccp_destroy(struct ccp_device *ccp)
{
struct ccp_cmd_queue *cmd_q;
struct ccp_cmd *cmd;
unsigned int qim, i;
unsigned int i;
/* Unregister the DMA engine */
ccp_dmaengine_unregister(ccp);
@ -474,22 +535,15 @@ static void ccp_destroy(struct ccp_device *ccp)
/* Remove this device from the list of available units */
ccp_del_device(ccp);
/* Build queue interrupt mask (two interrupt masks per queue) */
qim = 0;
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
qim |= cmd_q->int_ok | cmd_q->int_err;
}
/* Disable and clear interrupts */
iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
ccp_disable_queue_interrupts(ccp);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
ioread32(cmd_q->reg_int_status);
ioread32(cmd_q->reg_status);
}
iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
/* Stop the queue kthreads */
for (i = 0; i < ccp->cmd_q_count; i++)
@ -516,43 +570,10 @@ static void ccp_destroy(struct ccp_device *ccp)
}
}
static irqreturn_t ccp_irq_handler(int irq, void *data)
{
struct device *dev = data;
struct ccp_device *ccp = dev_get_drvdata(dev);
struct ccp_cmd_queue *cmd_q;
u32 q_int, status;
unsigned int i;
status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
q_int = status & (cmd_q->int_ok | cmd_q->int_err);
if (q_int) {
cmd_q->int_status = status;
cmd_q->q_status = ioread32(cmd_q->reg_status);
cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
/* On error, only save the first error value */
if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
cmd_q->int_rcvd = 1;
/* Acknowledge the interrupt and wake the kthread */
iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
wake_up_interruptible(&cmd_q->int_queue);
}
}
return IRQ_HANDLED;
}
static const struct ccp_actions ccp3_actions = {
.aes = ccp_perform_aes,
.xts_aes = ccp_perform_xts_aes,
.des3 = NULL,
.sha = ccp_perform_sha,
.rsa = ccp_perform_rsa,
.passthru = ccp_perform_passthru,


@ -107,6 +107,12 @@ union ccp_function {
u16 rsvd:5;
u16 type:2;
} aes_xts;
struct {
u16 size:7;
u16 encrypt:1;
u16 mode:5;
u16 type:2;
} des3;
struct {
u16 rsvd1:10;
u16 type:4;
@ -139,6 +145,10 @@ union ccp_function {
#define CCP_AES_TYPE(p) ((p)->aes.type)
#define CCP_XTS_SIZE(p) ((p)->aes_xts.size)
#define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt)
#define CCP_DES3_SIZE(p) ((p)->des3.size)
#define CCP_DES3_ENCRYPT(p) ((p)->des3.encrypt)
#define CCP_DES3_MODE(p) ((p)->des3.mode)
#define CCP_DES3_TYPE(p) ((p)->des3.type)
#define CCP_SHA_TYPE(p) ((p)->sha.type)
#define CCP_RSA_SIZE(p) ((p)->rsa.size)
#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap)
@ -388,6 +398,47 @@ static int ccp5_perform_sha(struct ccp_op *op)
return ccp5_do_cmd(&desc, op->cmd_q);
}
static int ccp5_perform_des3(struct ccp_op *op)
{
struct ccp5_desc desc;
union ccp_function function;
u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
/* Zero out all the fields of the command desc */
memset(&desc, 0, sizeof(struct ccp5_desc));
CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_DES3;
CCP5_CMD_SOC(&desc) = op->soc;
CCP5_CMD_IOC(&desc) = 1;
CCP5_CMD_INIT(&desc) = op->init;
CCP5_CMD_EOM(&desc) = op->eom;
CCP5_CMD_PROT(&desc) = 0;
function.raw = 0;
CCP_DES3_ENCRYPT(&function) = op->u.des3.action;
CCP_DES3_MODE(&function) = op->u.des3.mode;
CCP_DES3_TYPE(&function) = op->u.des3.type;
CCP5_CMD_FUNCTION(&desc) = function.raw;
CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
CCP5_CMD_KEY_HI(&desc) = 0;
CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
return ccp5_do_cmd(&desc, op->cmd_q);
}
static int ccp5_perform_rsa(struct ccp_op *op)
{
struct ccp5_desc desc;
@ -435,6 +486,7 @@ static int ccp5_perform_passthru(struct ccp_op *op)
struct ccp_dma_info *saddr = &op->src.u.dma;
struct ccp_dma_info *daddr = &op->dst.u.dma;
memset(&desc, 0, Q_DESC_SIZE);
CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;
@ -653,6 +705,65 @@ static int ccp_assign_lsbs(struct ccp_device *ccp)
return rc;
}
static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
{
unsigned int i;
for (i = 0; i < ccp->cmd_q_count; i++)
iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
}
static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
{
unsigned int i;
for (i = 0; i < ccp->cmd_q_count; i++)
iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
}
static void ccp5_irq_bh(unsigned long data)
{
struct ccp_device *ccp = (struct ccp_device *)data;
u32 status;
unsigned int i;
for (i = 0; i < ccp->cmd_q_count; i++) {
struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
status = ioread32(cmd_q->reg_interrupt_status);
if (status) {
cmd_q->int_status = status;
cmd_q->q_status = ioread32(cmd_q->reg_status);
cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
/* On error, only save the first error value */
if ((status & INT_ERROR) && !cmd_q->cmd_error)
cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
cmd_q->int_rcvd = 1;
/* Acknowledge the interrupt and wake the kthread */
iowrite32(status, cmd_q->reg_interrupt_status);
wake_up_interruptible(&cmd_q->int_queue);
}
}
ccp5_enable_queue_interrupts(ccp);
}
static irqreturn_t ccp5_irq_handler(int irq, void *data)
{
struct device *dev = data;
struct ccp_device *ccp = dev_get_drvdata(dev);
ccp5_disable_queue_interrupts(ccp);
if (ccp->use_tasklet)
tasklet_schedule(&ccp->irq_tasklet);
else
ccp5_irq_bh((unsigned long)ccp);
return IRQ_HANDLED;
}
static int ccp5_init(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
@ -729,6 +840,7 @@ static int ccp5_init(struct ccp_device *ccp)
dev_dbg(dev, "queue #%u available\n", i);
}
if (ccp->cmd_q_count == 0) {
dev_notice(dev, "no command queues available\n");
ret = -EIO;
@ -736,19 +848,18 @@ static int ccp5_init(struct ccp_device *ccp)
}
/* Turn off the queues and disable interrupts until ready */
ccp5_disable_queue_interrupts(ccp);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
cmd_q->qcontrol = 0; /* Start with nothing */
iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
/* Disable the interrupts */
iowrite32(0x00, cmd_q->reg_int_enable);
ioread32(cmd_q->reg_int_status);
ioread32(cmd_q->reg_status);
/* Clear the interrupts */
iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
/* Clear the interrupt status */
iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
}
dev_dbg(dev, "Requesting an IRQ...\n");
@ -758,6 +869,10 @@ static int ccp5_init(struct ccp_device *ccp)
dev_err(dev, "unable to allocate an IRQ\n");
goto e_pool;
}
/* Initialize the ISR tasklet */
if (ccp->use_tasklet)
tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
(unsigned long)ccp);
dev_dbg(dev, "Loading LSB map...\n");
/* Copy the private LSB mask to the public registers */
@ -826,11 +941,7 @@ static int ccp5_init(struct ccp_device *ccp)
}
dev_dbg(dev, "Enabling interrupts...\n");
/* Enable interrupts */
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
iowrite32(ALL_INTERRUPTS, cmd_q->reg_int_enable);
}
ccp5_enable_queue_interrupts(ccp);
dev_dbg(dev, "Registering device...\n");
/* Put this on the unit list to make it available */
@ -882,17 +993,15 @@ static void ccp5_destroy(struct ccp_device *ccp)
ccp_del_device(ccp);
/* Disable and clear interrupts */
ccp5_disable_queue_interrupts(ccp);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
/* Turn off the run bit */
iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
/* Disable the interrupts */
iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
/* Clear the interrupt status */
iowrite32(0x00, cmd_q->reg_int_enable);
iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
ioread32(cmd_q->reg_int_status);
ioread32(cmd_q->reg_status);
}
@ -925,38 +1034,6 @@ static void ccp5_destroy(struct ccp_device *ccp)
}
}
static irqreturn_t ccp5_irq_handler(int irq, void *data)
{
struct device *dev = data;
struct ccp_device *ccp = dev_get_drvdata(dev);
u32 status;
unsigned int i;
for (i = 0; i < ccp->cmd_q_count; i++) {
struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
status = ioread32(cmd_q->reg_interrupt_status);
if (status) {
cmd_q->int_status = status;
cmd_q->q_status = ioread32(cmd_q->reg_status);
cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
/* On error, only save the first error value */
if ((status & INT_ERROR) && !cmd_q->cmd_error)
cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
cmd_q->int_rcvd = 1;
/* Acknowledge the interrupt and wake the kthread */
iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
wake_up_interruptible(&cmd_q->int_queue);
}
}
return IRQ_HANDLED;
}
static void ccp5_config(struct ccp_device *ccp)
{
/* Public side */
@ -994,6 +1071,7 @@ static const struct ccp_actions ccp5_actions = {
.aes = ccp5_perform_aes,
.xts_aes = ccp5_perform_xts_aes,
.sha = ccp5_perform_sha,
.des3 = ccp5_perform_des3,
.rsa = ccp5_perform_rsa,
.passthru = ccp5_perform_passthru,
.ecc = ccp5_perform_ecc,

View file

@ -109,9 +109,8 @@
#define INT_COMPLETION 0x1
#define INT_ERROR 0x2
#define INT_QUEUE_STOPPED 0x4
#define ALL_INTERRUPTS (INT_COMPLETION| \
INT_ERROR| \
INT_QUEUE_STOPPED)
#define INT_EMPTY_QUEUE 0x8
#define SUPPORTED_INTERRUPTS (INT_COMPLETION | INT_ERROR)
#define LSB_REGION_WIDTH 5
#define MAX_LSB_CNT 8
@ -194,6 +193,9 @@
#define CCP_XTS_AES_KEY_SB_COUNT 1
#define CCP_XTS_AES_CTX_SB_COUNT 1
#define CCP_DES3_KEY_SB_COUNT 1
#define CCP_DES3_CTX_SB_COUNT 1
#define CCP_SHA_SB_COUNT 1
#define CCP_RSA_MAX_WIDTH 4096
@ -337,7 +339,10 @@ struct ccp_device {
void *dev_specific;
int (*get_irq)(struct ccp_device *ccp);
void (*free_irq)(struct ccp_device *ccp);
unsigned int qim;
unsigned int irq;
bool use_tasklet;
struct tasklet_struct irq_tasklet;
/* I/O area used for device communication. The register mapping
* starts at an offset into the mapped bar.
@ -424,33 +429,33 @@ enum ccp_memtype {
};
#define CCP_MEMTYPE_LSB CCP_MEMTYPE_KSB
struct ccp_dma_info {
dma_addr_t address;
unsigned int offset;
unsigned int length;
enum dma_data_direction dir;
};
} __packed __aligned(4);
struct ccp_dm_workarea {
struct device *dev;
struct dma_pool *dma_pool;
unsigned int length;
u8 *address;
struct ccp_dma_info dma;
unsigned int length;
};
struct ccp_sg_workarea {
struct scatterlist *sg;
int nents;
unsigned int sg_used;
struct scatterlist *dma_sg;
struct device *dma_dev;
unsigned int dma_count;
enum dma_data_direction dma_dir;
unsigned int sg_used;
u64 bytes_left;
};
@ -479,6 +484,12 @@ struct ccp_xts_aes_op {
enum ccp_xts_aes_unit_size unit_size;
};
struct ccp_des3_op {
enum ccp_des3_type type;
enum ccp_des3_mode mode;
enum ccp_des3_action action;
};
struct ccp_sha_op {
enum ccp_sha_type type;
u64 msg_bits;
@ -516,6 +527,7 @@ struct ccp_op {
union {
struct ccp_aes_op aes;
struct ccp_xts_aes_op xts;
struct ccp_des3_op des3;
struct ccp_sha_op sha;
struct ccp_rsa_op rsa;
struct ccp_passthru_op passthru;
@ -624,13 +636,13 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp);
struct ccp_actions {
int (*aes)(struct ccp_op *);
int (*xts_aes)(struct ccp_op *);
int (*des3)(struct ccp_op *);
int (*sha)(struct ccp_op *);
int (*rsa)(struct ccp_op *);
int (*passthru)(struct ccp_op *);
int (*ecc)(struct ccp_op *);
u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int);
void (*sbfree)(struct ccp_cmd_queue *, unsigned int,
unsigned int);
void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int);
unsigned int (*get_free_slots)(struct ccp_cmd_queue *);
int (*init)(struct ccp_device *);
void (*destroy)(struct ccp_device *);

View file

@ -16,6 +16,7 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
#include <linux/ccp.h>
#include "ccp-dev.h"
@ -41,6 +42,20 @@ static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};
static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
};
static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
};
#define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
ccp_gen_jobid(ccp) : 0)
@ -586,6 +601,255 @@ e_key:
return ret;
}
static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
struct ccp_cmd *cmd)
{
struct ccp_aes_engine *aes = &cmd->u.aes;
struct ccp_dm_workarea key, ctx, final_wa, tag;
struct ccp_data src, dst;
struct ccp_data aad;
struct ccp_op op;
unsigned long long *final;
unsigned int dm_offset;
unsigned int ilen;
bool in_place = true; /* Default value */
int ret;
struct scatterlist *p_inp, sg_inp[2];
struct scatterlist *p_tag, sg_tag[2];
struct scatterlist *p_outp, sg_outp[2];
struct scatterlist *p_aad;
if (!aes->iv)
return -EINVAL;
if (!((aes->key_len == AES_KEYSIZE_128) ||
(aes->key_len == AES_KEYSIZE_192) ||
(aes->key_len == AES_KEYSIZE_256)))
return -EINVAL;
if (!aes->key) /* Gotta have a key SGL */
return -EINVAL;
/* First, decompose the source buffer into AAD & PT,
* and the destination buffer into AAD, CT & tag, or
* the input into CT & tag.
* It is expected that the input and output SGs will
* be valid, even if the AAD and input lengths are 0.
*/
p_aad = aes->src;
p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
if (aes->action == CCP_AES_ACTION_ENCRYPT) {
ilen = aes->src_len;
p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
} else {
/* Input length for decryption includes tag */
ilen = aes->src_len - AES_BLOCK_SIZE;
p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
}
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
op.sb_key = cmd_q->sb_key; /* Pre-allocated */
op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
op.init = 1;
op.u.aes.type = aes->type;
/* Copy the key to the LSB */
ret = ccp_init_dm_workarea(&key, cmd_q,
CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_TO_DEVICE);
if (ret)
return ret;
dm_offset = CCP_SB_BYTES - aes->key_len;
ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_key;
}
/* Copy the context (IV) to the LSB.
* There is an assumption here that the IV is 96 bits in length, plus
* a nonce of 32 bits. If no IV is present, use a zeroed buffer.
*/
ret = ccp_init_dm_workarea(&ctx, cmd_q,
CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_BIDIRECTIONAL);
if (ret)
goto e_key;
dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_ctx;
}
op.init = 1;
if (aes->aad_len > 0) {
/* Step 1: Run a GHASH over the Additional Authenticated Data */
ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
AES_BLOCK_SIZE,
DMA_TO_DEVICE);
if (ret)
goto e_ctx;
op.u.aes.mode = CCP_AES_MODE_GHASH;
op.u.aes.action = CCP_AES_GHASHAAD;
while (aad.sg_wa.bytes_left) {
ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
ret = cmd_q->ccp->vdata->perform->aes(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_aad;
}
ccp_process_data(&aad, NULL, &op);
op.init = 0;
}
}
op.u.aes.mode = CCP_AES_MODE_GCTR;
op.u.aes.action = aes->action;
if (ilen > 0) {
/* Step 2: Run a GCTR over the plaintext */
in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;
ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
AES_BLOCK_SIZE,
in_place ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE);
if (ret)
goto e_ctx;
if (in_place) {
dst = src;
} else {
ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
AES_BLOCK_SIZE, DMA_FROM_DEVICE);
if (ret)
goto e_src;
}
op.soc = 0;
op.eom = 0;
op.init = 1;
while (src.sg_wa.bytes_left) {
ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
if (!src.sg_wa.bytes_left) {
unsigned int nbytes = aes->src_len
% AES_BLOCK_SIZE;
if (nbytes) {
op.eom = 1;
op.u.aes.size = (nbytes * 8) - 1;
}
}
ret = cmd_q->ccp->vdata->perform->aes(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
ccp_process_data(&src, &dst, &op);
op.init = 0;
}
}
/* Step 3: Update the IV portion of the context with the original IV */
ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
/* Step 4: Concatenate the lengths of the AAD and source, and
* hash that 16 byte buffer.
*/
ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
if (ret)
goto e_dst;
final = (unsigned long long *) final_wa.address;
final[0] = cpu_to_be64(aes->aad_len * 8);
final[1] = cpu_to_be64(ilen * 8);
op.u.aes.mode = CCP_AES_MODE_GHASH;
op.u.aes.action = CCP_AES_GHASHFINAL;
op.src.type = CCP_MEMTYPE_SYSTEM;
op.src.u.dma.address = final_wa.dma.address;
op.src.u.dma.length = AES_BLOCK_SIZE;
op.dst.type = CCP_MEMTYPE_SYSTEM;
op.dst.u.dma.address = final_wa.dma.address;
op.dst.u.dma.length = AES_BLOCK_SIZE;
op.eom = 1;
op.u.aes.size = 0;
ret = cmd_q->ccp->vdata->perform->aes(&op);
if (ret)
goto e_dst;
if (aes->action == CCP_AES_ACTION_ENCRYPT) {
/* Put the ciphered tag after the ciphertext. */
ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
} else {
/* Does this ciphered tag match the input? */
ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
if (ret)
goto e_tag;
ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
ccp_dm_free(&tag);
}
e_tag:
ccp_dm_free(&final_wa);
e_dst:
if (aes->src_len && !in_place)
ccp_free_data(&dst, cmd_q);
e_src:
if (aes->src_len)
ccp_free_data(&src, cmd_q);
e_aad:
if (aes->aad_len)
ccp_free_data(&aad, cmd_q);
e_ctx:
ccp_dm_free(&ctx);
e_key:
ccp_dm_free(&key);
return ret;
}
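For orientation, ccp_run_aes_gcm_cmd() is reached by submitting a ccp_cmd with the AES engine and GCM mode; src carries the AAD followed by the plaintext, and dst receives the AAD, the ciphertext and the 16-byte tag. The sketch below is an editorial illustration only: the field names are the ones read above, but the enum spellings CCP_ENGINE_AES and CCP_AES_TYPE_128 and the callback plumbing are assumptions, not something this patch adds.
#include <linux/string.h>
#include <linux/ccp.h>
/* Rough sketch of submitting a GCM encrypt request (illustration only). */
static int submit_gcm_encrypt(struct ccp_cmd *cmd,     /* must outlive the request */
                              struct scatterlist *key, unsigned int key_len,
                              struct scatterlist *iv, unsigned int iv_len,
                              struct scatterlist *src, struct scatterlist *dst,
                              u64 ptext_len, unsigned int aad_len,
                              void (*done)(void *data, int err), void *data)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->engine = CCP_ENGINE_AES;
        cmd->u.aes.type = CCP_AES_TYPE_128;     /* assumed: 128-bit key */
        cmd->u.aes.mode = CCP_AES_MODE_GCM;
        cmd->u.aes.action = CCP_AES_ACTION_ENCRYPT;
        cmd->u.aes.key = key;                   /* scatterlist of key_len bytes */
        cmd->u.aes.key_len = key_len;
        cmd->u.aes.iv = iv;                     /* 96-bit IV plus 32-bit nonce, per the comment above */
        cmd->u.aes.iv_len = iv_len;
        cmd->u.aes.src = src;                   /* AAD followed by plaintext */
        cmd->u.aes.dst = dst;                   /* AAD, ciphertext, then tag */
        cmd->u.aes.src_len = ptext_len;         /* plaintext only, AAD excluded */
        cmd->u.aes.aad_len = aad_len;
        cmd->callback = done;                   /* completion is asynchronous */
        cmd->data = data;
        return ccp_enqueue_cmd(cmd);
}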
static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_aes_engine *aes = &cmd->u.aes;
@ -599,6 +863,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (aes->mode == CCP_AES_MODE_CMAC)
return ccp_run_aes_cmac_cmd(cmd_q, cmd);
if (aes->mode == CCP_AES_MODE_GCM)
return ccp_run_aes_gcm_cmd(cmd_q, cmd);
if (!((aes->key_len == AES_KEYSIZE_128) ||
(aes->key_len == AES_KEYSIZE_192) ||
(aes->key_len == AES_KEYSIZE_256)))
@ -925,6 +1192,200 @@ e_key:
return ret;
}
static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_des3_engine *des3 = &cmd->u.des3;
struct ccp_dm_workarea key, ctx;
struct ccp_data src, dst;
struct ccp_op op;
unsigned int dm_offset;
unsigned int len_singlekey;
bool in_place = false;
int ret;
/* Error checks */
if (!cmd_q->ccp->vdata->perform->des3)
return -EINVAL;
if (des3->key_len != DES3_EDE_KEY_SIZE)
return -EINVAL;
if (((des3->mode == CCP_DES3_MODE_ECB) ||
(des3->mode == CCP_DES3_MODE_CBC)) &&
(des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
return -EINVAL;
if (!des3->key || !des3->src || !des3->dst)
return -EINVAL;
if (des3->mode != CCP_DES3_MODE_ECB) {
if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
return -EINVAL;
if (!des3->iv)
return -EINVAL;
}
ret = -EIO;
/* Zero out all the fields of the command desc */
memset(&op, 0, sizeof(op));
/* Set up the Function field */
op.cmd_q = cmd_q;
op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
op.sb_key = cmd_q->sb_key;
op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
op.u.des3.type = des3->type;
op.u.des3.mode = des3->mode;
op.u.des3.action = des3->action;
/*
* All supported key sizes fit in a single (32-byte) KSB entry and
* (like AES) must be in little endian format. Use the 256-bit byte
* swap passthru option to convert from big endian to little endian.
*/
ret = ccp_init_dm_workarea(&key, cmd_q,
CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
DMA_TO_DEVICE);
if (ret)
return ret;
/*
* The contents of the key triplet are in the reverse order of what
* is required by the engine. Copy the 3 pieces individually to put
* them where they belong.
*/
dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */
len_singlekey = des3->key_len / 3;
ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
des3->key, 0, len_singlekey);
ccp_set_dm_area(&key, dm_offset + len_singlekey,
des3->key, len_singlekey, len_singlekey);
ccp_set_dm_area(&key, dm_offset,
des3->key, 2 * len_singlekey, len_singlekey);
/* Copy the key to the SB */
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_key;
}
/*
* The DES3 context fits in a single (32-byte) KSB entry and
* must be in little endian format. Use the 256-bit byte swap
* passthru option to convert from big endian to little endian.
*/
if (des3->mode != CCP_DES3_MODE_ECB) {
u32 load_mode;
op.sb_ctx = cmd_q->sb_ctx;
ret = ccp_init_dm_workarea(&ctx, cmd_q,
CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_BIDIRECTIONAL);
if (ret)
goto e_key;
/* Load the context into the LSB */
dm_offset = CCP_SB_BYTES - des3->iv_len;
ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len);
if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
else
load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
load_mode);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_ctx;
}
}
/*
* Prepare the input and output data workareas. For in-place
* operations we need to set the dma direction to BIDIRECTIONAL
* and copy the src workarea to the dst workarea.
*/
if (sg_virt(des3->src) == sg_virt(des3->dst))
in_place = true;
ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
DES3_EDE_BLOCK_SIZE,
in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
if (ret)
goto e_ctx;
if (in_place)
dst = src;
else {
ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
if (ret)
goto e_src;
}
/* Send data to the CCP DES3 engine */
while (src.sg_wa.bytes_left) {
ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
if (!src.sg_wa.bytes_left) {
op.eom = 1;
/* Since we don't retrieve the context in ECB mode
* we have to wait for the operation to complete
* on the last piece of data
*/
op.soc = 0;
}
ret = cmd_q->ccp->vdata->perform->des3(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
ccp_process_data(&src, &dst, &op);
}
if (des3->mode != CCP_DES3_MODE_ECB) {
/* Retrieve the context and make BE */
ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
dm_offset = CCP_SB_BYTES - des3->iv_len;
else
dm_offset = 0;
ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
DES3_EDE_BLOCK_SIZE);
}
e_dst:
if (!in_place)
ccp_free_data(&dst, cmd_q);
e_src:
ccp_free_data(&src, cmd_q);
e_ctx:
if (des3->mode != CCP_DES3_MODE_ECB)
ccp_dm_free(&ctx);
e_key:
ccp_dm_free(&key);
return ret;
}
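The key loading above is easier to follow with concrete numbers: for the 24-byte EDE key, dm_offset = CCP_SB_BYTES - 24 (8, assuming the 32-byte SB entries described in the comments) and len_singlekey = 8, so the entry ends up as 8 zero bytes, then K3, K2, K1. A hypothetical plain-buffer equivalent of the three ccp_set_dm_area() calls, added here only as an illustration:
#include <linux/string.h>
#include <crypto/des.h>                 /* DES3_EDE_KEY_SIZE (24) */
static void des3_layout_key(u8 sb_entry[32], const u8 *key /* K1|K2|K3 */)
{
        const unsigned int dm_offset = 32 - DES3_EDE_KEY_SIZE;         /* 8 */
        const unsigned int len_singlekey = DES3_EDE_KEY_SIZE / 3;      /* 8 */
        memset(sb_entry, 0, 32);
        /* K1 goes last, K3 first: the engine wants the triplet reversed. */
        memcpy(sb_entry + dm_offset + 2 * len_singlekey, key, len_singlekey);
        memcpy(sb_entry + dm_offset + len_singlekey, key + len_singlekey, len_singlekey);
        memcpy(sb_entry + dm_offset, key + 2 * len_singlekey, len_singlekey);
}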
static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_sha_engine *sha = &cmd->u.sha;
@ -955,6 +1416,18 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
return -EINVAL;
block_size = SHA256_BLOCK_SIZE;
break;
case CCP_SHA_TYPE_384:
if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
|| sha->ctx_len < SHA384_DIGEST_SIZE)
return -EINVAL;
block_size = SHA384_BLOCK_SIZE;
break;
case CCP_SHA_TYPE_512:
if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
|| sha->ctx_len < SHA512_DIGEST_SIZE)
return -EINVAL;
block_size = SHA512_BLOCK_SIZE;
break;
default:
return -EINVAL;
}
@ -1042,6 +1515,21 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
sb_count = 1;
ooffset = ioffset = 0;
break;
case CCP_SHA_TYPE_384:
digest_size = SHA384_DIGEST_SIZE;
init = (void *) ccp_sha384_init;
ctx_size = SHA512_DIGEST_SIZE;
sb_count = 2;
ioffset = 0;
ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
break;
case CCP_SHA_TYPE_512:
digest_size = SHA512_DIGEST_SIZE;
init = (void *) ccp_sha512_init;
ctx_size = SHA512_DIGEST_SIZE;
sb_count = 2;
ooffset = ioffset = 0;
break;
default:
ret = -EINVAL;
goto e_data;
@ -1060,6 +1548,11 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.u.sha.type = sha->type;
op.u.sha.msg_bits = sha->msg_bits;
/* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
* SHA384/512 require 2 adjacent SB slots, with the right half in the
* first slot, and the left half in the second. Each portion must then
* be in little endian format: use the 256-bit byte swap option.
*/
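/* Editorial illustration (not part of the patch): assuming the 32-byte SB
 * entries described above, SHA-384 uses sb_count = 2 and
 * ooffset = 2 * 32 - SHA384_DIGEST_SIZE = 64 - 48 = 16, while SHA-512
 * fills both slots exactly (ooffset = ioffset = 0).
 */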
ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
DMA_BIDIRECTIONAL);
if (ret)
@ -1071,6 +1564,13 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
case CCP_SHA_TYPE_256:
memcpy(ctx.address + ioffset, init, ctx_size);
break;
case CCP_SHA_TYPE_384:
case CCP_SHA_TYPE_512:
memcpy(ctx.address + ctx_size / 2, init,
ctx_size / 2);
memcpy(ctx.address, init + ctx_size / 2,
ctx_size / 2);
break;
default:
ret = -EINVAL;
goto e_ctx;
@ -1137,6 +1637,15 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
sha->ctx, 0,
digest_size);
break;
case CCP_SHA_TYPE_384:
case CCP_SHA_TYPE_512:
ccp_get_dm_area(&ctx, 0,
sha->ctx, LSB_ITEM_SIZE - ooffset,
LSB_ITEM_SIZE);
ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
sha->ctx, 0,
LSB_ITEM_SIZE - ooffset);
break;
default:
ret = -EINVAL;
goto e_ctx;
@ -1174,6 +1683,16 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
ctx.address + ooffset,
digest_size);
break;
case CCP_SHA_TYPE_384:
case CCP_SHA_TYPE_512:
memcpy(hmac_buf + block_size,
ctx.address + LSB_ITEM_SIZE + ooffset,
LSB_ITEM_SIZE);
memcpy(hmac_buf + block_size +
(LSB_ITEM_SIZE - ooffset),
ctx.address,
LSB_ITEM_SIZE);
break;
default:
ret = -EINVAL;
goto e_ctx;
@ -1831,6 +2350,9 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
case CCP_ENGINE_XTS_AES_128:
ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
break;
case CCP_ENGINE_DES3:
ret = ccp_run_des3_cmd(cmd_q, cmd);
break;
case CCP_ENGINE_SHA:
ret = ccp_run_sha_cmd(cmd_q, cmd);
break;

View file

@ -69,6 +69,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
goto e_irq;
}
}
ccp->use_tasklet = true;
return 0;
@ -100,6 +101,7 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
goto e_msi;
}
ccp->use_tasklet = true;
return 0;

View file

@ -294,7 +294,7 @@ static inline void get_aes_decrypt_key(unsigned char *dec_key,
static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
struct crypto_shash *base_hash = NULL;
struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
switch (ds) {
case SHA1_DIGEST_SIZE:
@ -522,7 +522,7 @@ static inline void create_wreq(struct chcr_context *ctx,
{
struct uld_ctx *u_ctx = ULD_CTX(ctx);
int iv_loc = IV_DSGL;
int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
unsigned int immdatalen = 0, nr_frags = 0;
if (is_ofld_imm(skb)) {
@ -543,7 +543,7 @@ static inline void create_wreq(struct chcr_context *ctx,
chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
chcr_req->wreq.rx_chid_to_rx_q_id =
FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
is_iv ? iv_loc : IV_NOP, ctx->tx_channel_id);
is_iv ? iv_loc : IV_NOP, ctx->tx_qidx);
chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
qid);
@ -721,19 +721,19 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
struct sk_buff *skb;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
ctx->tx_channel_id))) {
ctx->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx],
CHCR_ENCRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
return PTR_ERR(skb);
}
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@ -746,19 +746,19 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
struct sk_buff *skb;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
ctx->tx_channel_id))) {
ctx->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx],
CHCR_DECRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
return PTR_ERR(skb);
}
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@ -766,7 +766,9 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
static int chcr_device_init(struct chcr_context *ctx)
{
struct uld_ctx *u_ctx;
struct adapter *adap;
unsigned int id;
int txq_perchan, txq_idx, ntxq;
int err = 0, rxq_perchan, rxq_idx;
id = smp_processor_id();
@ -777,11 +779,18 @@ static int chcr_device_init(struct chcr_context *ctx)
goto out;
}
u_ctx = ULD_CTX(ctx);
adap = padap(ctx->dev);
ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
adap->vres.ncrypto_fc);
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
txq_perchan = ntxq / u_ctx->lldi.nchan;
rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
rxq_idx += id % rxq_perchan;
txq_idx = ctx->dev->tx_channel_id * txq_perchan;
txq_idx += id % txq_perchan;
spin_lock(&ctx->dev->lock_chcr_dev);
ctx->tx_channel_id = rxq_idx;
ctx->rx_qidx = rxq_idx;
ctx->tx_qidx = txq_idx;
ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
ctx->dev->rx_channel_id = 0;
spin_unlock(&ctx->dev->lock_chcr_dev);
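To make the new queue spreading concrete (numbers chosen only for illustration): with lldi.nrxq = 8 receive queues over lldi.nchan = 2 channels, rxq_perchan = 4, so a context initialised on CPU 5 while dev->tx_channel_id is 1 gets rx_qidx = 1 * 4 + (5 % 4) = 5; tx_qidx is derived the same way from the crypto TX queue count, so contexts on the same channel now land on different queues instead of all reusing tx_channel_id.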
@ -935,7 +944,7 @@ static int chcr_ahash_update(struct ahash_request *req)
u_ctx = ULD_CTX(ctx);
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
ctx->tx_channel_id))) {
ctx->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@ -975,7 +984,7 @@ static int chcr_ahash_update(struct ahash_request *req)
}
req_ctx->reqlen = remainder;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
@ -1028,7 +1037,7 @@ static int chcr_ahash_final(struct ahash_request *req)
return -ENOMEM;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@ -1047,7 +1056,7 @@ static int chcr_ahash_finup(struct ahash_request *req)
u_ctx = ULD_CTX(ctx);
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
ctx->tx_channel_id))) {
ctx->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@ -1079,7 +1088,7 @@ static int chcr_ahash_finup(struct ahash_request *req)
return -ENOMEM;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
@ -1100,7 +1109,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
u_ctx = ULD_CTX(ctx);
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
ctx->tx_channel_id))) {
ctx->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@ -1130,7 +1139,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
return -ENOMEM;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@ -1334,20 +1343,36 @@ static int chcr_copy_assoc(struct aead_request *req,
return crypto_skcipher_encrypt(skreq);
}
static unsigned char get_hmac(unsigned int authsize)
static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
int aadmax, int wrlen,
unsigned short op_type)
{
switch (authsize) {
case ICV_8:
return CHCR_SCMD_HMAC_CTRL_PL1;
case ICV_10:
return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
case ICV_12:
return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
}
return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
(req->assoclen > aadmax) ||
(src_nent > MAX_SKB_FRAGS) ||
(wrlen > MAX_WR_SIZE))
return 1;
return 0;
}
static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_context *ctx = crypto_aead_ctx(tfm);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct aead_request *subreq = aead_request_ctx(req);
aead_request_set_tfm(subreq, aeadctx->sw_cipher);
aead_request_set_callback(subreq, req->base.flags,
req->base.complete, req->base.data);
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req->iv);
aead_request_set_ad(subreq, req->assoclen);
return op_type ? crypto_aead_decrypt(subreq) :
crypto_aead_encrypt(subreq);
}
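Every setkey path below repeats the same flag-propagation idiom around the software fallback: request flags are pushed to the fallback tfm before its setkey runs, and the result flags are copied back to the hardware tfm afterwards. Condensed into one hypothetical helper (the name chcr_fallback_setkey is not in this patch; the body mirrors the repeated open-coded sequence):
static int chcr_fallback_setkey(struct crypto_aead *aead,
                                struct crypto_aead *sw_cipher,
                                const u8 *key, unsigned int keylen)
{
        int err;
        crypto_aead_clear_flags(sw_cipher, CRYPTO_TFM_REQ_MASK);
        crypto_aead_set_flags(sw_cipher,
                              crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK);
        err = crypto_aead_setkey(sw_cipher, key, keylen);
        crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
        crypto_aead_set_flags(aead,
                              crypto_aead_get_flags(sw_cipher) & CRYPTO_TFM_RES_MASK);
        return err;
}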
static struct sk_buff *create_authenc_wr(struct aead_request *req,
unsigned short qid,
@ -1371,7 +1396,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
unsigned short stop_offset = 0;
unsigned int assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
int err = 0;
int err = -EINVAL, src_nent;
int null = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
@ -1381,8 +1406,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
goto err;
if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
if (src_nent < 0)
goto err;
src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
reqctx->dst = src;
@ -1400,7 +1425,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
}
reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
(op_type ? -authsize : authsize));
if (reqctx->dst_nents <= 0) {
if (reqctx->dst_nents < 0) {
pr_err("AUTHENC:Invalid Destination sg entries\n");
goto err;
}
@ -1408,6 +1433,12 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
- sizeof(chcr_req->key_ctx);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG,
T6_MAX_AAD_SIZE,
transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
op_type)) {
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
goto err;
@ -1489,24 +1520,6 @@ err:
return ERR_PTR(-EINVAL);
}
static void aes_gcm_empty_pld_pad(struct scatterlist *sg,
unsigned short offset)
{
struct page *spage;
unsigned char *addr;
spage = sg_page(sg);
get_page(spage); /* so that it is not freed by NIC */
#ifdef KMAP_ATOMIC_ARGS
addr = kmap_atomic(spage, KM_SOFTIRQ0);
#else
addr = kmap_atomic(spage);
#endif
memset(addr + sg->offset, 0, offset + 1);
kunmap_atomic(addr);
}
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
__be32 data;
@ -1570,11 +1583,6 @@ static int ccm_format_packet(struct aead_request *req,
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
int rc = 0;
if (req->assoclen > T5_MAX_AAD_SIZE) {
pr_err("CCM: Unsupported AAD data. It should be < %d\n",
T5_MAX_AAD_SIZE);
return -EINVAL;
}
if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
reqctx->iv[0] = 3;
memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
@ -1600,13 +1608,13 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
struct chcr_context *chcrctx)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
unsigned int ivsize = AES_BLOCK_SIZE;
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
unsigned int c_id = chcrctx->dev->rx_channel_id;
unsigned int ccm_xtra;
unsigned char tag_offset = 0, auth_offset = 0;
unsigned char hmac_ctrl = get_hmac(crypto_aead_authsize(tfm));
unsigned int assoclen;
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
@ -1642,8 +1650,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
crypto_aead_authsize(tfm));
sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
cipher_mode, mac_mode, hmac_ctrl,
ivsize >> 1);
cipher_mode, mac_mode,
aeadctx->hmac_ctrl, ivsize >> 1);
sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
1, dst_size);
@ -1719,16 +1727,17 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
unsigned int dst_size = 0, kctx_len;
unsigned int sub_type;
unsigned int authsize = crypto_aead_authsize(tfm);
int err = 0;
int err = -EINVAL, src_nent;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
goto err;
if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
if (src_nent < 0)
goto err;
sub_type = get_aead_subtype(tfm);
src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
reqctx->dst = src;
@ -1744,7 +1753,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
}
reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
(op_type ? -authsize : authsize));
if (reqctx->dst_nents <= 0) {
if (reqctx->dst_nents < 0) {
pr_err("CCM:Invalid Destination sg entries\n");
goto err;
}
@ -1756,6 +1765,13 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG,
T6_MAX_AAD_SIZE - 18,
transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
op_type)) {
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
@ -1820,8 +1836,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
unsigned char tag_offset = 0;
unsigned int crypt_len = 0;
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned char hmac_ctrl = get_hmac(authsize);
int err = 0;
int err = -EINVAL, src_nent;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
@ -1831,8 +1846,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
goto err;
if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
if (src_nent < 0)
goto err;
src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
@ -1854,7 +1869,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
crypt_len = req->cryptlen;
reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
(op_type ? -authsize : authsize));
if (reqctx->dst_nents <= 0) {
if (reqctx->dst_nents < 0) {
pr_err("GCM:Invalid Destination sg entries\n");
goto err;
}
@ -1864,6 +1879,12 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
AEAD_H_SIZE;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG,
T6_MAX_AAD_SIZE,
transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
op_type)) {
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
goto err;
@ -1881,11 +1902,11 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
ctx->dev->rx_channel_id, 2, (ivsize ?
(req->assoclen + 1) : 0));
chcr_req->sec_cpl.pldlen = htonl(req->assoclen + ivsize + crypt_len);
chcr_req->sec_cpl.pldlen =
htonl(req->assoclen + ivsize + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
req->assoclen ? 1 : 0, req->assoclen,
req->assoclen + ivsize + 1, 0);
if (req->cryptlen) {
chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
tag_offset, tag_offset);
@ -1893,17 +1914,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
CHCR_ENCRYPT_OP) ? 1 : 0,
CHCR_SCMD_CIPHER_MODE_AES_GCM,
CHCR_SCMD_AUTH_MODE_GHASH, hmac_ctrl,
ivsize >> 1);
} else {
chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(op_type,
(op_type == CHCR_ENCRYPT_OP) ?
1 : 0, CHCR_SCMD_CIPHER_MODE_AES_CBC,
0, 0, ivsize >> 1);
}
CHCR_SCMD_AUTH_MODE_GHASH,
aeadctx->hmac_ctrl, ivsize >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
0, 1, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
@ -1936,15 +1948,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
write_sg_to_skb(skb, &frags, req->src, req->assoclen);
write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
if (req->cryptlen) {
write_sg_to_skb(skb, &frags, src, req->cryptlen);
} else {
aes_gcm_empty_pld_pad(req->dst, authsize - 1);
write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
}
write_sg_to_skb(skb, &frags, src, req->cryptlen);
create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
sizeof(struct cpl_rx_phys_dsgl) + dst_size);
reqctx->skb = skb;
@ -1965,8 +1969,15 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
struct chcr_context *ctx = crypto_aead_ctx(tfm);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct aead_alg *alg = crypto_aead_alg(tfm);
crypto_aead_set_reqsize(tfm, sizeof(struct chcr_aead_reqctx));
aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(aeadctx->sw_cipher))
return PTR_ERR(aeadctx->sw_cipher);
crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
sizeof(struct aead_request) +
crypto_aead_reqsize(aeadctx->sw_cipher)));
aeadctx->null = crypto_get_default_null_skcipher();
if (IS_ERR(aeadctx->null))
return PTR_ERR(aeadctx->null);
@ -1975,7 +1986,11 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm)
static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
struct chcr_context *ctx = crypto_aead_ctx(tfm);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
crypto_put_default_null_skcipher();
crypto_free_aead(aeadctx->sw_cipher);
}
static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
@ -1985,7 +2000,7 @@ static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
aeadctx->mayverify = VERIFY_HW;
return 0;
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
@ -2022,7 +2037,7 @@ static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
aeadctx->mayverify = VERIFY_SW;
}
return 0;
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
@ -2062,7 +2077,7 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return 0;
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
@ -2088,7 +2103,7 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return 0;
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
@ -2130,10 +2145,10 @@ static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return 0;
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
static int chcr_ccm_common_setkey(struct crypto_aead *aead,
const u8 *key,
unsigned int keylen)
{
@ -2142,8 +2157,6 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
unsigned char ck_size, mk_size;
int key_ctx_size = 0;
memcpy(aeadctx->key, key, keylen);
aeadctx->enckey_len = keylen;
key_ctx_size = sizeof(struct _key_ctx) +
((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
if (keylen == AES_KEYSIZE_128) {
@ -2163,9 +2176,32 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
}
aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
key_ctx_size >> 4);
memcpy(aeadctx->key, key, keylen);
aeadctx->enckey_len = keylen;
return 0;
}
static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
const u8 *key,
unsigned int keylen)
{
struct chcr_context *ctx = crypto_aead_ctx(aead);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
int error;
crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
CRYPTO_TFM_RES_MASK);
if (error)
return error;
return chcr_ccm_common_setkey(aead, key, keylen);
}
static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
@ -2180,7 +2216,7 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
}
keylen -= 3;
memcpy(aeadctx->salt, key + keylen, 3);
return chcr_aead_ccm_setkey(aead, key, keylen);
return chcr_ccm_common_setkey(aead, key, keylen);
}
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
@ -2193,6 +2229,17 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int ck_size;
int ret = 0, key_ctx_size = 0;
aeadctx->enckey_len = 0;
crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
& CRYPTO_TFM_REQ_MASK);
ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
CRYPTO_TFM_RES_MASK);
if (ret)
goto out;
if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
keylen > 3) {
keylen -= 4; /* nonce/salt is present in the last 4 bytes */
@ -2207,8 +2254,7 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
} else {
crypto_tfm_set_flags((struct crypto_tfm *)aead,
CRYPTO_TFM_RES_BAD_KEY_LEN);
aeadctx->enckey_len = 0;
pr_err("GCM: Invalid key length %d", keylen);
pr_err("GCM: Invalid key length %d\n", keylen);
ret = -EINVAL;
goto out;
}
@ -2259,11 +2305,21 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
int err = 0, i, key_ctx_len = 0;
unsigned char ck_size = 0;
unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
struct crypto_shash *base_hash = NULL;
struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
struct algo_param param;
int align;
u8 *o_ptr = NULL;
crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
& CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
& CRYPTO_TFM_RES_MASK);
if (err)
goto out;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
goto out;
@ -2296,7 +2352,8 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
base_hash = chcr_alloc_shash(max_authsize);
if (IS_ERR(base_hash)) {
pr_err("chcr : Base driver cannot be loaded\n");
goto out;
aeadctx->enckey_len = 0;
return -EINVAL;
}
{
SHASH_DESC_ON_STACK(shash, base_hash);
@ -2351,7 +2408,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
}
out:
aeadctx->enckey_len = 0;
if (base_hash)
if (!IS_ERR(base_hash))
chcr_free_shash(base_hash);
return -EINVAL;
}
@ -2363,11 +2420,21 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
struct crypto_authenc_keys keys;
int err;
/* it contains auth and cipher key both*/
int key_ctx_len = 0;
unsigned char ck_size = 0;
crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
& CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
& CRYPTO_TFM_RES_MASK);
if (err)
goto out;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
goto out;
@ -2465,22 +2532,20 @@ static int chcr_aead_op(struct aead_request *req,
}
u_ctx = ULD_CTX(ctx);
if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
ctx->tx_channel_id)) {
ctx->tx_qidx)) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
/* Form a WR from req */
skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], size,
skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size,
op_type);
if (IS_ERR(skb) || skb == NULL) {
pr_err("chcr : %s : failed to form WR. No memory\n", __func__);
if (IS_ERR(skb) || !skb)
return PTR_ERR(skb);
}
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@ -2673,6 +2738,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_name = "gcm(aes)",
.cra_driver_name = "gcm-aes-chcr",
.cra_blocksize = 1,
.cra_priority = CHCR_AEAD_PRIORITY,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_gcm_ctx),
@ -2691,6 +2757,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_name = "rfc4106(gcm(aes))",
.cra_driver_name = "rfc4106-gcm-aes-chcr",
.cra_blocksize = 1,
.cra_priority = CHCR_AEAD_PRIORITY + 1,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_gcm_ctx),
@ -2710,6 +2777,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_name = "ccm(aes)",
.cra_driver_name = "ccm-aes-chcr",
.cra_blocksize = 1,
.cra_priority = CHCR_AEAD_PRIORITY,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx),
@ -2728,6 +2796,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_name = "rfc4309(ccm(aes))",
.cra_driver_name = "rfc4309-ccm-aes-chcr",
.cra_blocksize = 1,
.cra_priority = CHCR_AEAD_PRIORITY + 1,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx),
@ -2747,6 +2816,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_driver_name =
"authenc-hmac-sha1-cbc-aes-chcr",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_priority = CHCR_AEAD_PRIORITY,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_authenc_ctx),
@ -2768,6 +2838,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_driver_name =
"authenc-hmac-sha256-cbc-aes-chcr",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_priority = CHCR_AEAD_PRIORITY,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_authenc_ctx),
@ -2788,6 +2859,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_driver_name =
"authenc-hmac-sha224-cbc-aes-chcr",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_priority = CHCR_AEAD_PRIORITY,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_authenc_ctx),
@ -2807,6 +2879,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_driver_name =
"authenc-hmac-sha384-cbc-aes-chcr",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_priority = CHCR_AEAD_PRIORITY,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_authenc_ctx),
@ -2827,6 +2900,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_driver_name =
"authenc-hmac-sha512-cbc-aes-chcr",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_priority = CHCR_AEAD_PRIORITY,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_authenc_ctx),
@ -2847,6 +2921,7 @@ static struct chcr_alg_template driver_algs[] = {
.cra_driver_name =
"authenc-digest_null-cbc-aes-chcr",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_priority = CHCR_AEAD_PRIORITY,
.cra_ctxsize = sizeof(struct chcr_context) +
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_authenc_ctx),
@ -2915,10 +2990,9 @@ static int chcr_register_alg(void)
name = driver_algs[i].alg.crypto.cra_driver_name;
break;
case CRYPTO_ALG_TYPE_AEAD:
driver_algs[i].alg.aead.base.cra_priority =
CHCR_CRA_PRIORITY;
driver_algs[i].alg.aead.base.cra_flags =
CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK;
driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
driver_algs[i].alg.aead.init = chcr_aead_cra_init;

View file

@ -218,6 +218,10 @@
#define MAX_NK 8
#define CRYPTO_MAX_IMM_TX_PKT_LEN 256
#define MAX_WR_SIZE 512
#define MIN_AUTH_SG 2 /*IV + AAD*/
#define MIN_GCM_SG 2 /* IV + AAD*/
#define MIN_CCM_SG 3 /*IV+AAD+B0*/
struct algo_param {
unsigned int auth_mode;

Some files were not shown because too many files have changed in this diff.