Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
 "Here is the crypto update for 3.14:

   - Improved crypto_memneq helper
   - Use crypto_memneq in arch-specific crypto code (see the sketch below)
   - Replaced orphaned DCP driver with Freescale MXS DCP driver
   - Added AVX/AVX2 version of AESNI-GCM encode and decode
   - Added AMD Cryptographic Coprocessor (CCP) driver
   - Misc fixes"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (41 commits)
  crypto: aesni - fix build on x86 (32bit)
  crypto: mxs - Fix sparse non static symbol warning
  crypto: ccp - CCP device enabled/disabled changes
  crypto: ccp - Cleanup hash invocation calls
  crypto: ccp - Change data length declarations to u64
  crypto: ccp - Check for caller result area before using it
  crypto: ccp - Cleanup scatterlist usage
  crypto: ccp - Apply appropriate gfp_t type to memory allocations
  crypto: drivers - Sort drivers/crypto/Makefile
  ARM: mxs: dts: Enable DCP for MXS
  crypto: mxs - Add Freescale MXS DCP driver
  crypto: mxs - Remove the old DCP driver
  crypto: ahash - Fully restore ahash request before completing
  crypto: aesni - fix build on x86 (32bit)
  crypto: talitos - Remove redundant dev_set_drvdata
  crypto: ccp - Remove redundant dev_set_drvdata
  crypto: crypto4xx - Remove redundant dev_set_drvdata
  crypto: caam - simplify and harden key parsing
  crypto: omap-sham - Fix Polling mode for larger blocks
  crypto: tcrypt - Added speed tests for AEAD crypto algorithms in tcrypt test suite
  ...
Linus Torvalds 2014-01-23 18:11:00 -08:00
Parents: 6dd9158ae8 79ba451d66
Commit: 13c789a6b2
40 changed files with 10527 additions and 1009 deletions


@@ -0,0 +1,17 @@
Freescale DCP (Data Co-Processor) found on i.MX23/i.MX28.
Required properties:
- compatible : Should be "fsl,<soc>-dcp"
- reg : Should contain MXS DCP registers location and length
- interrupts : Should contain MXS DCP interrupt numbers, VMI IRQ and DCP IRQ
must be supplied, optionally Secure IRQ can be present, but
is currently not implemented and not used.
Example:
dcp@80028000 {
compatible = "fsl,imx28-dcp", "fsl,imx23-dcp";
reg = <0x80028000 0x2000>;
interrupts = <52 53>;
status = "okay";
};


@@ -538,6 +538,13 @@ F: drivers/tty/serial/altera_jtaguart.c
F: include/linux/altera_uart.h
F: include/linux/altera_jtaguart.h
AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER
M: Tom Lendacky <thomas.lendacky@amd.com>
L: linux-crypto@vger.kernel.org
S: Supported
F: drivers/crypto/ccp/
F: include/linux/ccp.h
AMD FAM15H PROCESSOR POWER MONITORING DRIVER
M: Andreas Herrmann <herrmann.der.user@googlemail.com>
L: lm-sensors@lm-sensors.org


@@ -337,8 +337,10 @@
};
dcp@80028000 {
compatible = "fsl,imx23-dcp";
reg = <0x80028000 0x2000>;
status = "disabled";
interrupts = <53 54>;
status = "okay";
};
pxp@8002a000 {


@@ -813,9 +813,10 @@
};
dcp: dcp@80028000 {
compatible = "fsl,imx28-dcp", "fsl,imx23-dcp";
reg = <0x80028000 0x2000>;
interrupts = <52 53 54>;
compatible = "fsl-dcp";
status = "okay";
};
pxp: pxp@8002a000 {


@@ -237,9 +237,9 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
DES_KEY_SIZE)) &&
if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
DES_KEY_SIZE)) &&
(*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;


@@ -76,6 +76,7 @@ ifeq ($(avx2_supported),yes)
endif
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o
ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
crc32c-intel-y := crc32c-intel_glue.o

The diff for this file is not shown because it is too large.


@@ -101,6 +101,9 @@ asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
int crypto_fpu_init(void);
void crypto_fpu_exit(void);
#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
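/* Crossover sizes used by the dispatch helpers below: requests shorter than
 * AVX_GEN2_OPTSIZE always fall back to the original SSE aesni_gcm_enc/dec
 * routines; on AVX2-capable CPUs, AVX_GEN4_OPTSIZE further separates the
 * AVX (gen2) and AVX2 (gen4) paths. */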
#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
@@ -150,6 +153,123 @@ asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
#ifdef CONFIG_AS_AVX
/*
* asmlinkage void aesni_gcm_precomp_avx_gen2()
* gcm_data *my_ctx_data, context data
* u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
*/
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
const u8 *in, unsigned long plaintext_len, u8 *iv,
const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
const u8 *in, unsigned long ciphertext_len, u8 *iv,
const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
static void aesni_gcm_enc_avx(void *ctx, u8 *out,
const u8 *in, unsigned long plaintext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
if (plaintext_len < AVX_GEN2_OPTSIZE) {
aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
aad_len, auth_tag, auth_tag_len);
} else {
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
aad_len, auth_tag, auth_tag_len);
}
}
static void aesni_gcm_dec_avx(void *ctx, u8 *out,
const u8 *in, unsigned long ciphertext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
if (ciphertext_len < AVX_GEN2_OPTSIZE) {
aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
aad_len, auth_tag, auth_tag_len);
} else {
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
aad_len, auth_tag, auth_tag_len);
}
}
#endif
#ifdef CONFIG_AS_AVX2
/*
* asmlinkage void aesni_gcm_precomp_avx_gen4()
* gcm_data *my_ctx_data, context data
* u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
*/
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
const u8 *in, unsigned long plaintext_len, u8 *iv,
const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
const u8 *in, unsigned long ciphertext_len, u8 *iv,
const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
const u8 *in, unsigned long plaintext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
if (plaintext_len < AVX_GEN2_OPTSIZE) {
aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
aad_len, auth_tag, auth_tag_len);
} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
aad_len, auth_tag, auth_tag_len);
} else {
aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
aad_len, auth_tag, auth_tag_len);
}
}
static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
const u8 *in, unsigned long ciphertext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
if (ciphertext_len < AVX_GEN2_OPTSIZE) {
aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
aad, aad_len, auth_tag, auth_tag_len);
} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
aad_len, auth_tag, auth_tag_len);
} else {
aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
aad_len, auth_tag, auth_tag_len);
}
}
#endif
static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
const u8 *in, unsigned long plaintext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
const u8 *in, unsigned long ciphertext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
@@ -915,7 +1035,7 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
dst = src;
}
aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
+ ((unsigned long)req->cryptlen), auth_tag_len);
@@ -996,12 +1116,12 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
dst = src;
}
aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
authTag, auth_tag_len);
/* Compare generated tag with passed in tag. */
retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
-EBADMSG : 0;
if (one_entry_in_sg) {
@@ -1353,6 +1473,27 @@ static int __init aesni_init(void)
if (!x86_match_cpu(aesni_cpu_id))
return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
if (boot_cpu_has(X86_FEATURE_AVX2)) {
pr_info("AVX2 version of gcm_enc/dec engaged.\n");
aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
} else
#endif
#ifdef CONFIG_AS_AVX
if (boot_cpu_has(X86_FEATURE_AVX)) {
pr_info("AVX version of gcm_enc/dec engaged.\n");
aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
} else
#endif
{
pr_info("SSE version of gcm_enc/dec engaged.\n");
aesni_gcm_enc_tfm = aesni_gcm_enc;
aesni_gcm_dec_tfm = aesni_gcm_dec;
}
#endif
err = crypto_fpu_init();
if (err)


@@ -2,11 +2,6 @@
# Cryptographic API
#
# memneq MUST be built with -Os or -O0 to prevent early-return optimizations
# that will defeat memneq's actual purpose to prevent timing attacks.
CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3
CFLAGS_memneq.o := -Os
obj-$(CONFIG_CRYPTO) += crypto.o
crypto-y := api.o cipher.o compress.o memneq.o


@@ -213,7 +213,10 @@ static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
ahash_op_unaligned_finish(areq, err);
complete(data, err);
areq->base.complete = complete;
areq->base.data = data;
complete(&areq->base, err);
}
static int ahash_op_unaligned(struct ahash_request *req,


@@ -72,6 +72,7 @@ __crypto_memneq_generic(const void *a, const void *b, size_t size)
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
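/* OPTIMIZER_HIDE_VAR() keeps the compiler from tracking the value of neq,
 * so it cannot short-circuit the remaining comparisons and reintroduce the
 * data-dependent timing this helper exists to avoid. */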
while (size >= sizeof(unsigned long)) {
neq |= *(unsigned long *)a ^ *(unsigned long *)b;
OPTIMIZER_HIDE_VAR(neq);
a += sizeof(unsigned long);
b += sizeof(unsigned long);
size -= sizeof(unsigned long);
@@ -79,6 +80,7 @@ __crypto_memneq_generic(const void *a, const void *b, size_t size)
#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
while (size > 0) {
neq |= *(unsigned char *)a ^ *(unsigned char *)b;
OPTIMIZER_HIDE_VAR(neq);
a += 1;
b += 1;
size -= 1;
@@ -89,33 +91,61 @@ __crypto_memneq_generic(const void *a, const void *b, size_t size)
/* Loop-free fast-path for frequently used 16-byte size */
static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
{
unsigned long neq = 0;
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (sizeof(unsigned long) == 8)
return ((*(unsigned long *)(a) ^ *(unsigned long *)(b))
| (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8)));
else if (sizeof(unsigned int) == 4)
return ((*(unsigned int *)(a) ^ *(unsigned int *)(b))
| (*(unsigned int *)(a+4) ^ *(unsigned int *)(b+4))
| (*(unsigned int *)(a+8) ^ *(unsigned int *)(b+8))
| (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12)));
else
if (sizeof(unsigned long) == 8) {
neq |= *(unsigned long *)(a) ^ *(unsigned long *)(b);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned long *)(a+8) ^ *(unsigned long *)(b+8);
OPTIMIZER_HIDE_VAR(neq);
} else if (sizeof(unsigned int) == 4) {
neq |= *(unsigned int *)(a) ^ *(unsigned int *)(b);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned int *)(a+4) ^ *(unsigned int *)(b+4);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned int *)(a+8) ^ *(unsigned int *)(b+8);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned int *)(a+12) ^ *(unsigned int *)(b+12);
OPTIMIZER_HIDE_VAR(neq);
} else
#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
return ((*(unsigned char *)(a) ^ *(unsigned char *)(b))
| (*(unsigned char *)(a+1) ^ *(unsigned char *)(b+1))
| (*(unsigned char *)(a+2) ^ *(unsigned char *)(b+2))
| (*(unsigned char *)(a+3) ^ *(unsigned char *)(b+3))
| (*(unsigned char *)(a+4) ^ *(unsigned char *)(b+4))
| (*(unsigned char *)(a+5) ^ *(unsigned char *)(b+5))
| (*(unsigned char *)(a+6) ^ *(unsigned char *)(b+6))
| (*(unsigned char *)(a+7) ^ *(unsigned char *)(b+7))
| (*(unsigned char *)(a+8) ^ *(unsigned char *)(b+8))
| (*(unsigned char *)(a+9) ^ *(unsigned char *)(b+9))
| (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10))
| (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11))
| (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12))
| (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13))
| (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14))
| (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15)));
{
neq |= *(unsigned char *)(a) ^ *(unsigned char *)(b);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+1) ^ *(unsigned char *)(b+1);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+2) ^ *(unsigned char *)(b+2);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+3) ^ *(unsigned char *)(b+3);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+4) ^ *(unsigned char *)(b+4);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+5) ^ *(unsigned char *)(b+5);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+6) ^ *(unsigned char *)(b+6);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+7) ^ *(unsigned char *)(b+7);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+8) ^ *(unsigned char *)(b+8);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+9) ^ *(unsigned char *)(b+9);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15);
OPTIMIZER_HIDE_VAR(neq);
}
return neq;
}
/* Compare two areas of memory without leaking timing information,


@@ -78,7 +78,7 @@ static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
cpu = *cb_cpu;
rcu_read_lock_bh();
cpumask = rcu_dereference(pcrypt->cb_cpumask);
cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
if (cpumask_test_cpu(cpu, cpumask->mask))
goto out;


@@ -137,7 +137,272 @@ out:
return ret;
}
static int test_aead_jiffies(struct aead_request *req, int enc,
int blen, int sec)
{
unsigned long start, end;
int bcount;
int ret;
for (start = jiffies, end = start + sec * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
if (enc)
ret = crypto_aead_encrypt(req);
else
ret = crypto_aead_decrypt(req);
if (ret)
return ret;
}
printk("%d operations in %d seconds (%ld bytes)\n",
bcount, sec, (long)bcount * blen);
return 0;
}
static int test_aead_cycles(struct aead_request *req, int enc, int blen)
{
unsigned long cycles = 0;
int ret = 0;
int i;
local_irq_disable();
/* Warm-up run. */
for (i = 0; i < 4; i++) {
if (enc)
ret = crypto_aead_encrypt(req);
else
ret = crypto_aead_decrypt(req);
if (ret)
goto out;
}
/* The real thing. */
for (i = 0; i < 8; i++) {
cycles_t start, end;
start = get_cycles();
if (enc)
ret = crypto_aead_encrypt(req);
else
ret = crypto_aead_decrypt(req);
end = get_cycles();
if (ret)
goto out;
cycles += end - start;
}
out:
local_irq_enable();
if (ret == 0)
printk("1 operation in %lu cycles (%d bytes)\n",
(cycles + 4) / 8, blen);
return ret;
}
static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };
#define XBUFSIZE 8
#define MAX_IVLEN 32
static int testmgr_alloc_buf(char *buf[XBUFSIZE])
{
int i;
for (i = 0; i < XBUFSIZE; i++) {
buf[i] = (void *)__get_free_page(GFP_KERNEL);
if (!buf[i])
goto err_free_buf;
}
return 0;
err_free_buf:
while (i-- > 0)
free_page((unsigned long)buf[i]);
return -ENOMEM;
}
static void testmgr_free_buf(char *buf[XBUFSIZE])
{
int i;
for (i = 0; i < XBUFSIZE; i++)
free_page((unsigned long)buf[i]);
}
static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
unsigned int buflen)
{
int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
int k, rem;
np = (np > XBUFSIZE) ? XBUFSIZE : np;
rem = buflen % PAGE_SIZE;
if (np > XBUFSIZE) {
rem = PAGE_SIZE;
np = XBUFSIZE;
}
sg_init_table(sg, np);
for (k = 0; k < np; ++k) {
if (k == (np-1))
sg_set_buf(&sg[k], xbuf[k], rem);
else
sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE);
}
}
static void test_aead_speed(const char *algo, int enc, unsigned int sec,
struct aead_speed_template *template,
unsigned int tcount, u8 authsize,
unsigned int aad_size, u8 *keysize)
{
unsigned int i, j;
struct crypto_aead *tfm;
int ret = -ENOMEM;
const char *key;
struct aead_request *req;
struct scatterlist *sg;
struct scatterlist *asg;
struct scatterlist *sgout;
const char *e;
void *assoc;
char iv[MAX_IVLEN];
char *xbuf[XBUFSIZE];
char *xoutbuf[XBUFSIZE];
char *axbuf[XBUFSIZE];
unsigned int *b_size;
unsigned int iv_len;
if (enc == ENCRYPT)
e = "encryption";
else
e = "decryption";
if (testmgr_alloc_buf(xbuf))
goto out_noxbuf;
if (testmgr_alloc_buf(axbuf))
goto out_noaxbuf;
if (testmgr_alloc_buf(xoutbuf))
goto out_nooutbuf;
sg = kmalloc(sizeof(*sg) * 8 * 3, GFP_KERNEL);
if (!sg)
goto out_nosg;
asg = &sg[8];
sgout = &asg[8];
printk(KERN_INFO "\ntesting speed of %s %s\n", algo, e);
tfm = crypto_alloc_aead(algo, 0, 0);
if (IS_ERR(tfm)) {
pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo,
PTR_ERR(tfm));
return;
}
req = aead_request_alloc(tfm, GFP_KERNEL);
if (!req) {
pr_err("alg: aead: Failed to allocate request for %s\n",
algo);
goto out;
}
i = 0;
do {
b_size = aead_sizes;
do {
assoc = axbuf[0];
if (aad_size < PAGE_SIZE)
memset(assoc, 0xff, aad_size);
else {
pr_err("associate data length (%u) too big\n",
aad_size);
goto out_nosg;
}
sg_init_one(&asg[0], assoc, aad_size);
if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for tvmem (%lu)\n",
*keysize + *b_size,
TVMEMSIZE * PAGE_SIZE);
goto out;
}
key = tvmem[0];
for (j = 0; j < tcount; j++) {
if (template[j].klen == *keysize) {
key = template[j].key;
break;
}
}
ret = crypto_aead_setkey(tfm, key, *keysize);
ret = crypto_aead_setauthsize(tfm, authsize);
iv_len = crypto_aead_ivsize(tfm);
if (iv_len)
memset(&iv, 0xff, iv_len);
crypto_aead_clear_flags(tfm, ~0);
printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
i, *keysize * 8, *b_size);
memset(tvmem[0], 0xff, PAGE_SIZE);
if (ret) {
pr_err("setkey() failed flags=%x\n",
crypto_aead_get_flags(tfm));
goto out;
}
sg_init_aead(&sg[0], xbuf,
*b_size + (enc ? authsize : 0));
sg_init_aead(&sgout[0], xoutbuf,
*b_size + (enc ? authsize : 0));
aead_request_set_crypt(req, sg, sgout, *b_size, iv);
aead_request_set_assoc(req, asg, aad_size);
if (sec)
ret = test_aead_jiffies(req, enc, *b_size, sec);
else
ret = test_aead_cycles(req, enc, *b_size);
if (ret) {
pr_err("%s() failed return code=%d\n", e, ret);
break;
}
b_size++;
i++;
} while (*b_size);
keysize++;
} while (*keysize);
out:
crypto_free_aead(tfm);
kfree(sg);
out_nosg:
testmgr_free_buf(xoutbuf);
out_nooutbuf:
testmgr_free_buf(axbuf);
out_noaxbuf:
testmgr_free_buf(xbuf);
out_noxbuf:
return;
}
static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
struct cipher_speed_template *template,
@@ -1427,6 +1692,11 @@ static int do_test(int m)
speed_template_32_64);
break;
case 211:
test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
NULL, 0, 16, 8, aead_speed_template_20);
break;
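/* Assuming the usual tcrypt module parameters, this case can be exercised
 * with "modprobe tcrypt mode=211 sec=1" (or sec=0 to measure cycles instead
 * of a timed run); tcrypt intentionally does not stay loaded, so the results
 * appear in the kernel log. */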
case 300:
/* fall through */


@@ -22,6 +22,11 @@ struct cipher_speed_template {
unsigned int klen;
};
struct aead_speed_template {
const char *key;
unsigned int klen;
};
struct hash_speed {
unsigned int blen; /* buffer length */
unsigned int plen; /* per-update length */
@@ -57,6 +62,11 @@ static u8 speed_template_32_48[] = {32, 48, 0};
static u8 speed_template_32_48_64[] = {32, 48, 64, 0};
static u8 speed_template_32_64[] = {32, 64, 0};
/*
* AEAD speed tests
*/
static u8 aead_speed_template_20[] = {20, 0};
/*
* Digest speed tests
*/


@@ -289,16 +289,6 @@ config CRYPTO_DEV_SAHARA
This option enables support for the SAHARA HW crypto accelerator
found in some Freescale i.MX chips.
config CRYPTO_DEV_DCP
tristate "Support for the DCP engine"
depends on ARCH_MXS && OF
select CRYPTO_BLKCIPHER
select CRYPTO_AES
select CRYPTO_CBC
help
This options enables support for the hardware crypto-acceleration
capabilities of the DCP co-processor
config CRYPTO_DEV_S5P
tristate "Support for Samsung S5PV210 crypto accelerator"
depends on ARCH_S5PV210
@@ -399,4 +389,33 @@ config CRYPTO_DEV_ATMEL_SHA
To compile this driver as a module, choose M here: the module
will be called atmel-sha.
config CRYPTO_DEV_CCP
bool "Support for AMD Cryptographic Coprocessor"
depends on X86 && PCI
default n
help
The AMD Cryptographic Coprocessor provides hardware support
for encryption, hashing and related operations.
if CRYPTO_DEV_CCP
source "drivers/crypto/ccp/Kconfig"
endif
config CRYPTO_DEV_MXS_DCP
tristate "Support for Freescale MXS DCP"
depends on ARCH_MXS
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_CBC
select CRYPTO_ECB
select CRYPTO_AES
select CRYPTO_BLKCIPHER
select CRYPTO_ALGAPI
help
The Freescale i.MX23/i.MX28 has SHA1/SHA256 and AES128 CBC/ECB
co-processor on the die.
To compile this driver as a module, choose M here: the module
will be called mxs-dcp.
endif # CRYPTO_HW


@@ -1,24 +1,25 @@
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_CRYPTO_DEV_DCP) += dcp.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o


@@ -724,7 +724,6 @@ static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
crypto4xx_destroy_pdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
crypto4xx_destroy_sdr(core_dev->dev);
dev_set_drvdata(core_dev->device, NULL);
iounmap(core_dev->dev->ce_base);
kfree(core_dev->dev);
kfree(core_dev);


@@ -467,24 +467,10 @@ static int aead_setkey(struct crypto_aead *aead,
static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
struct rtattr *rta = (void *)key;
struct crypto_authenc_key_param *param;
unsigned int authkeylen;
unsigned int enckeylen;
struct crypto_authenc_keys keys;
int ret = 0;
param = RTA_DATA(rta);
enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (keylen < enckeylen)
goto badkey;
authkeylen = keylen - enckeylen;
if (keylen > CAAM_MAX_KEY_SIZE)
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
/* Pick class 2 key length from algorithm submask */
@@ -492,25 +478,29 @@
OP_ALG_ALGSEL_SHIFT] * 2;
ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
goto badkey;
#ifdef DEBUG
printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
keylen, enckeylen, authkeylen);
keys.authkeylen + keys.enckeylen, keys.enckeylen,
keys.authkeylen);
printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
ctx->split_key_len, ctx->split_key_pad_len);
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
ret = gen_split_aead_key(ctx, key, authkeylen);
ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
if (ret) {
goto badkey;
}
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
enckeylen, DMA_TO_DEVICE);
keys.enckeylen, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
@@ -518,15 +508,15 @@ static int aead_setkey(struct crypto_aead *aead,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->split_key_pad_len + enckeylen, 1);
ctx->split_key_pad_len + keys.enckeylen, 1);
#endif
ctx->enckeylen = enckeylen;
ctx->enckeylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
if (ret) {
dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
enckeylen, DMA_TO_DEVICE);
keys.enckeylen, DMA_TO_DEVICE);
}
return ret;


@@ -0,0 +1,24 @@
config CRYPTO_DEV_CCP_DD
tristate "Cryptographic Coprocessor device driver"
depends on CRYPTO_DEV_CCP
default m
select HW_RANDOM
help
Provides the interface to use the AMD Cryptographic Coprocessor
which can be used to accelerate or offload encryption operations
such as SHA, AES and more. If you choose 'M' here, this module
will be called ccp.
config CRYPTO_DEV_CCP_CRYPTO
tristate "Encryption and hashing acceleration support"
depends on CRYPTO_DEV_CCP_DD
default m
select CRYPTO_ALGAPI
select CRYPTO_HASH
select CRYPTO_BLKCIPHER
select CRYPTO_AUTHENC
help
Support for using the cryptographic API with the AMD Cryptographic
Coprocessor. This module supports acceleration and offload of SHA
and AES algorithms. If you choose 'M' here, this module will be
called ccp_crypto.


@@ -0,0 +1,10 @@
obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
ccp-objs := ccp-dev.o ccp-ops.o
ccp-objs += ccp-pci.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
ccp-crypto-aes.o \
ccp-crypto-aes-cmac.o \
ccp-crypto-aes-xts.o \
ccp-crypto-sha.o


@@ -0,0 +1,365 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support
*
* Copyright (C) 2013 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include "ccp-crypto.h"
static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
int ret)
{
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
unsigned int digest_size = crypto_ahash_digestsize(tfm);
if (ret)
goto e_free;
if (rctx->hash_rem) {
/* Save remaining data to buffer */
unsigned int offset = rctx->nbytes - rctx->hash_rem;
scatterwalk_map_and_copy(rctx->buf, rctx->src,
offset, rctx->hash_rem, 0);
rctx->buf_count = rctx->hash_rem;
} else
rctx->buf_count = 0;
/* Update result area if supplied */
if (req->result)
memcpy(req->result, rctx->iv, digest_size);
e_free:
sg_free_table(&rctx->data_sg);
return ret;
}
static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
unsigned int final)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
struct scatterlist *sg, *cmac_key_sg = NULL;
unsigned int block_size =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int need_pad, sg_count;
gfp_t gfp;
u64 len;
int ret;
if (!ctx->u.aes.key_len)
return -EINVAL;
if (nbytes)
rctx->null_msg = 0;
len = (u64)rctx->buf_count + (u64)nbytes;
if (!final && (len <= block_size)) {
scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
0, nbytes, 0);
rctx->buf_count += nbytes;
return 0;
}
rctx->src = req->src;
rctx->nbytes = nbytes;
rctx->final = final;
rctx->hash_rem = final ? 0 : len & (block_size - 1);
rctx->hash_cnt = len - rctx->hash_rem;
if (!final && !rctx->hash_rem) {
/* CCP can't do zero length final, so keep some data around */
rctx->hash_cnt -= block_size;
rctx->hash_rem = block_size;
}
if (final && (rctx->null_msg || (len & (block_size - 1))))
need_pad = 1;
else
need_pad = 0;
sg_init_one(&rctx->iv_sg, rctx->iv, sizeof(rctx->iv));
/* Build the data scatterlist table - allocate enough entries for all
* possible data pieces (buffer, input data, padding)
*/
sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
if (ret)
return ret;
sg = NULL;
if (rctx->buf_count) {
sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
}
if (nbytes)
sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
if (need_pad) {
int pad_length = block_size - (len & (block_size - 1));
rctx->hash_cnt += pad_length;
memset(rctx->pad, 0, sizeof(rctx->pad));
rctx->pad[0] = 0x80;
sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
}
if (sg) {
sg_mark_end(sg);
sg = rctx->data_sg.sgl;
}
/* Initialize the K1/K2 scatterlist */
if (final)
cmac_key_sg = (need_pad) ? &ctx->u.aes.k2_sg
: &ctx->u.aes.k1_sg;
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
INIT_LIST_HEAD(&rctx->cmd.entry);
rctx->cmd.engine = CCP_ENGINE_AES;
rctx->cmd.u.aes.type = ctx->u.aes.type;
rctx->cmd.u.aes.mode = ctx->u.aes.mode;
rctx->cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
rctx->cmd.u.aes.iv = &rctx->iv_sg;
rctx->cmd.u.aes.iv_len = AES_BLOCK_SIZE;
rctx->cmd.u.aes.src = sg;
rctx->cmd.u.aes.src_len = rctx->hash_cnt;
rctx->cmd.u.aes.dst = NULL;
rctx->cmd.u.aes.cmac_key = cmac_key_sg;
rctx->cmd.u.aes.cmac_key_len = ctx->u.aes.kn_len;
rctx->cmd.u.aes.cmac_final = final;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
return ret;
}
static int ccp_aes_cmac_init(struct ahash_request *req)
{
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
memset(rctx, 0, sizeof(*rctx));
rctx->null_msg = 1;
return 0;
}
static int ccp_aes_cmac_update(struct ahash_request *req)
{
return ccp_do_cmac_update(req, req->nbytes, 0);
}
static int ccp_aes_cmac_final(struct ahash_request *req)
{
return ccp_do_cmac_update(req, 0, 1);
}
static int ccp_aes_cmac_finup(struct ahash_request *req)
{
return ccp_do_cmac_update(req, req->nbytes, 1);
}
static int ccp_aes_cmac_digest(struct ahash_request *req)
{
int ret;
ret = ccp_aes_cmac_init(req);
if (ret)
return ret;
return ccp_aes_cmac_finup(req);
}
static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct ccp_crypto_ahash_alg *alg =
ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo;
u64 rb_hi = 0x00, rb_lo = 0x87;
__be64 *gk;
int ret;
switch (key_len) {
case AES_KEYSIZE_128:
ctx->u.aes.type = CCP_AES_TYPE_128;
break;
case AES_KEYSIZE_192:
ctx->u.aes.type = CCP_AES_TYPE_192;
break;
case AES_KEYSIZE_256:
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->u.aes.mode = alg->mode;
/* Set to zero until complete */
ctx->u.aes.key_len = 0;
/* Set the key for the AES cipher used to generate the keys */
ret = crypto_cipher_setkey(ctx->u.aes.tfm_cipher, key, key_len);
if (ret)
return ret;
/* Encrypt a block of zeroes - use key area in context */
memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
crypto_cipher_encrypt_one(ctx->u.aes.tfm_cipher, ctx->u.aes.key,
ctx->u.aes.key);
/* Generate K1 and K2 */
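/* Subkey derivation as in NIST SP 800-38B / RFC 4493: L is the AES
 * encryption of the all-zero block computed above, K1 = L << 1 (XORed with
 * Rb = 0x87 when the top bit of L was set), and K2 is derived from K1 in
 * the same way. */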
k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key));
k0_lo = be64_to_cpu(*((__be64 *)ctx->u.aes.key + 1));
k1_hi = (k0_hi << 1) | (k0_lo >> 63);
k1_lo = k0_lo << 1;
if (ctx->u.aes.key[0] & 0x80) {
k1_hi ^= rb_hi;
k1_lo ^= rb_lo;
}
gk = (__be64 *)ctx->u.aes.k1;
*gk = cpu_to_be64(k1_hi);
gk++;
*gk = cpu_to_be64(k1_lo);
k2_hi = (k1_hi << 1) | (k1_lo >> 63);
k2_lo = k1_lo << 1;
if (ctx->u.aes.k1[0] & 0x80) {
k2_hi ^= rb_hi;
k2_lo ^= rb_lo;
}
gk = (__be64 *)ctx->u.aes.k2;
*gk = cpu_to_be64(k2_hi);
gk++;
*gk = cpu_to_be64(k2_lo);
ctx->u.aes.kn_len = sizeof(ctx->u.aes.k1);
sg_init_one(&ctx->u.aes.k1_sg, ctx->u.aes.k1, sizeof(ctx->u.aes.k1));
sg_init_one(&ctx->u.aes.k2_sg, ctx->u.aes.k2, sizeof(ctx->u.aes.k2));
/* Save the supplied key */
memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
memcpy(ctx->u.aes.key, key, key_len);
ctx->u.aes.key_len = key_len;
sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
return ret;
}
static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
struct crypto_cipher *cipher_tfm;
ctx->complete = ccp_aes_cmac_complete;
ctx->u.aes.key_len = 0;
crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));
cipher_tfm = crypto_alloc_cipher("aes", 0,
CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(cipher_tfm)) {
pr_warn("could not load aes cipher driver\n");
return PTR_ERR(cipher_tfm);
}
ctx->u.aes.tfm_cipher = cipher_tfm;
return 0;
}
static void ccp_aes_cmac_cra_exit(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->u.aes.tfm_cipher)
crypto_free_cipher(ctx->u.aes.tfm_cipher);
ctx->u.aes.tfm_cipher = NULL;
}
int ccp_register_aes_cmac_algs(struct list_head *head)
{
struct ccp_crypto_ahash_alg *ccp_alg;
struct ahash_alg *alg;
struct hash_alg_common *halg;
struct crypto_alg *base;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
if (!ccp_alg)
return -ENOMEM;
INIT_LIST_HEAD(&ccp_alg->entry);
ccp_alg->mode = CCP_AES_MODE_CMAC;
alg = &ccp_alg->alg;
alg->init = ccp_aes_cmac_init;
alg->update = ccp_aes_cmac_update;
alg->final = ccp_aes_cmac_final;
alg->finup = ccp_aes_cmac_finup;
alg->digest = ccp_aes_cmac_digest;
alg->setkey = ccp_aes_cmac_setkey;
halg = &alg->halg;
halg->digestsize = AES_BLOCK_SIZE;
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = AES_BLOCK_SIZE;
base->cra_ctxsize = sizeof(struct ccp_ctx);
base->cra_priority = CCP_CRA_PRIORITY;
base->cra_type = &crypto_ahash_type;
base->cra_init = ccp_aes_cmac_cra_init;
base->cra_exit = ccp_aes_cmac_cra_exit;
base->cra_module = THIS_MODULE;
ret = crypto_register_ahash(alg);
if (ret) {
pr_err("%s ahash algorithm registration error (%d)\n",
base->cra_name, ret);
kfree(ccp_alg);
return ret;
}
list_add(&ccp_alg->entry, head);
return 0;
}


@@ -0,0 +1,279 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
*
* Copyright (C) 2013 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include "ccp-crypto.h"
struct ccp_aes_xts_def {
const char *name;
const char *drv_name;
};
static struct ccp_aes_xts_def aes_xts_algs[] = {
{
.name = "xts(aes)",
.drv_name = "xts-aes-ccp",
},
};
struct ccp_unit_size_map {
unsigned int size;
u32 value;
};
static struct ccp_unit_size_map unit_size_map[] = {
{
.size = 4096,
.value = CCP_XTS_AES_UNIT_SIZE_4096,
},
{
.size = 2048,
.value = CCP_XTS_AES_UNIT_SIZE_2048,
},
{
.size = 1024,
.value = CCP_XTS_AES_UNIT_SIZE_1024,
},
{
.size = 512,
.value = CCP_XTS_AES_UNIT_SIZE_512,
},
{
.size = 256,
.value = CCP_XTS_AES_UNIT_SIZE__LAST,
},
{
.size = 128,
.value = CCP_XTS_AES_UNIT_SIZE__LAST,
},
{
.size = 64,
.value = CCP_XTS_AES_UNIT_SIZE__LAST,
},
{
.size = 32,
.value = CCP_XTS_AES_UNIT_SIZE__LAST,
},
{
.size = 16,
.value = CCP_XTS_AES_UNIT_SIZE_16,
},
{
.size = 1,
.value = CCP_XTS_AES_UNIT_SIZE__LAST,
},
};
static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
{
struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
if (ret)
return ret;
memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
return 0;
}
static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
/* Only support 128-bit AES key with a 128-bit Tweak key,
* otherwise use the fallback
*/
switch (key_len) {
case AES_KEYSIZE_128 * 2:
memcpy(ctx->u.aes.key, key, key_len);
break;
}
ctx->u.aes.key_len = key_len / 2;
sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
return crypto_ablkcipher_setkey(ctx->u.aes.tfm_ablkcipher, key,
key_len);
}
static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
unsigned int encrypt)
{
struct crypto_tfm *tfm =
crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
unsigned int unit;
int ret;
if (!ctx->u.aes.key_len)
return -EINVAL;
if (req->nbytes & (AES_BLOCK_SIZE - 1))
return -EINVAL;
if (!req->info)
return -EINVAL;
for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
if (!(req->nbytes & (unit_size_map[unit].size - 1)))
break;
if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
(ctx->u.aes.key_len != AES_KEYSIZE_128)) {
/* Use the fallback to process the request for any
* unsupported unit sizes or key sizes
*/
ablkcipher_request_set_tfm(req, ctx->u.aes.tfm_ablkcipher);
ret = (encrypt) ? crypto_ablkcipher_encrypt(req) :
crypto_ablkcipher_decrypt(req);
ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
return ret;
}
memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
INIT_LIST_HEAD(&rctx->cmd.entry);
rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
: CCP_AES_ACTION_DECRYPT;
rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
rctx->cmd.u.xts.iv = &rctx->iv_sg;
rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
rctx->cmd.u.xts.src = req->src;
rctx->cmd.u.xts.src_len = req->nbytes;
rctx->cmd.u.xts.dst = req->dst;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
return ret;
}
static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
{
return ccp_aes_xts_crypt(req, 1);
}
static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
{
return ccp_aes_xts_crypt(req, 0);
}
static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_ablkcipher *fallback_tfm;
ctx->complete = ccp_aes_xts_complete;
ctx->u.aes.key_len = 0;
fallback_tfm = crypto_alloc_ablkcipher(tfm->__crt_alg->cra_name, 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
pr_warn("could not load fallback driver %s\n",
tfm->__crt_alg->cra_name);
return PTR_ERR(fallback_tfm);
}
ctx->u.aes.tfm_ablkcipher = fallback_tfm;
tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx) +
fallback_tfm->base.crt_ablkcipher.reqsize;
return 0;
}
static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->u.aes.tfm_ablkcipher)
crypto_free_ablkcipher(ctx->u.aes.tfm_ablkcipher);
ctx->u.aes.tfm_ablkcipher = NULL;
}
static int ccp_register_aes_xts_alg(struct list_head *head,
const struct ccp_aes_xts_def *def)
{
struct ccp_crypto_ablkcipher_alg *ccp_alg;
struct crypto_alg *alg;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
if (!ccp_alg)
return -ENOMEM;
INIT_LIST_HEAD(&ccp_alg->entry);
alg = &ccp_alg->alg;
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
alg->cra_blocksize = AES_BLOCK_SIZE;
alg->cra_ctxsize = sizeof(struct ccp_ctx);
alg->cra_priority = CCP_CRA_PRIORITY;
alg->cra_type = &crypto_ablkcipher_type;
alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
alg->cra_init = ccp_aes_xts_cra_init;
alg->cra_exit = ccp_aes_xts_cra_exit;
alg->cra_module = THIS_MODULE;
ret = crypto_register_alg(alg);
if (ret) {
pr_err("%s ablkcipher algorithm registration error (%d)\n",
alg->cra_name, ret);
kfree(ccp_alg);
return ret;
}
list_add(&ccp_alg->entry, head);
return 0;
}
int ccp_register_aes_xts_algs(struct list_head *head)
{
int i, ret;
for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) {
ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]);
if (ret)
return ret;
}
return 0;
}


@@ -0,0 +1,369 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES crypto API support
*
* Copyright (C) 2013 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/scatterwalk.h>
#include "ccp-crypto.h"
static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
{
struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
if (ret)
return ret;
if (ctx->u.aes.mode != CCP_AES_MODE_ECB)
memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
return 0;
}
static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
struct ccp_crypto_ablkcipher_alg *alg =
ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
switch (key_len) {
case AES_KEYSIZE_128:
ctx->u.aes.type = CCP_AES_TYPE_128;
break;
case AES_KEYSIZE_192:
ctx->u.aes.type = CCP_AES_TYPE_192;
break;
case AES_KEYSIZE_256:
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->u.aes.mode = alg->mode;
ctx->u.aes.key_len = key_len;
memcpy(ctx->u.aes.key, key, key_len);
sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
return 0;
}
static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
int ret;
if (!ctx->u.aes.key_len)
return -EINVAL;
if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
(ctx->u.aes.mode == CCP_AES_MODE_CBC) ||
(ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
(req->nbytes & (AES_BLOCK_SIZE - 1)))
return -EINVAL;
if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
if (!req->info)
return -EINVAL;
memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
iv_sg = &rctx->iv_sg;
iv_len = AES_BLOCK_SIZE;
sg_init_one(iv_sg, rctx->iv, iv_len);
}
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
INIT_LIST_HEAD(&rctx->cmd.entry);
rctx->cmd.engine = CCP_ENGINE_AES;
rctx->cmd.u.aes.type = ctx->u.aes.type;
rctx->cmd.u.aes.mode = ctx->u.aes.mode;
rctx->cmd.u.aes.action =
(encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT;
rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
rctx->cmd.u.aes.iv = iv_sg;
rctx->cmd.u.aes.iv_len = iv_len;
rctx->cmd.u.aes.src = req->src;
rctx->cmd.u.aes.src_len = req->nbytes;
rctx->cmd.u.aes.dst = req->dst;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
return ret;
}
static int ccp_aes_encrypt(struct ablkcipher_request *req)
{
return ccp_aes_crypt(req, true);
}
static int ccp_aes_decrypt(struct ablkcipher_request *req)
{
return ccp_aes_crypt(req, false);
}
static int ccp_aes_cra_init(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->complete = ccp_aes_complete;
ctx->u.aes.key_len = 0;
tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
return 0;
}
static void ccp_aes_cra_exit(struct crypto_tfm *tfm)
{
}
static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
int ret)
{
struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
/* Restore the original pointer */
req->info = rctx->rfc3686_info;
return ccp_aes_complete(async_req, ret);
}
static int ccp_aes_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
key_len -= CTR_RFC3686_NONCE_SIZE;
memcpy(ctx->u.aes.nonce, key + key_len, CTR_RFC3686_NONCE_SIZE);
return ccp_aes_setkey(tfm, key, key_len);
}
static int ccp_aes_rfc3686_crypt(struct ablkcipher_request *req, bool encrypt)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
u8 *iv;
/* Initialize the CTR block */
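/* RFC 3686 counter block layout: the 4-byte nonce saved at setkey time,
 * followed by the 8-byte per-request IV, followed by a 32-bit big-endian
 * block counter that starts at 1. */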
iv = rctx->rfc3686_iv;
memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE);
iv += CTR_RFC3686_NONCE_SIZE;
memcpy(iv, req->info, CTR_RFC3686_IV_SIZE);
iv += CTR_RFC3686_IV_SIZE;
*(__be32 *)iv = cpu_to_be32(1);
/* Point to the new IV */
rctx->rfc3686_info = req->info;
req->info = rctx->rfc3686_iv;
return ccp_aes_crypt(req, encrypt);
}
static int ccp_aes_rfc3686_encrypt(struct ablkcipher_request *req)
{
return ccp_aes_rfc3686_crypt(req, true);
}
static int ccp_aes_rfc3686_decrypt(struct ablkcipher_request *req)
{
return ccp_aes_rfc3686_crypt(req, false);
}
static int ccp_aes_rfc3686_cra_init(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->complete = ccp_aes_rfc3686_complete;
ctx->u.aes.key_len = 0;
tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
return 0;
}
static void ccp_aes_rfc3686_cra_exit(struct crypto_tfm *tfm)
{
}
static struct crypto_alg ccp_aes_defaults = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ccp_ctx),
.cra_priority = CCP_CRA_PRIORITY,
.cra_type = &crypto_ablkcipher_type,
.cra_init = ccp_aes_cra_init,
.cra_exit = ccp_aes_cra_exit,
.cra_module = THIS_MODULE,
.cra_ablkcipher = {
.setkey = ccp_aes_setkey,
.encrypt = ccp_aes_encrypt,
.decrypt = ccp_aes_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
},
};
static struct crypto_alg ccp_aes_rfc3686_defaults = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = CTR_RFC3686_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ccp_ctx),
.cra_priority = CCP_CRA_PRIORITY,
.cra_type = &crypto_ablkcipher_type,
.cra_init = ccp_aes_rfc3686_cra_init,
.cra_exit = ccp_aes_rfc3686_cra_exit,
.cra_module = THIS_MODULE,
.cra_ablkcipher = {
.setkey = ccp_aes_rfc3686_setkey,
.encrypt = ccp_aes_rfc3686_encrypt,
.decrypt = ccp_aes_rfc3686_decrypt,
.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
},
};
struct ccp_aes_def {
enum ccp_aes_mode mode;
const char *name;
const char *driver_name;
unsigned int blocksize;
unsigned int ivsize;
struct crypto_alg *alg_defaults;
};
static struct ccp_aes_def aes_algs[] = {
{
.mode = CCP_AES_MODE_ECB,
.name = "ecb(aes)",
.driver_name = "ecb-aes-ccp",
.blocksize = AES_BLOCK_SIZE,
.ivsize = 0,
.alg_defaults = &ccp_aes_defaults,
},
{
.mode = CCP_AES_MODE_CBC,
.name = "cbc(aes)",
.driver_name = "cbc-aes-ccp",
.blocksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.alg_defaults = &ccp_aes_defaults,
},
{
.mode = CCP_AES_MODE_CFB,
.name = "cfb(aes)",
.driver_name = "cfb-aes-ccp",
.blocksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.alg_defaults = &ccp_aes_defaults,
},
{
.mode = CCP_AES_MODE_OFB,
.name = "ofb(aes)",
.driver_name = "ofb-aes-ccp",
.blocksize = 1,
.ivsize = AES_BLOCK_SIZE,
.alg_defaults = &ccp_aes_defaults,
},
{
.mode = CCP_AES_MODE_CTR,
.name = "ctr(aes)",
.driver_name = "ctr-aes-ccp",
.blocksize = 1,
.ivsize = AES_BLOCK_SIZE,
.alg_defaults = &ccp_aes_defaults,
},
{
.mode = CCP_AES_MODE_CTR,
.name = "rfc3686(ctr(aes))",
.driver_name = "rfc3686-ctr-aes-ccp",
.blocksize = 1,
.ivsize = CTR_RFC3686_IV_SIZE,
.alg_defaults = &ccp_aes_rfc3686_defaults,
},
};
static int ccp_register_aes_alg(struct list_head *head,
const struct ccp_aes_def *def)
{
struct ccp_crypto_ablkcipher_alg *ccp_alg;
struct crypto_alg *alg;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
if (!ccp_alg)
return -ENOMEM;
INIT_LIST_HEAD(&ccp_alg->entry);
ccp_alg->mode = def->mode;
/* Copy the defaults and override as necessary */
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
alg->cra_blocksize = def->blocksize;
alg->cra_ablkcipher.ivsize = def->ivsize;
ret = crypto_register_alg(alg);
if (ret) {
pr_err("%s ablkcipher algorithm registration error (%d)\n",
alg->cra_name, ret);
kfree(ccp_alg);
return ret;
}
list_add(&ccp_alg->entry, head);
return 0;
}
int ccp_register_aes_algs(struct list_head *head)
{
int i, ret;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
ret = ccp_register_aes_alg(head, &aes_algs[i]);
if (ret)
return ret;
}
return 0;
}


@@ -0,0 +1,432 @@
/*
* AMD Cryptographic Coprocessor (CCP) crypto API support
*
* Copyright (C) 2013 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include "ccp-crypto.h"
MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");
/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);
/* For any tfm, requests for that tfm on the same CPU must be returned
* in the order received. With multiple queues available, the CCP can
* process more than one cmd at a time. Therefore we must maintain
a cmd list to ensure the proper ordering of requests on a given tfm/cpu
* combination.
*/
struct ccp_crypto_cpu_queue {
struct list_head cmds;
struct list_head *backlog;
unsigned int cmd_count;
};
#define CCP_CRYPTO_MAX_QLEN 50
struct ccp_crypto_percpu_queue {
struct ccp_crypto_cpu_queue __percpu *cpu_queue;
};
static struct ccp_crypto_percpu_queue req_queue;
struct ccp_crypto_cmd {
struct list_head entry;
struct ccp_cmd *cmd;
/* Save the crypto_tfm and crypto_async_request addresses
* separately to avoid any reference to a possibly invalid
* crypto_async_request structure after invoking the request
* callback
*/
struct crypto_async_request *req;
struct crypto_tfm *tfm;
/* Used for held command processing to determine state */
int ret;
int cpu;
};
struct ccp_crypto_cpu {
struct work_struct work;
struct completion completion;
struct ccp_crypto_cmd *crypto_cmd;
int err;
};
static inline bool ccp_crypto_success(int err)
{
if (err && (err != -EINPROGRESS) && (err != -EBUSY))
return false;
return true;
}
/*
* ccp_crypto_cmd_complete must be called while running on the appropriate
* cpu and the caller must have done a get_cpu to disable preemption
*/
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
struct ccp_crypto_cpu_queue *cpu_queue;
struct ccp_crypto_cmd *held = NULL, *tmp;
*backlog = NULL;
cpu_queue = this_cpu_ptr(req_queue.cpu_queue);
/* Held cmds will be after the current cmd in the queue so start
* searching for a cmd with a matching tfm for submission.
*/
tmp = crypto_cmd;
list_for_each_entry_continue(tmp, &cpu_queue->cmds, entry) {
if (crypto_cmd->tfm != tmp->tfm)
continue;
held = tmp;
break;
}
/* Process the backlog:
* Because cmds can be executed from any point in the cmd list
* special precautions have to be taken when handling the backlog.
*/
if (cpu_queue->backlog != &cpu_queue->cmds) {
/* Skip over this cmd if it is the next backlog cmd */
if (cpu_queue->backlog == &crypto_cmd->entry)
cpu_queue->backlog = crypto_cmd->entry.next;
*backlog = container_of(cpu_queue->backlog,
struct ccp_crypto_cmd, entry);
cpu_queue->backlog = cpu_queue->backlog->next;
/* Skip over this cmd if it is now the next backlog cmd */
if (cpu_queue->backlog == &crypto_cmd->entry)
cpu_queue->backlog = crypto_cmd->entry.next;
}
/* Remove the cmd entry from the list of cmds */
cpu_queue->cmd_count--;
list_del(&crypto_cmd->entry);
return held;
}
static void ccp_crypto_complete_on_cpu(struct work_struct *work)
{
struct ccp_crypto_cpu *cpu_work =
container_of(work, struct ccp_crypto_cpu, work);
struct ccp_crypto_cmd *crypto_cmd = cpu_work->crypto_cmd;
struct ccp_crypto_cmd *held, *next, *backlog;
struct crypto_async_request *req = crypto_cmd->req;
struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
int cpu, ret;
cpu = get_cpu();
if (cpu_work->err == -EINPROGRESS) {
/* Only propagate the -EINPROGRESS if necessary */
if (crypto_cmd->ret == -EBUSY) {
crypto_cmd->ret = -EINPROGRESS;
req->complete(req, -EINPROGRESS);
}
goto e_cpu;
}
/* Operation has completed - update the queue before invoking
* the completion callbacks and retrieve the next cmd (cmd with
* a matching tfm) that can be submitted to the CCP.
*/
held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
if (backlog) {
backlog->ret = -EINPROGRESS;
backlog->req->complete(backlog->req, -EINPROGRESS);
}
/* Transition the state from -EBUSY to -EINPROGRESS first */
if (crypto_cmd->ret == -EBUSY)
req->complete(req, -EINPROGRESS);
/* Completion callbacks */
ret = cpu_work->err;
if (ctx->complete)
ret = ctx->complete(req, ret);
req->complete(req, ret);
/* Submit the next cmd */
while (held) {
ret = ccp_enqueue_cmd(held->cmd);
if (ccp_crypto_success(ret))
break;
/* Error occurred, report it and get the next entry */
held->req->complete(held->req, ret);
next = ccp_crypto_cmd_complete(held, &backlog);
if (backlog) {
backlog->ret = -EINPROGRESS;
backlog->req->complete(backlog->req, -EINPROGRESS);
}
kfree(held);
held = next;
}
kfree(crypto_cmd);
e_cpu:
put_cpu();
complete(&cpu_work->completion);
}
static void ccp_crypto_complete(void *data, int err)
{
struct ccp_crypto_cmd *crypto_cmd = data;
struct ccp_crypto_cpu cpu_work;
INIT_WORK(&cpu_work.work, ccp_crypto_complete_on_cpu);
init_completion(&cpu_work.completion);
cpu_work.crypto_cmd = crypto_cmd;
cpu_work.err = err;
schedule_work_on(crypto_cmd->cpu, &cpu_work.work);
/* Keep the completion call synchronous */
wait_for_completion(&cpu_work.completion);
}
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
struct ccp_crypto_cpu_queue *cpu_queue;
struct ccp_crypto_cmd *active = NULL, *tmp;
int cpu, ret;
cpu = get_cpu();
crypto_cmd->cpu = cpu;
cpu_queue = this_cpu_ptr(req_queue.cpu_queue);
/* Check if the cmd can/should be queued */
if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
ret = -EBUSY;
if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
goto e_cpu;
}
/* Look for an entry with the same tfm. If there is a cmd
* with the same tfm in the list for this cpu then the current
* cmd cannot be submitted to the CCP yet.
*/
list_for_each_entry(tmp, &cpu_queue->cmds, entry) {
if (crypto_cmd->tfm != tmp->tfm)
continue;
active = tmp;
break;
}
ret = -EINPROGRESS;
if (!active) {
ret = ccp_enqueue_cmd(crypto_cmd->cmd);
if (!ccp_crypto_success(ret))
goto e_cpu;
}
if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
ret = -EBUSY;
if (cpu_queue->backlog == &cpu_queue->cmds)
cpu_queue->backlog = &crypto_cmd->entry;
}
crypto_cmd->ret = ret;
cpu_queue->cmd_count++;
list_add_tail(&crypto_cmd->entry, &cpu_queue->cmds);
e_cpu:
put_cpu();
return ret;
}
/**
* ccp_crypto_enqueue_request - queue a crypto async request for processing
* by the CCP
*
* @req: crypto_async_request struct to be processed
* @cmd: ccp_cmd struct to be sent to the CCP
*/
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
struct ccp_cmd *cmd)
{
struct ccp_crypto_cmd *crypto_cmd;
gfp_t gfp;
int ret;
gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
if (!crypto_cmd)
return -ENOMEM;
/* The tfm pointer must be saved and not referenced from the
* crypto_async_request (req) pointer because it is used after the
* completion callback for the request, when the req pointer
* might no longer be valid.
*/
crypto_cmd->cmd = cmd;
crypto_cmd->req = req;
crypto_cmd->tfm = req->tfm;
cmd->callback = ccp_crypto_complete;
cmd->data = crypto_cmd;
if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
cmd->flags |= CCP_CMD_MAY_BACKLOG;
else
cmd->flags &= ~CCP_CMD_MAY_BACKLOG;
ret = ccp_crypto_enqueue_cmd(crypto_cmd);
if (!ccp_crypto_success(ret))
kfree(crypto_cmd);
return ret;
}
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
struct scatterlist *sg_add)
{
struct scatterlist *sg, *sg_last = NULL;
for (sg = table->sgl; sg; sg = sg_next(sg))
if (!sg_page(sg))
break;
BUG_ON(!sg);
for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
sg_set_page(sg, sg_page(sg_add), sg_add->length,
sg_add->offset);
sg_last = sg;
}
BUG_ON(sg_add);
return sg_last;
}
static int ccp_register_algs(void)
{
int ret;
ret = ccp_register_aes_algs(&cipher_algs);
if (ret)
return ret;
ret = ccp_register_aes_cmac_algs(&hash_algs);
if (ret)
return ret;
ret = ccp_register_aes_xts_algs(&cipher_algs);
if (ret)
return ret;
ret = ccp_register_sha_algs(&hash_algs);
if (ret)
return ret;
return 0;
}
static void ccp_unregister_algs(void)
{
struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
crypto_unregister_ahash(&ahash_alg->alg);
list_del(&ahash_alg->entry);
kfree(ahash_alg);
}
list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
crypto_unregister_alg(&ablk_alg->alg);
list_del(&ablk_alg->entry);
kfree(ablk_alg);
}
}
static int ccp_init_queues(void)
{
struct ccp_crypto_cpu_queue *cpu_queue;
int cpu;
req_queue.cpu_queue = alloc_percpu(struct ccp_crypto_cpu_queue);
if (!req_queue.cpu_queue)
return -ENOMEM;
for_each_possible_cpu(cpu) {
cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
INIT_LIST_HEAD(&cpu_queue->cmds);
cpu_queue->backlog = &cpu_queue->cmds;
cpu_queue->cmd_count = 0;
}
return 0;
}
static void ccp_fini_queue(void)
{
struct ccp_crypto_cpu_queue *cpu_queue;
int cpu;
for_each_possible_cpu(cpu) {
cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
BUG_ON(!list_empty(&cpu_queue->cmds));
}
free_percpu(req_queue.cpu_queue);
}
static int ccp_crypto_init(void)
{
int ret;
ret = ccp_init_queues();
if (ret)
return ret;
ret = ccp_register_algs();
if (ret) {
ccp_unregister_algs();
ccp_fini_queue();
}
return ret;
}
static void ccp_crypto_exit(void)
{
ccp_unregister_algs();
ccp_fini_queue();
}
module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);

View file

@ -0,0 +1,517 @@
/*
* AMD Cryptographic Coprocessor (CCP) SHA crypto API support
*
* Copyright (C) 2013 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
#include "ccp-crypto.h"
struct ccp_sha_result {
struct completion completion;
int err;
};
static void ccp_sync_hash_complete(struct crypto_async_request *req, int err)
{
struct ccp_sha_result *result = req->data;
if (err == -EINPROGRESS)
return;
result->err = err;
complete(&result->completion);
}
static int ccp_sync_hash(struct crypto_ahash *tfm, u8 *buf,
struct scatterlist *sg, unsigned int len)
{
struct ccp_sha_result result;
struct ahash_request *req;
int ret;
init_completion(&result.completion);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req)
return -ENOMEM;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
ccp_sync_hash_complete, &result);
ahash_request_set_crypt(req, sg, buf, len);
ret = crypto_ahash_digest(req);
if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
ret = wait_for_completion_interruptible(&result.completion);
if (!ret)
ret = result.err;
}
ahash_request_free(req);
return ret;
}
static int ccp_sha_finish_hmac(struct crypto_async_request *async_req)
{
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct scatterlist sg[2];
unsigned int block_size =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int digest_size = crypto_ahash_digestsize(tfm);
sg_init_table(sg, ARRAY_SIZE(sg));
sg_set_buf(&sg[0], ctx->u.sha.opad, block_size);
sg_set_buf(&sg[1], rctx->ctx, digest_size);
return ccp_sync_hash(ctx->u.sha.hmac_tfm, req->result, sg,
block_size + digest_size);
}
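/* For reference, the function above in effect computes the outer hash of
 * the standard HMAC construction,
 *
 *   HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)),
 *
 * where the inner digest H((K ^ ipad) || m) already sits in rctx->ctx:
 * ccp_sha_init() below seeds the data buffer with the ipad block, so the
 * preceding SHA passes produced the inner hash.
 */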
static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
{
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
unsigned int digest_size = crypto_ahash_digestsize(tfm);
if (ret)
goto e_free;
if (rctx->hash_rem) {
/* Save remaining data to buffer */
unsigned int offset = rctx->nbytes - rctx->hash_rem;
scatterwalk_map_and_copy(rctx->buf, rctx->src,
offset, rctx->hash_rem, 0);
rctx->buf_count = rctx->hash_rem;
} else
rctx->buf_count = 0;
/* Update result area if supplied */
if (req->result)
memcpy(req->result, rctx->ctx, digest_size);
/* If we're doing an HMAC, we need to perform that on the final op */
if (rctx->final && ctx->u.sha.key_len)
ret = ccp_sha_finish_hmac(async_req);
e_free:
sg_free_table(&rctx->data_sg);
return ret;
}
static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
unsigned int final)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct scatterlist *sg;
unsigned int block_size =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int sg_count;
gfp_t gfp;
u64 len;
int ret;
len = (u64)rctx->buf_count + (u64)nbytes;
if (!final && (len <= block_size)) {
scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
0, nbytes, 0);
rctx->buf_count += nbytes;
return 0;
}
rctx->src = req->src;
rctx->nbytes = nbytes;
rctx->final = final;
rctx->hash_rem = final ? 0 : len & (block_size - 1);
rctx->hash_cnt = len - rctx->hash_rem;
if (!final && !rctx->hash_rem) {
/* CCP can't do zero length final, so keep some data around */
rctx->hash_cnt -= block_size;
rctx->hash_rem = block_size;
}
/* Initialize the context scatterlist */
sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));
sg = NULL;
if (rctx->buf_count && nbytes) {
/* Build the data scatterlist table - allocate enough entries
* for both data pieces (buffer and input data)
*/
gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
sg_count = sg_nents(req->src) + 1;
ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
if (ret)
return ret;
sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
sg_mark_end(sg);
sg = rctx->data_sg.sgl;
} else if (rctx->buf_count) {
sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
sg = &rctx->buf_sg;
} else if (nbytes) {
sg = req->src;
}
rctx->msg_bits += (rctx->hash_cnt << 3); /* Total in bits */
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
INIT_LIST_HEAD(&rctx->cmd.entry);
rctx->cmd.engine = CCP_ENGINE_SHA;
rctx->cmd.u.sha.type = rctx->type;
rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
rctx->cmd.u.sha.src = sg;
rctx->cmd.u.sha.src_len = rctx->hash_cnt;
rctx->cmd.u.sha.final = rctx->final;
rctx->cmd.u.sha.msg_bits = rctx->msg_bits;
rctx->first = 0;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
return ret;
}
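/* A worked example of the buffering logic above, assuming SHA-256
 * (block_size = 64), 10 bytes already buffered (rctx->buf_count = 10) and
 * a 100-byte non-final update (nbytes = 100, final = 0):
 *
 *   len      = 10 + 100       = 110
 *   hash_rem = 110 & (64 - 1) = 46   (copied back into rctx->buf)
 *   hash_cnt = 110 - 46       = 64   (one full block handed to the CCP)
 *
 * On a final update hash_rem is forced to 0, so any trailing partial
 * block is hashed as well.
 */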
static int ccp_sha_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_crypto_ahash_alg *alg =
ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
unsigned int block_size =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
memset(rctx, 0, sizeof(*rctx));
memcpy(rctx->ctx, alg->init, sizeof(rctx->ctx));
rctx->type = alg->type;
rctx->first = 1;
if (ctx->u.sha.key_len) {
/* Buffer the HMAC key for first update */
memcpy(rctx->buf, ctx->u.sha.ipad, block_size);
rctx->buf_count = block_size;
}
return 0;
}
static int ccp_sha_update(struct ahash_request *req)
{
return ccp_do_sha_update(req, req->nbytes, 0);
}
static int ccp_sha_final(struct ahash_request *req)
{
return ccp_do_sha_update(req, 0, 1);
}
static int ccp_sha_finup(struct ahash_request *req)
{
return ccp_do_sha_update(req, req->nbytes, 1);
}
static int ccp_sha_digest(struct ahash_request *req)
{
int ret;
ret = ccp_sha_init(req);
if (ret)
return ret;
return ccp_sha_finup(req);
}
static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct scatterlist sg;
unsigned int block_size =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int digest_size = crypto_ahash_digestsize(tfm);
int i, ret;
/* Set to zero until complete */
ctx->u.sha.key_len = 0;
/* Clear key area to provide zero padding for keys smaller
* than the block size
*/
memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key));
if (key_len > block_size) {
/* Must hash the input key */
sg_init_one(&sg, key, key_len);
ret = ccp_sync_hash(tfm, ctx->u.sha.key, &sg, key_len);
if (ret) {
crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
key_len = digest_size;
} else
memcpy(ctx->u.sha.key, key, key_len);
for (i = 0; i < block_size; i++) {
ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36;
ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c;
}
ctx->u.sha.key_len = key_len;
return 0;
}
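/* The key handling above follows the usual HMAC rules; for example, with
 * hmac(sha256) (block_size = 64, digest_size = 32):
 *
 *   - a 100-byte key is first hashed down to its 32-byte digest,
 *   - a 20-byte key is used as-is,
 *
 * and in both cases the key area is zero-padded out to the 64-byte block
 * before the ipad (xor 0x36) and opad (xor 0x5c) blocks are derived.
 */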
static int ccp_sha_cra_init(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
ctx->complete = ccp_sha_complete;
ctx->u.sha.key_len = 0;
crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx));
return 0;
}
static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
{
}
static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
struct crypto_ahash *hmac_tfm;
hmac_tfm = crypto_alloc_ahash(alg->child_alg,
CRYPTO_ALG_TYPE_AHASH, 0);
if (IS_ERR(hmac_tfm)) {
pr_warn("could not load driver %s needed for HMAC support\n",
alg->child_alg);
return PTR_ERR(hmac_tfm);
}
ctx->u.sha.hmac_tfm = hmac_tfm;
return ccp_sha_cra_init(tfm);
}
static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->u.sha.hmac_tfm)
crypto_free_ahash(ctx->u.sha.hmac_tfm);
ccp_sha_cra_exit(tfm);
}
static const __be32 sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
cpu_to_be32(SHA1_H4), 0, 0, 0,
};
static const __be32 sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};
static const __be32 sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};
struct ccp_sha_def {
const char *name;
const char *drv_name;
const __be32 *init;
enum ccp_sha_type type;
u32 digest_size;
u32 block_size;
};
static struct ccp_sha_def sha_algs[] = {
{
.name = "sha1",
.drv_name = "sha1-ccp",
.init = sha1_init,
.type = CCP_SHA_TYPE_1,
.digest_size = SHA1_DIGEST_SIZE,
.block_size = SHA1_BLOCK_SIZE,
},
{
.name = "sha224",
.drv_name = "sha224-ccp",
.init = sha224_init,
.type = CCP_SHA_TYPE_224,
.digest_size = SHA224_DIGEST_SIZE,
.block_size = SHA224_BLOCK_SIZE,
},
{
.name = "sha256",
.drv_name = "sha256-ccp",
.init = sha256_init,
.type = CCP_SHA_TYPE_256,
.digest_size = SHA256_DIGEST_SIZE,
.block_size = SHA256_BLOCK_SIZE,
},
};
static int ccp_register_hmac_alg(struct list_head *head,
const struct ccp_sha_def *def,
const struct ccp_crypto_ahash_alg *base_alg)
{
struct ccp_crypto_ahash_alg *ccp_alg;
struct ahash_alg *alg;
struct hash_alg_common *halg;
struct crypto_alg *base;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
if (!ccp_alg)
return -ENOMEM;
/* Copy the base algorithm and only change what's necessary */
*ccp_alg = *base_alg;
INIT_LIST_HEAD(&ccp_alg->entry);
strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);
alg = &ccp_alg->alg;
alg->setkey = ccp_sha_setkey;
halg = &alg->halg;
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name);
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s",
def->drv_name);
base->cra_init = ccp_hmac_sha_cra_init;
base->cra_exit = ccp_hmac_sha_cra_exit;
ret = crypto_register_ahash(alg);
if (ret) {
pr_err("%s ahash algorithm registration error (%d)\n",
base->cra_name, ret);
kfree(ccp_alg);
return ret;
}
list_add(&ccp_alg->entry, head);
return ret;
}
static int ccp_register_sha_alg(struct list_head *head,
const struct ccp_sha_def *def)
{
struct ccp_crypto_ahash_alg *ccp_alg;
struct ahash_alg *alg;
struct hash_alg_common *halg;
struct crypto_alg *base;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
if (!ccp_alg)
return -ENOMEM;
INIT_LIST_HEAD(&ccp_alg->entry);
ccp_alg->init = def->init;
ccp_alg->type = def->type;
alg = &ccp_alg->alg;
alg->init = ccp_sha_init;
alg->update = ccp_sha_update;
alg->final = ccp_sha_final;
alg->finup = ccp_sha_finup;
alg->digest = ccp_sha_digest;
halg = &alg->halg;
halg->digestsize = def->digest_size;
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = def->block_size;
base->cra_ctxsize = sizeof(struct ccp_ctx);
base->cra_priority = CCP_CRA_PRIORITY;
base->cra_type = &crypto_ahash_type;
base->cra_init = ccp_sha_cra_init;
base->cra_exit = ccp_sha_cra_exit;
base->cra_module = THIS_MODULE;
ret = crypto_register_ahash(alg);
if (ret) {
pr_err("%s ahash algorithm registration error (%d)\n",
base->cra_name, ret);
kfree(ccp_alg);
return ret;
}
list_add(&ccp_alg->entry, head);
ret = ccp_register_hmac_alg(head, def, ccp_alg);
return ret;
}
int ccp_register_sha_algs(struct list_head *head)
{
int i, ret;
for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
ret = ccp_register_sha_alg(head, &sha_algs[i]);
if (ret)
return ret;
}
return 0;
}

View file

@ -0,0 +1,197 @@
/*
* AMD Cryptographic Coprocessor (CCP) crypto API support
*
* Copyright (C) 2013 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __CCP_CRYPTO_H__
#define __CCP_CRYPTO_H__
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/ccp.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#define CCP_CRA_PRIORITY 300
struct ccp_crypto_ablkcipher_alg {
struct list_head entry;
u32 mode;
struct crypto_alg alg;
};
struct ccp_crypto_ahash_alg {
struct list_head entry;
const __be32 *init;
u32 type;
u32 mode;
/* Child algorithm used for HMAC, CMAC, etc */
char child_alg[CRYPTO_MAX_ALG_NAME];
struct ahash_alg alg;
};
static inline struct ccp_crypto_ablkcipher_alg *
ccp_crypto_ablkcipher_alg(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
return container_of(alg, struct ccp_crypto_ablkcipher_alg, alg);
}
static inline struct ccp_crypto_ahash_alg *
ccp_crypto_ahash_alg(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
struct ahash_alg *ahash_alg;
ahash_alg = container_of(alg, struct ahash_alg, halg.base);
return container_of(ahash_alg, struct ccp_crypto_ahash_alg, alg);
}
/***** AES related defines *****/
struct ccp_aes_ctx {
/* Fallback cipher for XTS with unsupported unit sizes */
struct crypto_ablkcipher *tfm_ablkcipher;
/* Cipher used to generate CMAC K1/K2 keys */
struct crypto_cipher *tfm_cipher;
enum ccp_engine engine;
enum ccp_aes_type type;
enum ccp_aes_mode mode;
struct scatterlist key_sg;
unsigned int key_len;
u8 key[AES_MAX_KEY_SIZE];
u8 nonce[CTR_RFC3686_NONCE_SIZE];
/* CMAC key structures */
struct scatterlist k1_sg;
struct scatterlist k2_sg;
unsigned int kn_len;
u8 k1[AES_BLOCK_SIZE];
u8 k2[AES_BLOCK_SIZE];
};
struct ccp_aes_req_ctx {
struct scatterlist iv_sg;
u8 iv[AES_BLOCK_SIZE];
/* Fields used for RFC3686 requests */
u8 *rfc3686_info;
u8 rfc3686_iv[AES_BLOCK_SIZE];
struct ccp_cmd cmd;
};
struct ccp_aes_cmac_req_ctx {
unsigned int null_msg;
unsigned int final;
struct scatterlist *src;
unsigned int nbytes;
u64 hash_cnt;
unsigned int hash_rem;
struct sg_table data_sg;
struct scatterlist iv_sg;
u8 iv[AES_BLOCK_SIZE];
struct scatterlist buf_sg;
unsigned int buf_count;
u8 buf[AES_BLOCK_SIZE];
struct scatterlist pad_sg;
unsigned int pad_count;
u8 pad[AES_BLOCK_SIZE];
struct ccp_cmd cmd;
};
/***** SHA related defines *****/
#define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE
#define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
struct ccp_sha_ctx {
unsigned int key_len;
u8 key[MAX_SHA_BLOCK_SIZE];
u8 ipad[MAX_SHA_BLOCK_SIZE];
u8 opad[MAX_SHA_BLOCK_SIZE];
struct crypto_ahash *hmac_tfm;
};
struct ccp_sha_req_ctx {
enum ccp_sha_type type;
u64 msg_bits;
unsigned int first;
unsigned int final;
struct scatterlist *src;
unsigned int nbytes;
u64 hash_cnt;
unsigned int hash_rem;
struct sg_table data_sg;
struct scatterlist ctx_sg;
u8 ctx[MAX_SHA_CONTEXT_SIZE];
struct scatterlist buf_sg;
unsigned int buf_count;
u8 buf[MAX_SHA_BLOCK_SIZE];
/* HMAC support field */
struct scatterlist pad_sg;
/* CCP driver command */
struct ccp_cmd cmd;
};
/***** Common Context Structure *****/
struct ccp_ctx {
int (*complete)(struct crypto_async_request *req, int ret);
union {
struct ccp_aes_ctx aes;
struct ccp_sha_ctx sha;
} u;
};
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
struct ccp_cmd *cmd);
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
struct scatterlist *sg_add);
int ccp_register_aes_algs(struct list_head *head);
int ccp_register_aes_cmac_algs(struct list_head *head);
int ccp_register_aes_xts_algs(struct list_head *head);
int ccp_register_sha_algs(struct list_head *head);
#endif

View file

@ -0,0 +1,595 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
* Copyright (C) 2013 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#include <asm/cpu_device_id.h>
#include <linux/ccp.h>
#include "ccp-dev.h"
MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");
static struct ccp_device *ccp_dev;
static inline struct ccp_device *ccp_get_device(void)
{
return ccp_dev;
}
static inline void ccp_add_device(struct ccp_device *ccp)
{
ccp_dev = ccp;
}
static inline void ccp_del_device(struct ccp_device *ccp)
{
ccp_dev = NULL;
}
/**
* ccp_enqueue_cmd - queue an operation for processing by the CCP
*
* @cmd: ccp_cmd struct to be processed
*
* Queue a cmd to be processed by the CCP. If queueing the cmd
* would exceed the defined length of the cmd queue, the cmd is
* only queued if the CCP_CMD_MAY_BACKLOG flag is set, in which
* case the return code is -EBUSY.
*
* The callback routine specified in the ccp_cmd struct will be
* called to notify the caller of completion (if the cmd was not
* backlogged) or advancement out of the backlog. If the cmd has
* advanced out of the backlog the "err" value of the callback
* will be -EINPROGRESS. Any other "err" value during callback is
* the result of the operation.
*
* The cmd has been successfully queued if:
* the return code is -EINPROGRESS or
* the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set
*/
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
struct ccp_device *ccp = ccp_get_device();
unsigned long flags;
unsigned int i;
int ret;
if (!ccp)
return -ENODEV;
/* Caller must supply a callback routine */
if (!cmd->callback)
return -EINVAL;
cmd->ccp = ccp;
spin_lock_irqsave(&ccp->cmd_lock, flags);
i = ccp->cmd_q_count;
if (ccp->cmd_count >= MAX_CMD_QLEN) {
ret = -EBUSY;
if (cmd->flags & CCP_CMD_MAY_BACKLOG)
list_add_tail(&cmd->entry, &ccp->backlog);
} else {
ret = -EINPROGRESS;
ccp->cmd_count++;
list_add_tail(&cmd->entry, &ccp->cmd);
/* Find an idle queue */
if (!ccp->suspending) {
for (i = 0; i < ccp->cmd_q_count; i++) {
if (ccp->cmd_q[i].active)
continue;
break;
}
}
}
spin_unlock_irqrestore(&ccp->cmd_lock, flags);
/* If we found an idle queue, wake it up */
if (i < ccp->cmd_q_count)
wake_up_process(ccp->cmd_q[i].kthread);
return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
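/* A minimal, illustrative caller of ccp_enqueue_cmd(), modelled on the SHA
 * code in ccp-crypto-sha.c and on the return-code rules documented above.
 * The function and callback names are hypothetical, <linux/ccp.h> is
 * assumed to be included, and the SHA-specific fields not needed for the
 * illustration (ctx, msg_bits, final) are omitted for brevity.
 */
static void example_cmd_done(void *data, int err)
{
	if (err == -EINPROGRESS)
		return;		/* cmd just advanced out of the backlog */

	/* any other value is the final result of the operation */
}

static int example_submit(struct ccp_cmd *cmd, struct scatterlist *src,
			  unsigned int len)
{
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	INIT_LIST_HEAD(&cmd->entry);
	cmd->engine = CCP_ENGINE_SHA;
	cmd->u.sha.type = CCP_SHA_TYPE_256;
	cmd->u.sha.src = src;
	cmd->u.sha.src_len = len;
	cmd->flags = CCP_CMD_MAY_BACKLOG;
	cmd->callback = example_cmd_done;
	cmd->data = cmd;

	ret = ccp_enqueue_cmd(cmd);

	/* -EINPROGRESS: queued; -EBUSY: backlogged because MAY_BACKLOG was
	 * set; any other value means the cmd was not accepted and the
	 * callback will not be invoked.
	 */
	return ret;
}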
static void ccp_do_cmd_backlog(struct work_struct *work)
{
struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
struct ccp_device *ccp = cmd->ccp;
unsigned long flags;
unsigned int i;
cmd->callback(cmd->data, -EINPROGRESS);
spin_lock_irqsave(&ccp->cmd_lock, flags);
ccp->cmd_count++;
list_add_tail(&cmd->entry, &ccp->cmd);
/* Find an idle queue */
for (i = 0; i < ccp->cmd_q_count; i++) {
if (ccp->cmd_q[i].active)
continue;
break;
}
spin_unlock_irqrestore(&ccp->cmd_lock, flags);
/* If we found an idle queue, wake it up */
if (i < ccp->cmd_q_count)
wake_up_process(ccp->cmd_q[i].kthread);
}
static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
struct ccp_device *ccp = cmd_q->ccp;
struct ccp_cmd *cmd = NULL;
struct ccp_cmd *backlog = NULL;
unsigned long flags;
spin_lock_irqsave(&ccp->cmd_lock, flags);
cmd_q->active = 0;
if (ccp->suspending) {
cmd_q->suspended = 1;
spin_unlock_irqrestore(&ccp->cmd_lock, flags);
wake_up_interruptible(&ccp->suspend_queue);
return NULL;
}
if (ccp->cmd_count) {
cmd_q->active = 1;
cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
list_del(&cmd->entry);
ccp->cmd_count--;
}
if (!list_empty(&ccp->backlog)) {
backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
entry);
list_del(&backlog->entry);
}
spin_unlock_irqrestore(&ccp->cmd_lock, flags);
if (backlog) {
INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
schedule_work(&backlog->work);
}
return cmd;
}
static void ccp_do_cmd_complete(struct work_struct *work)
{
struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
cmd->callback(cmd->data, cmd->ret);
}
static int ccp_cmd_queue_thread(void *data)
{
struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
struct ccp_cmd *cmd;
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
schedule();
set_current_state(TASK_INTERRUPTIBLE);
cmd = ccp_dequeue_cmd(cmd_q);
if (!cmd)
continue;
__set_current_state(TASK_RUNNING);
/* Execute the command */
cmd->ret = ccp_run_cmd(cmd_q, cmd);
/* Schedule the completion callback */
INIT_WORK(&cmd->work, ccp_do_cmd_complete);
schedule_work(&cmd->work);
}
__set_current_state(TASK_RUNNING);
return 0;
}
static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
u32 trng_value;
int len = min_t(int, sizeof(trng_value), max);
/*
* Locking is provided by the caller so we can update device
* hwrng-related fields safely
*/
trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
if (!trng_value) {
/* Zero is returned if no data is available or if a
* bad-entropy error is present. Assume an error if
* we exceed TRNG_RETRIES reads of zero.
*/
if (ccp->hwrng_retries++ > TRNG_RETRIES)
return -EIO;
return 0;
}
/* Reset the counter and save the rng value */
ccp->hwrng_retries = 0;
memcpy(data, &trng_value, len);
return len;
}
/**
* ccp_alloc_struct - allocate and initialize the ccp_device struct
*
* @dev: device struct of the CCP
*/
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
struct ccp_device *ccp;
ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
if (ccp == NULL) {
dev_err(dev, "unable to allocate device struct\n");
return NULL;
}
ccp->dev = dev;
INIT_LIST_HEAD(&ccp->cmd);
INIT_LIST_HEAD(&ccp->backlog);
spin_lock_init(&ccp->cmd_lock);
mutex_init(&ccp->req_mutex);
mutex_init(&ccp->ksb_mutex);
ccp->ksb_count = KSB_COUNT;
ccp->ksb_start = 0;
return ccp;
}
/**
* ccp_init - initialize the CCP device
*
* @ccp: ccp_device struct
*/
int ccp_init(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
struct ccp_cmd_queue *cmd_q;
struct dma_pool *dma_pool;
char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
unsigned int qmr, qim, i;
int ret;
/* Find available queues */
qim = 0;
qmr = ioread32(ccp->io_regs + Q_MASK_REG);
for (i = 0; i < MAX_HW_QUEUES; i++) {
if (!(qmr & (1 << i)))
continue;
/* Allocate a dma pool for this queue */
snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
dma_pool = dma_pool_create(dma_pool_name, dev,
CCP_DMAPOOL_MAX_SIZE,
CCP_DMAPOOL_ALIGN, 0);
if (!dma_pool) {
dev_err(dev, "unable to allocate dma pool\n");
ret = -ENOMEM;
goto e_pool;
}
cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
ccp->cmd_q_count++;
cmd_q->ccp = ccp;
cmd_q->id = i;
cmd_q->dma_pool = dma_pool;
/* Reserve 2 KSB regions for the queue */
cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
ccp->ksb_count -= 2;
/* Preset some register values and masks that are queue
* number dependent
*/
cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
(CMD_Q_STATUS_INCR * i);
cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
(CMD_Q_STATUS_INCR * i);
cmd_q->int_ok = 1 << (i * 2);
cmd_q->int_err = 1 << ((i * 2) + 1);
cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
init_waitqueue_head(&cmd_q->int_queue);
/* Build queue interrupt mask (two interrupts per queue) */
qim |= cmd_q->int_ok | cmd_q->int_err;
dev_dbg(dev, "queue #%u available\n", i);
}
if (ccp->cmd_q_count == 0) {
dev_notice(dev, "no command queues available\n");
ret = -EIO;
goto e_pool;
}
dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
/* Disable and clear interrupts until ready */
iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
ioread32(cmd_q->reg_int_status);
ioread32(cmd_q->reg_status);
}
iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
/* Request an irq */
ret = ccp->get_irq(ccp);
if (ret) {
dev_err(dev, "unable to allocate an IRQ\n");
goto e_pool;
}
/* Initialize the queues used to wait for KSB space and suspend */
init_waitqueue_head(&ccp->ksb_queue);
init_waitqueue_head(&ccp->suspend_queue);
/* Create a kthread for each queue */
for (i = 0; i < ccp->cmd_q_count; i++) {
struct task_struct *kthread;
cmd_q = &ccp->cmd_q[i];
kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
"ccp-q%u", cmd_q->id);
if (IS_ERR(kthread)) {
dev_err(dev, "error creating queue thread (%ld)\n",
PTR_ERR(kthread));
ret = PTR_ERR(kthread);
goto e_kthread;
}
cmd_q->kthread = kthread;
wake_up_process(kthread);
}
/* Register the RNG */
ccp->hwrng.name = "ccp-rng";
ccp->hwrng.read = ccp_trng_read;
ret = hwrng_register(&ccp->hwrng);
if (ret) {
dev_err(dev, "error registering hwrng (%d)\n", ret);
goto e_kthread;
}
/* Make the device struct available before enabling interrupts */
ccp_add_device(ccp);
/* Enable interrupts */
iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
return 0;
e_kthread:
for (i = 0; i < ccp->cmd_q_count; i++)
if (ccp->cmd_q[i].kthread)
kthread_stop(ccp->cmd_q[i].kthread);
ccp->free_irq(ccp);
e_pool:
for (i = 0; i < ccp->cmd_q_count; i++)
dma_pool_destroy(ccp->cmd_q[i].dma_pool);
return ret;
}
/**
* ccp_destroy - tear down the CCP device
*
* @ccp: ccp_device struct
*/
void ccp_destroy(struct ccp_device *ccp)
{
struct ccp_cmd_queue *cmd_q;
struct ccp_cmd *cmd;
unsigned int qim, i;
/* Remove general access to the device struct */
ccp_del_device(ccp);
/* Unregister the RNG */
hwrng_unregister(&ccp->hwrng);
/* Stop the queue kthreads */
for (i = 0; i < ccp->cmd_q_count; i++)
if (ccp->cmd_q[i].kthread)
kthread_stop(ccp->cmd_q[i].kthread);
/* Build queue interrupt mask (two interrupt masks per queue) */
qim = 0;
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
qim |= cmd_q->int_ok | cmd_q->int_err;
}
/* Disable and clear interrupts */
iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
ioread32(cmd_q->reg_int_status);
ioread32(cmd_q->reg_status);
}
iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
ccp->free_irq(ccp);
for (i = 0; i < ccp->cmd_q_count; i++)
dma_pool_destroy(ccp->cmd_q[i].dma_pool);
/* Flush the cmd and backlog queue */
while (!list_empty(&ccp->cmd)) {
/* Invoke the callback directly with an error code */
cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
list_del(&cmd->entry);
cmd->callback(cmd->data, -ENODEV);
}
while (!list_empty(&ccp->backlog)) {
/* Invoke the callback directly with an error code */
cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
list_del(&cmd->entry);
cmd->callback(cmd->data, -ENODEV);
}
}
/**
* ccp_irq_handler - handle interrupts generated by the CCP device
*
* @irq: the irq associated with the interrupt
* @data: the data value supplied when the irq was created
*/
irqreturn_t ccp_irq_handler(int irq, void *data)
{
struct device *dev = data;
struct ccp_device *ccp = dev_get_drvdata(dev);
struct ccp_cmd_queue *cmd_q;
u32 q_int, status;
unsigned int i;
status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
q_int = status & (cmd_q->int_ok | cmd_q->int_err);
if (q_int) {
cmd_q->int_status = status;
cmd_q->q_status = ioread32(cmd_q->reg_status);
cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
/* On error, only save the first error value */
if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
cmd_q->int_rcvd = 1;
/* Acknowledge the interrupt and wake the kthread */
iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
wake_up_interruptible(&cmd_q->int_queue);
}
}
return IRQ_HANDLED;
}
#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
unsigned int suspended = 0;
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&ccp->cmd_lock, flags);
for (i = 0; i < ccp->cmd_q_count; i++)
if (ccp->cmd_q[i].suspended)
suspended++;
spin_unlock_irqrestore(&ccp->cmd_lock, flags);
return ccp->cmd_q_count == suspended;
}
#endif
static const struct x86_cpu_id ccp_support[] = {
	{ X86_VENDOR_AMD, 22, },
	{ },
};
static int __init ccp_mod_init(void)
{
struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
int ret;
if (!x86_match_cpu(ccp_support))
return -ENODEV;
switch (cpuinfo->x86) {
case 22:
if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
return -ENODEV;
ret = ccp_pci_init();
if (ret)
return ret;
/* Don't leave the driver loaded if init failed */
if (!ccp_get_device()) {
ccp_pci_exit();
return -ENODEV;
}
return 0;
break;
}
return -ENODEV;
}
static void __exit ccp_mod_exit(void)
{
struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
switch (cpuinfo->x86) {
case 22:
ccp_pci_exit();
break;
}
}
module_init(ccp_mod_init);
module_exit(ccp_mod_exit);

View file

@ -0,0 +1,272 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
* Copyright (C) 2013 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __CCP_DEV_H__
#define __CCP_DEV_H__
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/dmapool.h>
#include <linux/hw_random.h>
#define IO_OFFSET 0x20000
#define MAX_DMAPOOL_NAME_LEN 32
#define MAX_HW_QUEUES 5
#define MAX_CMD_QLEN 100
#define TRNG_RETRIES 10
/****** Register Mappings ******/
#define Q_MASK_REG 0x000
#define TRNG_OUT_REG 0x00c
#define IRQ_MASK_REG 0x040
#define IRQ_STATUS_REG 0x200
#define DEL_CMD_Q_JOB 0x124
#define DEL_Q_ACTIVE 0x00000200
#define DEL_Q_ID_SHIFT 6
#define CMD_REQ0 0x180
#define CMD_REQ_INCR 0x04
#define CMD_Q_STATUS_BASE 0x210
#define CMD_Q_INT_STATUS_BASE 0x214
#define CMD_Q_STATUS_INCR 0x20
#define CMD_Q_CACHE 0x228
#define CMD_Q_CACHE_INC 0x20
#define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f)
#define CMD_Q_DEPTH(__qs) (((__qs) >> 12) & 0x0000000f)
/****** REQ0 Related Values ******/
#define REQ0_WAIT_FOR_WRITE 0x00000004
#define REQ0_INT_ON_COMPLETE 0x00000002
#define REQ0_STOP_ON_COMPLETE 0x00000001
#define REQ0_CMD_Q_SHIFT 9
#define REQ0_JOBID_SHIFT 3
/****** REQ1 Related Values ******/
#define REQ1_PROTECT_SHIFT 27
#define REQ1_ENGINE_SHIFT 23
#define REQ1_KEY_KSB_SHIFT 2
#define REQ1_EOM 0x00000002
#define REQ1_INIT 0x00000001
/* AES Related Values */
#define REQ1_AES_TYPE_SHIFT 21
#define REQ1_AES_MODE_SHIFT 18
#define REQ1_AES_ACTION_SHIFT 17
#define REQ1_AES_CFB_SIZE_SHIFT 10
/* XTS-AES Related Values */
#define REQ1_XTS_AES_SIZE_SHIFT 10
/* SHA Related Values */
#define REQ1_SHA_TYPE_SHIFT 21
/* RSA Related Values */
#define REQ1_RSA_MOD_SIZE_SHIFT 10
/* Pass-Through Related Values */
#define REQ1_PT_BW_SHIFT 12
#define REQ1_PT_BS_SHIFT 10
/* ECC Related Values */
#define REQ1_ECC_AFFINE_CONVERT 0x00200000
#define REQ1_ECC_FUNCTION_SHIFT 18
/****** REQ4 Related Values ******/
#define REQ4_KSB_SHIFT 18
#define REQ4_MEMTYPE_SHIFT 16
/****** REQ6 Related Values ******/
#define REQ6_MEMTYPE_SHIFT 16
/****** Key Storage Block ******/
#define KSB_START 77
#define KSB_END 127
#define KSB_COUNT (KSB_END - KSB_START + 1)
#define CCP_KSB_BITS 256
#define CCP_KSB_BYTES 32
#define CCP_JOBID_MASK 0x0000003f
#define CCP_DMAPOOL_MAX_SIZE 64
#define CCP_DMAPOOL_ALIGN (1 << 5)
#define CCP_REVERSE_BUF_SIZE 64
#define CCP_AES_KEY_KSB_COUNT 1
#define CCP_AES_CTX_KSB_COUNT 1
#define CCP_XTS_AES_KEY_KSB_COUNT 1
#define CCP_XTS_AES_CTX_KSB_COUNT 1
#define CCP_SHA_KSB_COUNT 1
#define CCP_RSA_MAX_WIDTH 4096
#define CCP_PASSTHRU_BLOCKSIZE 256
#define CCP_PASSTHRU_MASKSIZE 32
#define CCP_PASSTHRU_KSB_COUNT 1
#define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */
#define CCP_ECC_MAX_OPERANDS 6
#define CCP_ECC_MAX_OUTPUTS 3
#define CCP_ECC_SRC_BUF_SIZE 448
#define CCP_ECC_DST_BUF_SIZE 192
#define CCP_ECC_OPERAND_SIZE 64
#define CCP_ECC_OUTPUT_SIZE 64
#define CCP_ECC_RESULT_OFFSET 60
#define CCP_ECC_RESULT_SUCCESS 0x0001
struct ccp_device;
struct ccp_cmd;
struct ccp_cmd_queue {
struct ccp_device *ccp;
/* Queue identifier */
u32 id;
/* Queue dma pool */
struct dma_pool *dma_pool;
/* Queue reserved KSB regions */
u32 ksb_key;
u32 ksb_ctx;
/* Queue processing thread */
struct task_struct *kthread;
unsigned int active;
unsigned int suspended;
/* Number of free command slots available */
unsigned int free_slots;
/* Interrupt masks */
u32 int_ok;
u32 int_err;
/* Register addresses for queue */
void __iomem *reg_status;
void __iomem *reg_int_status;
/* Status values from job */
u32 int_status;
u32 q_status;
u32 q_int_status;
u32 cmd_error;
/* Interrupt wait queue */
wait_queue_head_t int_queue;
unsigned int int_rcvd;
} ____cacheline_aligned;
struct ccp_device {
struct device *dev;
/*
* Bus specific device information
*/
void *dev_specific;
int (*get_irq)(struct ccp_device *ccp);
void (*free_irq)(struct ccp_device *ccp);
/*
* I/O area used for device communication. The register mapping
* starts at an offset into the mapped bar.
* The CMD_REQx registers and the Delete_Cmd_Queue_Job register
* need to be protected while a command queue thread is accessing
* them.
*/
struct mutex req_mutex ____cacheline_aligned;
void __iomem *io_map;
void __iomem *io_regs;
/*
* Master lists that all cmds are queued on. Because there can be
* more than one CCP command queue that can process a cmd, a separate
* backlog list is needed so that the backlog completion call
* completes before the cmd is available for execution.
*/
spinlock_t cmd_lock ____cacheline_aligned;
unsigned int cmd_count;
struct list_head cmd;
struct list_head backlog;
/*
* The command queues. These represent the queues available on the
* CCP that are available for processing cmds
*/
struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES];
unsigned int cmd_q_count;
/*
* Support for the CCP True RNG
*/
struct hwrng hwrng;
unsigned int hwrng_retries;
/*
* A counter used to generate job-ids for cmds submitted to the CCP
*/
atomic_t current_id ____cacheline_aligned;
/*
* The CCP uses key storage blocks (KSB) to maintain context for certain
* operations. To prevent multiple cmds from using the same KSB range
* a command queue reserves a KSB range for the duration of the cmd.
* Each queue will, however, reserve 2 KSB blocks for operations that
* only require single KSB entries (e.g. AES context/iv and key) in order
* to avoid allocation contention. This will reserve at most 10 KSB
* entries, leaving 40 KSB entries available for dynamic allocation.
*/
struct mutex ksb_mutex ____cacheline_aligned;
DECLARE_BITMAP(ksb, KSB_COUNT);
wait_queue_head_t ksb_queue;
unsigned int ksb_avail;
unsigned int ksb_count;
u32 ksb_start;
/* Suspend support */
unsigned int suspending;
wait_queue_head_t suspend_queue;
};
int ccp_pci_init(void);
void ccp_pci_exit(void);
struct ccp_device *ccp_alloc_struct(struct device *dev);
int ccp_init(struct ccp_device *ccp);
void ccp_destroy(struct ccp_device *ccp);
bool ccp_queues_suspended(struct ccp_device *ccp);
irqreturn_t ccp_irq_handler(int irq, void *data);
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
#endif

2024
drivers/crypto/ccp/ccp-ops.c Normal file

The diff for this file is not shown because it is too large. Load diff

View file

@ -0,0 +1,361 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
* Copyright (C) 2013 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/ccp.h>
#include "ccp-dev.h"
#define IO_BAR 2
#define MSIX_VECTORS 2
struct ccp_msix {
u32 vector;
char name[16];
};
struct ccp_pci {
int msix_count;
struct ccp_msix msix[MSIX_VECTORS];
};
static int ccp_get_msix_irqs(struct ccp_device *ccp)
{
struct ccp_pci *ccp_pci = ccp->dev_specific;
struct device *dev = ccp->dev;
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct msix_entry msix_entry[MSIX_VECTORS];
unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
int v, ret;
for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
msix_entry[v].entry = v;
while ((ret = pci_enable_msix(pdev, msix_entry, v)) > 0)
v = ret;
if (ret)
return ret;
ccp_pci->msix_count = v;
for (v = 0; v < ccp_pci->msix_count; v++) {
/* Set the interrupt names and request the irqs */
snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v);
ccp_pci->msix[v].vector = msix_entry[v].vector;
ret = request_irq(ccp_pci->msix[v].vector, ccp_irq_handler,
0, ccp_pci->msix[v].name, dev);
if (ret) {
dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
ret);
goto e_irq;
}
}
return 0;
e_irq:
while (v--)
free_irq(ccp_pci->msix[v].vector, dev);
pci_disable_msix(pdev);
ccp_pci->msix_count = 0;
return ret;
}
static int ccp_get_msi_irq(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
int ret;
ret = pci_enable_msi(pdev);
if (ret)
return ret;
ret = request_irq(pdev->irq, ccp_irq_handler, 0, "ccp", dev);
if (ret) {
dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
goto e_msi;
}
return 0;
e_msi:
pci_disable_msi(pdev);
return ret;
}
static int ccp_get_irqs(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
int ret;
ret = ccp_get_msix_irqs(ccp);
if (!ret)
return 0;
/* Couldn't get MSI-X vectors, try MSI */
dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
ret = ccp_get_msi_irq(ccp);
if (!ret)
return 0;
/* Couldn't get MSI interrupt */
dev_notice(dev, "could not enable MSI (%d)\n", ret);
return ret;
}
static void ccp_free_irqs(struct ccp_device *ccp)
{
struct ccp_pci *ccp_pci = ccp->dev_specific;
struct device *dev = ccp->dev;
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
if (ccp_pci->msix_count) {
while (ccp_pci->msix_count--)
free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
dev);
pci_disable_msix(pdev);
} else {
free_irq(pdev->irq, dev);
pci_disable_msi(pdev);
}
}
static int ccp_find_mmio_area(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
resource_size_t io_len;
unsigned long io_flags;
int bar;
io_flags = pci_resource_flags(pdev, IO_BAR);
io_len = pci_resource_len(pdev, IO_BAR);
if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
return IO_BAR;
for (bar = 0; bar < PCI_STD_RESOURCE_END; bar++) {
io_flags = pci_resource_flags(pdev, bar);
io_len = pci_resource_len(pdev, bar);
if ((io_flags & IORESOURCE_MEM) &&
(io_len >= (IO_OFFSET + 0x800)))
return bar;
}
return -EIO;
}
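/* With IO_OFFSET = 0x20000 this check requires a memory BAR of at least
 * 0x20000 + 0x800 = 0x20800 bytes, i.e. room for 2KB of CCP registers
 * starting 128KB into the mapped BAR.
 */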
static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ccp_device *ccp;
struct ccp_pci *ccp_pci;
struct device *dev = &pdev->dev;
unsigned int bar;
int ret;
ret = -ENOMEM;
ccp = ccp_alloc_struct(dev);
if (!ccp)
goto e_err;
ccp_pci = kzalloc(sizeof(*ccp_pci), GFP_KERNEL);
if (!ccp_pci) {
ret = -ENOMEM;
goto e_free1;
}
ccp->dev_specific = ccp_pci;
ccp->get_irq = ccp_get_irqs;
ccp->free_irq = ccp_free_irqs;
ret = pci_request_regions(pdev, "ccp");
if (ret) {
dev_err(dev, "pci_request_regions failed (%d)\n", ret);
goto e_free2;
}
ret = pci_enable_device(pdev);
if (ret) {
dev_err(dev, "pci_enable_device failed (%d)\n", ret);
goto e_regions;
}
pci_set_master(pdev);
ret = ccp_find_mmio_area(ccp);
if (ret < 0)
goto e_device;
bar = ret;
ret = -EIO;
ccp->io_map = pci_iomap(pdev, bar, 0);
if (ccp->io_map == NULL) {
dev_err(dev, "pci_iomap failed\n");
goto e_device;
}
ccp->io_regs = ccp->io_map + IO_OFFSET;
ret = dma_set_mask(dev, DMA_BIT_MASK(48));
if (ret == 0) {
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(48));
if (ret) {
dev_err(dev,
"dma_set_coherent_mask failed (%d)\n",
ret);
goto e_bar0;
}
} else {
ret = dma_set_mask(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "dma_set_mask failed (%d)\n", ret);
goto e_bar0;
}
}
dev_set_drvdata(dev, ccp);
ret = ccp_init(ccp);
if (ret)
goto e_bar0;
dev_notice(dev, "enabled\n");
return 0;
e_bar0:
pci_iounmap(pdev, ccp->io_map);
e_device:
pci_disable_device(pdev);
e_regions:
pci_release_regions(pdev);
e_free2:
kfree(ccp_pci);
e_free1:
kfree(ccp);
e_err:
dev_notice(dev, "initialization failed\n");
return ret;
}
static void ccp_pci_remove(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct ccp_device *ccp = dev_get_drvdata(dev);
if (!ccp)
return;
ccp_destroy(ccp);
pci_iounmap(pdev, ccp->io_map);
pci_disable_device(pdev);
pci_release_regions(pdev);
kfree(ccp);
dev_notice(dev, "disabled\n");
}
#ifdef CONFIG_PM
static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct device *dev = &pdev->dev;
struct ccp_device *ccp = dev_get_drvdata(dev);
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&ccp->cmd_lock, flags);
ccp->suspending = 1;
/* Wake all the queue kthreads to prepare for suspend */
for (i = 0; i < ccp->cmd_q_count; i++)
wake_up_process(ccp->cmd_q[i].kthread);
spin_unlock_irqrestore(&ccp->cmd_lock, flags);
/* Wait for all queue kthreads to say they're done */
while (!ccp_queues_suspended(ccp))
wait_event_interruptible(ccp->suspend_queue,
ccp_queues_suspended(ccp));
return 0;
}
static int ccp_pci_resume(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct ccp_device *ccp = dev_get_drvdata(dev);
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&ccp->cmd_lock, flags);
ccp->suspending = 0;
/* Wake up all the kthreads */
for (i = 0; i < ccp->cmd_q_count; i++) {
ccp->cmd_q[i].suspended = 0;
wake_up_process(ccp->cmd_q[i].kthread);
}
spin_unlock_irqrestore(&ccp->cmd_lock, flags);
return 0;
}
#endif
static DEFINE_PCI_DEVICE_TABLE(ccp_pci_table) = {
{ PCI_VDEVICE(AMD, 0x1537), },
/* Last entry must be zero */
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ccp_pci_table);
static struct pci_driver ccp_pci_driver = {
.name = "AMD Cryptographic Coprocessor",
.id_table = ccp_pci_table,
.probe = ccp_pci_probe,
.remove = ccp_pci_remove,
#ifdef CONFIG_PM
.suspend = ccp_pci_suspend,
.resume = ccp_pci_resume,
#endif
};
int ccp_pci_init(void)
{
return pci_register_driver(&ccp_pci_driver);
}
void ccp_pci_exit(void)
{
pci_unregister_driver(&ccp_pci_driver);
}

View file

@ -1,903 +0,0 @@
/*
* Cryptographic API.
*
* Support for DCP cryptographic accelerator.
*
* Copyright (c) 2013
* Author: Tobias Rauter <tobias.rauter@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Based on tegra-aes.c, dcp.c (from freescale SDK) and sahara.c
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/miscdevice.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
/* IOCTL for DCP OTP Key AES - taken from Freescale's SDK*/
#define DBS_IOCTL_BASE 'd'
#define DBS_ENC _IOW(DBS_IOCTL_BASE, 0x00, uint8_t[16])
#define DBS_DEC _IOW(DBS_IOCTL_BASE, 0x01, uint8_t[16])
/* DCP channel used for AES */
#define USED_CHANNEL 1
/* Ring Buffers' maximum size */
#define DCP_MAX_PKG 20
/* Control Register */
#define DCP_REG_CTRL 0x000
#define DCP_CTRL_SFRST (1<<31)
#define DCP_CTRL_CLKGATE (1<<30)
#define DCP_CTRL_CRYPTO_PRESENT (1<<29)
#define DCP_CTRL_SHA_PRESENT (1<<28)
#define DCP_CTRL_GATHER_RES_WRITE (1<<23)
#define DCP_CTRL_ENABLE_CONTEXT_CACHE (1<<22)
#define DCP_CTRL_ENABLE_CONTEXT_SWITCH (1<<21)
#define DCP_CTRL_CH_IRQ_E_0 0x01
#define DCP_CTRL_CH_IRQ_E_1 0x02
#define DCP_CTRL_CH_IRQ_E_2 0x04
#define DCP_CTRL_CH_IRQ_E_3 0x08
/* Status register */
#define DCP_REG_STAT 0x010
#define DCP_STAT_OTP_KEY_READY (1<<28)
#define DCP_STAT_CUR_CHANNEL(stat) ((stat>>24)&0x0F)
#define DCP_STAT_READY_CHANNEL(stat) ((stat>>16)&0x0F)
#define DCP_STAT_IRQ(stat) (stat&0x0F)
#define DCP_STAT_CHAN_0 (0x01)
#define DCP_STAT_CHAN_1 (0x02)
#define DCP_STAT_CHAN_2 (0x04)
#define DCP_STAT_CHAN_3 (0x08)
/* Channel Control Register */
#define DCP_REG_CHAN_CTRL 0x020
#define DCP_CHAN_CTRL_CH0_IRQ_MERGED (1<<16)
#define DCP_CHAN_CTRL_HIGH_PRIO_0 (0x0100)
#define DCP_CHAN_CTRL_HIGH_PRIO_1 (0x0200)
#define DCP_CHAN_CTRL_HIGH_PRIO_2 (0x0400)
#define DCP_CHAN_CTRL_HIGH_PRIO_3 (0x0800)
#define DCP_CHAN_CTRL_ENABLE_0 (0x01)
#define DCP_CHAN_CTRL_ENABLE_1 (0x02)
#define DCP_CHAN_CTRL_ENABLE_2 (0x04)
#define DCP_CHAN_CTRL_ENABLE_3 (0x08)
/*
* Channel Registers:
* The DCP has 4 channels. Each of these channels
* has 4 registers (command pointer, semaphore, status and options).
* The address of register REG of channel CHAN is obtained by
* dcp_chan_reg(REG, CHAN)
*/
#define DCP_REG_CHAN_PTR 0x00000100
#define DCP_REG_CHAN_SEMA 0x00000110
#define DCP_REG_CHAN_STAT 0x00000120
#define DCP_REG_CHAN_OPT 0x00000130
#define DCP_CHAN_STAT_NEXT_CHAIN_IS_0 0x010000
#define DCP_CHAN_STAT_NO_CHAIN 0x020000
#define DCP_CHAN_STAT_CONTEXT_ERROR 0x030000
#define DCP_CHAN_STAT_PAYLOAD_ERROR 0x040000
#define DCP_CHAN_STAT_INVALID_MODE 0x050000
#define DCP_CHAN_STAT_PAGEFAULT 0x40
#define DCP_CHAN_STAT_DST 0x20
#define DCP_CHAN_STAT_SRC 0x10
#define DCP_CHAN_STAT_PACKET 0x08
#define DCP_CHAN_STAT_SETUP 0x04
#define DCP_CHAN_STAT_MISMATCH 0x02
/* hw packet control*/
#define DCP_PKT_PAYLOAD_KEY (1<<11)
#define DCP_PKT_OTP_KEY (1<<10)
#define DCP_PKT_CIPHER_INIT (1<<9)
#define DCP_PKG_CIPHER_ENCRYPT (1<<8)
#define DCP_PKT_CIPHER_ENABLE (1<<5)
#define DCP_PKT_DECR_SEM (1<<1)
#define DCP_PKT_CHAIN (1<<2)
#define DCP_PKT_IRQ 1
#define DCP_PKT_MODE_CBC (1<<4)
#define DCP_PKT_KEYSELECT_OTP (0xFF<<8)
/* cipher flags */
#define DCP_ENC 0x0001
#define DCP_DEC 0x0002
#define DCP_ECB 0x0004
#define DCP_CBC 0x0008
#define DCP_CBC_INIT 0x0010
#define DCP_NEW_KEY 0x0040
#define DCP_OTP_KEY 0x0080
#define DCP_AES 0x1000
/* DCP Flags */
#define DCP_FLAG_BUSY 0x01
#define DCP_FLAG_PRODUCING 0x02
/* clock defines */
#define CLOCK_ON 1
#define CLOCK_OFF 0
struct dcp_dev_req_ctx {
int mode;
};
struct dcp_op {
unsigned int flags;
u8 key[AES_KEYSIZE_128];
int keylen;
struct ablkcipher_request *req;
struct crypto_ablkcipher *fallback;
uint32_t stat;
uint32_t pkt1;
uint32_t pkt2;
struct ablkcipher_walk walk;
};
struct dcp_dev {
struct device *dev;
void __iomem *dcp_regs_base;
int dcp_vmi_irq;
int dcp_irq;
spinlock_t queue_lock;
struct crypto_queue queue;
uint32_t pkt_produced;
uint32_t pkt_consumed;
struct dcp_hw_packet *hw_pkg[DCP_MAX_PKG];
dma_addr_t hw_phys_pkg;
/* [KEY][IV] Both with 16 Bytes */
u8 *payload_base;
dma_addr_t payload_base_dma;
struct tasklet_struct done_task;
struct tasklet_struct queue_task;
struct timer_list watchdog;
unsigned long flags;
struct dcp_op *ctx;
struct miscdevice dcp_bootstream_misc;
};
struct dcp_hw_packet {
uint32_t next;
uint32_t pkt1;
uint32_t pkt2;
uint32_t src;
uint32_t dst;
uint32_t size;
uint32_t payload;
uint32_t stat;
};
static struct dcp_dev *global_dev;
static inline u32 dcp_chan_reg(u32 reg, int chan)
{
return reg + (chan) * 0x40;
}
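/* For example, with the register map above the semaphore register of the
 * channel used for AES is at
 *
 *   dcp_chan_reg(DCP_REG_CHAN_SEMA, USED_CHANNEL) = 0x110 + 1 * 0x40 = 0x150
 */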
static inline void dcp_write(struct dcp_dev *dev, u32 data, u32 reg)
{
writel(data, dev->dcp_regs_base + reg);
}
static inline void dcp_set(struct dcp_dev *dev, u32 data, u32 reg)
{
writel(data, dev->dcp_regs_base + (reg | 0x04));
}
static inline void dcp_clear(struct dcp_dev *dev, u32 data, u32 reg)
{
writel(data, dev->dcp_regs_base + (reg | 0x08));
}
static inline void dcp_toggle(struct dcp_dev *dev, u32 data, u32 reg)
{
writel(data, dev->dcp_regs_base + (reg | 0x0C));
}
static inline unsigned int dcp_read(struct dcp_dev *dev, u32 reg)
{
return readl(dev->dcp_regs_base + reg);
}
static void dcp_dma_unmap(struct dcp_dev *dev, struct dcp_hw_packet *pkt)
{
dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
dma_unmap_page(dev->dev, pkt->dst, pkt->size, DMA_FROM_DEVICE);
dev_dbg(dev->dev, "unmap packet %x", (unsigned int) pkt);
}
static int dcp_dma_map(struct dcp_dev *dev,
struct ablkcipher_walk *walk, struct dcp_hw_packet *pkt)
{
dev_dbg(dev->dev, "map packet %x", (unsigned int) pkt);
/* align to length = 16 */
pkt->size = walk->nbytes - (walk->nbytes % 16);
pkt->src = dma_map_page(dev->dev, walk->src.page, walk->src.offset,
pkt->size, DMA_TO_DEVICE);
if (pkt->src == 0) {
dev_err(dev->dev, "Unable to map src");
return -ENOMEM;
}
pkt->dst = dma_map_page(dev->dev, walk->dst.page, walk->dst.offset,
pkt->size, DMA_FROM_DEVICE);
if (pkt->dst == 0) {
dev_err(dev->dev, "Unable to map dst");
dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
return -ENOMEM;
}
return 0;
}
static void dcp_op_one(struct dcp_dev *dev, struct dcp_hw_packet *pkt,
uint8_t last)
{
struct dcp_op *ctx = dev->ctx;
pkt->pkt1 = ctx->pkt1;
pkt->pkt2 = ctx->pkt2;
pkt->payload = (u32) dev->payload_base_dma;
pkt->stat = 0;
if (ctx->flags & DCP_CBC_INIT) {
pkt->pkt1 |= DCP_PKT_CIPHER_INIT;
ctx->flags &= ~DCP_CBC_INIT;
}
mod_timer(&dev->watchdog, jiffies + msecs_to_jiffies(500));
pkt->pkt1 |= DCP_PKT_IRQ;
if (!last)
pkt->pkt1 |= DCP_PKT_CHAIN;
dev->pkt_produced++;
dcp_write(dev, 1,
dcp_chan_reg(DCP_REG_CHAN_SEMA, USED_CHANNEL));
}
static void dcp_op_proceed(struct dcp_dev *dev)
{
struct dcp_op *ctx = dev->ctx;
struct dcp_hw_packet *pkt;
while (ctx->walk.nbytes) {
int err = 0;
pkt = dev->hw_pkg[dev->pkt_produced % DCP_MAX_PKG];
err = dcp_dma_map(dev, &ctx->walk, pkt);
if (err) {
dev->ctx->stat |= err;
/* start timer to wait for already set up calls */
mod_timer(&dev->watchdog,
jiffies + msecs_to_jiffies(500));
break;
}
err = ctx->walk.nbytes - pkt->size;
ablkcipher_walk_done(dev->ctx->req, &dev->ctx->walk, err);
dcp_op_one(dev, pkt, ctx->walk.nbytes == 0);
/* we have to wait if no space is left in buffer */
if (dev->pkt_produced - dev->pkt_consumed == DCP_MAX_PKG)
break;
}
clear_bit(DCP_FLAG_PRODUCING, &dev->flags);
}
static void dcp_op_start(struct dcp_dev *dev, uint8_t use_walk)
{
struct dcp_op *ctx = dev->ctx;
if (ctx->flags & DCP_NEW_KEY) {
memcpy(dev->payload_base, ctx->key, ctx->keylen);
ctx->flags &= ~DCP_NEW_KEY;
}
ctx->pkt1 = 0;
ctx->pkt1 |= DCP_PKT_CIPHER_ENABLE;
ctx->pkt1 |= DCP_PKT_DECR_SEM;
if (ctx->flags & DCP_OTP_KEY)
ctx->pkt1 |= DCP_PKT_OTP_KEY;
else
ctx->pkt1 |= DCP_PKT_PAYLOAD_KEY;
if (ctx->flags & DCP_ENC)
ctx->pkt1 |= DCP_PKG_CIPHER_ENCRYPT;
ctx->pkt2 = 0;
if (ctx->flags & DCP_CBC)
ctx->pkt2 |= DCP_PKT_MODE_CBC;
dev->pkt_produced = 0;
dev->pkt_consumed = 0;
ctx->stat = 0;
dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
dcp_write(dev, (u32) dev->hw_phys_pkg,
dcp_chan_reg(DCP_REG_CHAN_PTR, USED_CHANNEL));
set_bit(DCP_FLAG_PRODUCING, &dev->flags);
if (use_walk) {
ablkcipher_walk_init(&ctx->walk, ctx->req->dst,
ctx->req->src, ctx->req->nbytes);
ablkcipher_walk_phys(ctx->req, &ctx->walk);
dcp_op_proceed(dev);
} else {
dcp_op_one(dev, dev->hw_pkg[0], 1);
clear_bit(DCP_FLAG_PRODUCING, &dev->flags);
}
}
static void dcp_done_task(unsigned long data)
{
struct dcp_dev *dev = (struct dcp_dev *)data;
struct dcp_hw_packet *last_packet;
int fin;
fin = 0;
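/* Reap completed work: walk the descriptor ring from the consumer index,
 * unmapping every packet whose status word the DCP has set, and clear the
 * status so the slot can be reused. */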
for (last_packet = dev->hw_pkg[(dev->pkt_consumed) % DCP_MAX_PKG];
last_packet->stat == 1;
last_packet =
dev->hw_pkg[++(dev->pkt_consumed) % DCP_MAX_PKG]) {
dcp_dma_unmap(dev, last_packet);
last_packet->stat = 0;
fin++;
}
/* the last call of this function already consumed this IRQ's packet */
if (fin == 0)
return;
dev_dbg(dev->dev,
"Packet(s) done with status %x; finished: %d, produced:%d, complete consumed: %d",
dev->ctx->stat, fin, dev->pkt_produced, dev->pkt_consumed);
last_packet = dev->hw_pkg[(dev->pkt_consumed - 1) % DCP_MAX_PKG];
if (!dev->ctx->stat && last_packet->pkt1 & DCP_PKT_CHAIN) {
if (!test_and_set_bit(DCP_FLAG_PRODUCING, &dev->flags))
dcp_op_proceed(dev);
return;
}
while (unlikely(dev->pkt_consumed < dev->pkt_produced)) {
dcp_dma_unmap(dev,
dev->hw_pkg[dev->pkt_consumed++ % DCP_MAX_PKG]);
}
if (dev->ctx->flags & DCP_OTP_KEY) {
/* we used the miscdevice, no walk to finish */
clear_bit(DCP_FLAG_BUSY, &dev->flags);
return;
}
ablkcipher_walk_complete(&dev->ctx->walk);
dev->ctx->req->base.complete(&dev->ctx->req->base,
dev->ctx->stat);
dev->ctx->req = NULL;
/* in case there are other requests in the queue */
tasklet_schedule(&dev->queue_task);
}
static void dcp_watchdog(unsigned long data)
{
struct dcp_dev *dev = (struct dcp_dev *)data;
dev->ctx->stat |= dcp_read(dev,
dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
dev_err(dev->dev, "Timeout, Channel status: %x", dev->ctx->stat);
if (!dev->ctx->stat)
dev->ctx->stat = -ETIMEDOUT;
dcp_done_task(data);
}
static irqreturn_t dcp_common_irq(int irq, void *context)
{
u32 msk;
struct dcp_dev *dev = (struct dcp_dev *) context;
del_timer(&dev->watchdog);
msk = DCP_STAT_IRQ(dcp_read(dev, DCP_REG_STAT));
dcp_clear(dev, msk, DCP_REG_STAT);
if (msk == 0)
return IRQ_NONE;
dev->ctx->stat |= dcp_read(dev,
dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
if (msk & DCP_STAT_CHAN_1)
tasklet_schedule(&dev->done_task);
return IRQ_HANDLED;
}
static irqreturn_t dcp_vmi_irq(int irq, void *context)
{
return dcp_common_irq(irq, context);
}
static irqreturn_t dcp_irq(int irq, void *context)
{
return dcp_common_irq(irq, context);
}
static void dcp_crypt(struct dcp_dev *dev, struct dcp_op *ctx)
{
dev->ctx = ctx;
if ((ctx->flags & DCP_CBC) && ctx->req->info) {
ctx->flags |= DCP_CBC_INIT;
memcpy(dev->payload_base + AES_KEYSIZE_128,
ctx->req->info, AES_KEYSIZE_128);
}
dcp_op_start(dev, 1);
}
static void dcp_queue_task(unsigned long data)
{
struct dcp_dev *dev = (struct dcp_dev *) data;
struct crypto_async_request *async_req, *backlog;
struct crypto_ablkcipher *tfm;
struct dcp_op *ctx;
struct dcp_dev_req_ctx *rctx;
struct ablkcipher_request *req;
unsigned long flags;
spin_lock_irqsave(&dev->queue_lock, flags);
backlog = crypto_get_backlog(&dev->queue);
async_req = crypto_dequeue_request(&dev->queue);
spin_unlock_irqrestore(&dev->queue_lock, flags);
if (!async_req)
goto ret_nothing_done;
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
req = ablkcipher_request_cast(async_req);
tfm = crypto_ablkcipher_reqtfm(req);
rctx = ablkcipher_request_ctx(req);
ctx = crypto_ablkcipher_ctx(tfm);
if (!req->src || !req->dst)
goto ret_nothing_done;
ctx->flags |= rctx->mode;
ctx->req = req;
dcp_crypt(dev, ctx);
return;
ret_nothing_done:
clear_bit(DCP_FLAG_BUSY, &dev->flags);
}
static int dcp_cra_init(struct crypto_tfm *tfm)
{
const char *name = tfm->__crt_alg->cra_name;
struct dcp_op *ctx = crypto_tfm_ctx(tfm);
tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_dev_req_ctx);
ctx->fallback = crypto_alloc_ablkcipher(name, 0,
CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback)) {
dev_err(global_dev->dev, "Error allocating fallback algo %s\n",
name);
return PTR_ERR(ctx->fallback);
}
return 0;
}
static void dcp_cra_exit(struct crypto_tfm *tfm)
{
struct dcp_op *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback)
crypto_free_ablkcipher(ctx->fallback);
ctx->fallback = NULL;
}
/* async interface */
static int dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int len)
{
struct dcp_op *ctx = crypto_ablkcipher_ctx(tfm);
unsigned int ret = 0;
ctx->keylen = len;
ctx->flags = 0;
if (len == AES_KEYSIZE_128) {
if (memcmp(ctx->key, key, AES_KEYSIZE_128)) {
memcpy(ctx->key, key, len);
ctx->flags |= DCP_NEW_KEY;
}
return 0;
}
ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
ctx->fallback->base.crt_flags |=
(tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
ret = crypto_ablkcipher_setkey(ctx->fallback, key, len);
if (ret) {
struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);
tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
tfm_aux->crt_flags |=
(ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
}
return ret;
}
static int dcp_aes_cbc_crypt(struct ablkcipher_request *req, int mode)
{
struct dcp_dev_req_ctx *rctx = ablkcipher_request_ctx(req);
struct dcp_dev *dev = global_dev;
unsigned long flags;
int err = 0;
if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
return -EINVAL;
rctx->mode = mode;
spin_lock_irqsave(&dev->queue_lock, flags);
err = ablkcipher_enqueue_request(&dev->queue, req);
spin_unlock_irqrestore(&dev->queue_lock, flags);
flags = test_and_set_bit(DCP_FLAG_BUSY, &dev->flags);
if (!(flags & DCP_FLAG_BUSY))
tasklet_schedule(&dev->queue_task);
return err;
}
static int dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
struct crypto_tfm *tfm =
crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct dcp_op *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
int err = 0;
ablkcipher_request_set_tfm(req, ctx->fallback);
err = crypto_ablkcipher_encrypt(req);
ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
return err;
}
return dcp_aes_cbc_crypt(req, DCP_AES | DCP_ENC | DCP_CBC);
}
static int dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
struct crypto_tfm *tfm =
crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct dcp_op *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
int err = 0;
ablkcipher_request_set_tfm(req, ctx->fallback);
err = crypto_ablkcipher_decrypt(req);
ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
return err;
}
return dcp_aes_cbc_crypt(req, DCP_AES | DCP_DEC | DCP_CBC);
}
static struct crypto_alg algs[] = {
{
.cra_name = "cbc(aes)",
.cra_driver_name = "dcp-cbc-aes",
.cra_alignmask = 3,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_type = &crypto_ablkcipher_type,
.cra_priority = 300,
.cra_u.ablkcipher = {
.min_keysize = AES_KEYSIZE_128,
.max_keysize = AES_KEYSIZE_128,
.setkey = dcp_aes_setkey,
.encrypt = dcp_aes_cbc_encrypt,
.decrypt = dcp_aes_cbc_decrypt,
.ivsize = AES_BLOCK_SIZE,
}
},
};
/* DCP bootstream verification interface: uses OTP key for crypto */
static int dcp_bootstream_open(struct inode *inode, struct file *file)
{
file->private_data = container_of((file->private_data),
struct dcp_dev, dcp_bootstream_misc);
return 0;
}
static long dcp_bootstream_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct dcp_dev *dev = (struct dcp_dev *) file->private_data;
void __user *argp = (void __user *)arg;
int ret;
if (dev == NULL)
return -EBADF;
if (cmd != DBS_ENC && cmd != DBS_DEC)
return -EINVAL;
if (copy_from_user(dev->payload_base, argp, 16))
return -EFAULT;
if (test_and_set_bit(DCP_FLAG_BUSY, &dev->flags))
return -EAGAIN;
dev->ctx = kzalloc(sizeof(struct dcp_op), GFP_KERNEL);
if (!dev->ctx) {
dev_err(dev->dev,
"cannot allocate context for OTP crypto");
clear_bit(DCP_FLAG_BUSY, &dev->flags);
return -ENOMEM;
}
dev->ctx->flags = DCP_AES | DCP_ECB | DCP_OTP_KEY | DCP_CBC_INIT;
dev->ctx->flags |= (cmd == DBS_ENC) ? DCP_ENC : DCP_DEC;
dev->hw_pkg[0]->src = dev->payload_base_dma;
dev->hw_pkg[0]->dst = dev->payload_base_dma;
dev->hw_pkg[0]->size = 16;
dcp_op_start(dev, 0);
while (test_bit(DCP_FLAG_BUSY, &dev->flags))
cpu_relax();
ret = dev->ctx->stat;
if (!ret && copy_to_user(argp, dev->payload_base, 16))
ret = -EFAULT;
kfree(dev->ctx);
return ret;
}
static const struct file_operations dcp_bootstream_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = dcp_bootstream_ioctl,
.open = dcp_bootstream_open,
};
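The bootstream interface above works on exactly one 16-byte block per call: the ioctl copies 16 bytes in from user space, ciphers them with the on-chip OTP key, writes the 16-byte result back into the same buffer, and returns -EAGAIN if the engine is busy. A minimal user-space sketch of a caller, assuming the DBS_ENC/DBS_DEC request codes come from a UAPI header (the header name used here is purely illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "dcp_bootstream_ioctl.h"	/* assumed to provide DBS_ENC/DBS_DEC */

int main(void)
{
	unsigned char block[16] = { 0 };	/* plaintext in, ciphertext out */
	int fd = open("/dev/dcpboot", O_RDWR);

	if (fd < 0) {
		perror("open /dev/dcpboot");
		return 1;
	}
	/* The driver reads 16 bytes from 'block', runs AES with the OTP
	 * key and writes the 16-byte result back into 'block'. */
	if (ioctl(fd, DBS_ENC, block) != 0) {
		perror("DBS_ENC");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}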
static int dcp_probe(struct platform_device *pdev)
{
struct dcp_dev *dev = NULL;
struct resource *r;
int i, ret, j;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
global_dev = dev;
dev->dev = &pdev->dev;
platform_set_drvdata(pdev, dev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dev->dcp_regs_base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(dev->dcp_regs_base))
return PTR_ERR(dev->dcp_regs_base);
dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL);
udelay(10);
dcp_clear(dev, DCP_CTRL_SFRST | DCP_CTRL_CLKGATE, DCP_REG_CTRL);
dcp_write(dev, DCP_CTRL_GATHER_RES_WRITE |
DCP_CTRL_ENABLE_CONTEXT_CACHE | DCP_CTRL_CH_IRQ_E_1,
DCP_REG_CTRL);
dcp_write(dev, DCP_CHAN_CTRL_ENABLE_1, DCP_REG_CHAN_CTRL);
for (i = 0; i < 4; i++)
dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, i));
dcp_clear(dev, -1, DCP_REG_STAT);
r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!r) {
dev_err(&pdev->dev, "can't get IRQ resource (0)\n");
return -EIO;
}
dev->dcp_vmi_irq = r->start;
ret = devm_request_irq(&pdev->dev, dev->dcp_vmi_irq, dcp_vmi_irq, 0,
"dcp", dev);
if (ret != 0) {
dev_err(&pdev->dev, "can't request_irq (0)\n");
return -EIO;
}
r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
if (!r) {
dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
return -EIO;
}
dev->dcp_irq = r->start;
ret = devm_request_irq(&pdev->dev, dev->dcp_irq, dcp_irq, 0, "dcp",
dev);
if (ret != 0) {
dev_err(&pdev->dev, "can't request_irq (1)\n");
return -EIO;
}
dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev,
DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
&dev->hw_phys_pkg,
GFP_KERNEL);
if (!dev->hw_pkg[0]) {
dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
return -ENOMEM;
}
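/* Chain the DMA descriptors into a ring: each packet's 'next' field holds
 * the bus address of the packet that follows it, and the last packet's
 * 'next' points back to the first descriptor. */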
for (i = 1; i < DCP_MAX_PKG; i++) {
dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg
+ i * sizeof(struct dcp_hw_packet);
dev->hw_pkg[i] = dev->hw_pkg[i - 1] + 1;
}
dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg;
dev->payload_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
&dev->payload_base_dma, GFP_KERNEL);
if (!dev->payload_base) {
dev_err(&pdev->dev, "Could not allocate memory for key\n");
ret = -ENOMEM;
goto err_free_hw_packet;
}
tasklet_init(&dev->queue_task, dcp_queue_task,
(unsigned long) dev);
tasklet_init(&dev->done_task, dcp_done_task,
(unsigned long) dev);
spin_lock_init(&dev->queue_lock);
crypto_init_queue(&dev->queue, 10);
init_timer(&dev->watchdog);
dev->watchdog.function = &dcp_watchdog;
dev->watchdog.data = (unsigned long)dev;
dev->dcp_bootstream_misc.minor = MISC_DYNAMIC_MINOR;
dev->dcp_bootstream_misc.name = "dcpboot";
dev->dcp_bootstream_misc.fops = &dcp_bootstream_fops;
ret = misc_register(&dev->dcp_bootstream_misc);
if (ret != 0) {
dev_err(dev->dev, "Unable to register misc device\n");
goto err_free_key_iv;
}
for (i = 0; i < ARRAY_SIZE(algs); i++) {
algs[i].cra_priority = 300;
algs[i].cra_ctxsize = sizeof(struct dcp_op);
algs[i].cra_module = THIS_MODULE;
algs[i].cra_init = dcp_cra_init;
algs[i].cra_exit = dcp_cra_exit;
if (crypto_register_alg(&algs[i])) {
dev_err(&pdev->dev, "register algorithm failed\n");
ret = -ENOMEM;
goto err_unregister;
}
}
dev_notice(&pdev->dev, "DCP crypto enabled.\n");
return 0;
err_unregister:
for (j = 0; j < i; j++)
crypto_unregister_alg(&algs[j]);
err_free_key_iv:
tasklet_kill(&dev->done_task);
tasklet_kill(&dev->queue_task);
dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
dev->payload_base_dma);
err_free_hw_packet:
dma_free_coherent(&pdev->dev, DCP_MAX_PKG *
sizeof(struct dcp_hw_packet), dev->hw_pkg[0],
dev->hw_phys_pkg);
return ret;
}
static int dcp_remove(struct platform_device *pdev)
{
struct dcp_dev *dev;
int j;
dev = platform_get_drvdata(pdev);
misc_deregister(&dev->dcp_bootstream_misc);
for (j = 0; j < ARRAY_SIZE(algs); j++)
crypto_unregister_alg(&algs[j]);
tasklet_kill(&dev->done_task);
tasklet_kill(&dev->queue_task);
dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
dev->payload_base_dma);
dma_free_coherent(&pdev->dev,
DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
dev->hw_pkg[0], dev->hw_phys_pkg);
return 0;
}
static struct of_device_id fs_dcp_of_match[] = {
{ .compatible = "fsl-dcp"},
{},
};
static struct platform_driver fs_dcp_driver = {
.probe = dcp_probe,
.remove = dcp_remove,
.driver = {
.name = "fsl-dcp",
.owner = THIS_MODULE,
.of_match_table = fs_dcp_of_match
}
};
module_platform_driver(fs_dcp_driver);
MODULE_AUTHOR("Tobias Rauter <tobias.rauter@gmail.com>");
MODULE_DESCRIPTION("Freescale DCP Crypto Driver");
MODULE_LICENSE("GPL");

drivers/crypto/mxs-dcp.c (new file, 1100 lines)
The diff for this file is not shown because of its large size.

View file

@ -784,6 +784,7 @@ static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
struct omap_aes_dev *dd = NULL;
int err;
/* Find AES device, currently picks the first device */
spin_lock_bh(&list_lock);
@ -792,7 +793,13 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm)
}
spin_unlock_bh(&list_lock);
pm_runtime_get_sync(dd->dev);
err = pm_runtime_get_sync(dd->dev);
if (err < 0) {
dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
__func__, err);
return err;
}
tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
return 0;
@ -1182,7 +1189,12 @@ static int omap_aes_probe(struct platform_device *pdev)
dd->phys_base = res.start;
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
err = pm_runtime_get_sync(dev);
if (err < 0) {
dev_err(dev, "%s: failed to get_sync(%d)\n",
__func__, err);
goto err_res;
}
omap_aes_dma_stop(dd);

View file

@ -789,10 +789,13 @@ static int omap_sham_update_cpu(struct omap_sham_dev *dd)
dev_dbg(dd->dev, "cpu: bufcnt: %u, digcnt: %d, final: %d\n",
ctx->bufcnt, ctx->digcnt, final);
bufcnt = ctx->bufcnt;
ctx->bufcnt = 0;
if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
bufcnt = ctx->bufcnt;
ctx->bufcnt = 0;
return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final);
}
return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final);
return 0;
}
static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
@ -1103,6 +1106,9 @@ static int omap_sham_update(struct ahash_request *req)
return 0;
}
if (dd->polling_mode)
ctx->flags |= BIT(FLAGS_CPU);
return omap_sham_enqueue(req, OP_UPDATE);
}
@ -1970,7 +1976,8 @@ err_algs:
crypto_unregister_ahash(
&dd->pdata->algs_info[i].algs_list[j]);
pm_runtime_disable(dev);
dma_release_channel(dd->dma_lch);
if (dd->dma_lch)
dma_release_channel(dd->dma_lch);
data_err:
dev_err(dev, "initialization failed.\n");
@ -1994,7 +2001,9 @@ static int omap_sham_remove(struct platform_device *pdev)
&dd->pdata->algs_info[i].algs_list[j]);
tasklet_kill(&dd->done_task);
pm_runtime_disable(&pdev->dev);
dma_release_channel(dd->dma_lch);
if (dd->dma_lch)
dma_release_channel(dd->dma_lch);
return 0;
}

View file

@ -338,20 +338,29 @@ DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
static u32 current_desc_hdr(struct device *dev, int ch)
{
struct talitos_private *priv = dev_get_drvdata(dev);
int tail = priv->chan[ch].tail;
int tail, iter;
dma_addr_t cur_desc;
cur_desc = in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
tail = (tail + 1) & (priv->fifo_len - 1);
if (tail == priv->chan[ch].tail) {
if (!cur_desc) {
dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
return 0;
}
tail = priv->chan[ch].tail;
iter = tail;
while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
iter = (iter + 1) & (priv->fifo_len - 1);
if (iter == tail) {
dev_err(dev, "couldn't locate current descriptor\n");
return 0;
}
}
return priv->chan[ch].fifo[tail].desc->hdr;
return priv->chan[ch].fifo[iter].desc->hdr;
}
/*
@ -2486,8 +2495,6 @@ static int talitos_remove(struct platform_device *ofdev)
iounmap(priv->reg);
dev_set_drvdata(dev, NULL);
kfree(priv);
return 0;

include/linux/ccp.h (new file, 537 lines)
View file

@ -0,0 +1,537 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
* Copyright (C) 2013 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __CCP_H__
#define __CCP_H__
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
struct ccp_device;
struct ccp_cmd;
#if defined(CONFIG_CRYPTO_DEV_CCP_DD) || \
defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)
/**
* ccp_enqueue_cmd - queue an operation for processing by the CCP
*
* @cmd: ccp_cmd struct to be processed
*
* Refer to the ccp_cmd struct below for required fields.
*
* Queue a cmd to be processed by the CCP. If queueing the cmd
* would exceed the defined length of the cmd queue, -EBUSY is
* returned; in that case the cmd is still queued (backlogged)
* only if the CCP_CMD_MAY_BACKLOG flag is set.
*
* The callback routine specified in the ccp_cmd struct will be
* called to notify the caller of completion (if the cmd was not
* backlogged) or advancement out of the backlog. If the cmd has
* advanced out of the backlog the "err" value of the callback
* will be -EINPROGRESS. Any other "err" value during callback is
* the result of the operation.
*
* The cmd has been successfully queued if:
* the return code is -EINPROGRESS or
* the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set
*/
int ccp_enqueue_cmd(struct ccp_cmd *cmd);
#else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */
static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
return -ENODEV;
}
#endif /* CONFIG_CRYPTO_DEV_CCP_DD */
/***** AES engine *****/
/**
* ccp_aes_type - AES key size
*
* @CCP_AES_TYPE_128: 128-bit key
* @CCP_AES_TYPE_192: 192-bit key
* @CCP_AES_TYPE_256: 256-bit key
*/
enum ccp_aes_type {
CCP_AES_TYPE_128 = 0,
CCP_AES_TYPE_192,
CCP_AES_TYPE_256,
CCP_AES_TYPE__LAST,
};
/**
* ccp_aes_mode - AES operation mode
*
* @CCP_AES_MODE_ECB: ECB mode
* @CCP_AES_MODE_CBC: CBC mode
* @CCP_AES_MODE_OFB: OFB mode
* @CCP_AES_MODE_CFB: CFB mode
* @CCP_AES_MODE_CTR: CTR mode
* @CCP_AES_MODE_CMAC: CMAC mode
*/
enum ccp_aes_mode {
CCP_AES_MODE_ECB = 0,
CCP_AES_MODE_CBC,
CCP_AES_MODE_OFB,
CCP_AES_MODE_CFB,
CCP_AES_MODE_CTR,
CCP_AES_MODE_CMAC,
CCP_AES_MODE__LAST,
};
/**
* ccp_aes_action - AES operation
*
* @CCP_AES_ACTION_DECRYPT: AES decrypt operation
* @CCP_AES_ACTION_ENCRYPT: AES encrypt operation
*/
enum ccp_aes_action {
CCP_AES_ACTION_DECRYPT = 0,
CCP_AES_ACTION_ENCRYPT,
CCP_AES_ACTION__LAST,
};
/**
* struct ccp_aes_engine - CCP AES operation
* @type: AES operation key size
* @mode: AES operation mode
* @action: AES operation (decrypt/encrypt)
* @key: key to be used for this AES operation
* @key_len: length in bytes of key
* @iv: IV to be used for this AES operation
* @iv_len: length in bytes of iv
* @src: data to be used for this operation
* @dst: data produced by this operation
* @src_len: length in bytes of data used for this operation
* @cmac_final: indicates final operation when running in CMAC mode
* @cmac_key: K1/K2 key used in final CMAC operation
* @cmac_key_len: length in bytes of cmac_key
*
* Variables required to be set when calling ccp_enqueue_cmd():
* - type, mode, action, key, key_len, src, dst, src_len
* - iv, iv_len for any mode other than ECB
* - cmac_final for CMAC mode
* - cmac_key, cmac_key_len for CMAC mode if cmac_final is non-zero
*
* The iv variable is used as both input and output. On completion of the
* AES operation the new IV overwrites the old IV.
*/
struct ccp_aes_engine {
enum ccp_aes_type type;
enum ccp_aes_mode mode;
enum ccp_aes_action action;
struct scatterlist *key;
u32 key_len; /* In bytes */
struct scatterlist *iv;
u32 iv_len; /* In bytes */
struct scatterlist *src, *dst;
u64 src_len; /* In bytes */
u32 cmac_final; /* Indicates final cmac cmd */
struct scatterlist *cmac_key; /* K1/K2 cmac key required for
* final cmac cmd */
u32 cmac_key_len; /* In bytes */
};
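To make the queueing contract above concrete, here is a minimal sketch of a caller submitting a single AES-128-CBC encryption through ccp_enqueue_cmd() and waiting for its completion. It only uses fields documented in this header (struct ccp_cmd and the callback signature appear further below); the single-buffer scatterlist setup and the helper names are illustrative assumptions, not part of the CCP API.

#include <linux/ccp.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/types.h>
#include <crypto/aes.h>

struct ccp_example_result {
	struct completion done;
	int err;
};

static void ccp_example_callback(void *data, int err)
{
	struct ccp_example_result *res = data;

	/* -EINPROGRESS only signals advancement out of the backlog. */
	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->done);
}

/* Encrypt 'len' bytes (a multiple of AES_BLOCK_SIZE) from src to dst. */
static int ccp_example_aes_cbc_encrypt(u8 *key, u8 *iv, u8 *src, u8 *dst,
				       unsigned int len)
{
	struct ccp_example_result res;
	struct scatterlist sg_key, sg_iv, sg_src, sg_dst;
	struct ccp_cmd cmd;
	int ret;

	init_completion(&res.done);
	sg_init_one(&sg_key, key, AES_KEYSIZE_128);
	sg_init_one(&sg_iv, iv, AES_BLOCK_SIZE);
	sg_init_one(&sg_src, src, len);
	sg_init_one(&sg_dst, dst, len);

	memset(&cmd, 0, sizeof(cmd));
	cmd.engine = CCP_ENGINE_AES;
	cmd.flags = CCP_CMD_MAY_BACKLOG;
	cmd.u.aes.type = CCP_AES_TYPE_128;
	cmd.u.aes.mode = CCP_AES_MODE_CBC;
	cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
	cmd.u.aes.key = &sg_key;
	cmd.u.aes.key_len = AES_KEYSIZE_128;
	cmd.u.aes.iv = &sg_iv;
	cmd.u.aes.iv_len = AES_BLOCK_SIZE;
	cmd.u.aes.src = &sg_src;
	cmd.u.aes.dst = &sg_dst;
	cmd.u.aes.src_len = len;
	cmd.callback = ccp_example_callback;
	cmd.data = &res;

	ret = ccp_enqueue_cmd(&cmd);
	/* Successfully queued: -EINPROGRESS, or -EBUSY when backlogged. */
	if (ret != -EINPROGRESS && ret != -EBUSY)
		return ret;

	wait_for_completion(&res.done);
	return res.err;
}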
/***** XTS-AES engine *****/
/**
* ccp_xts_aes_unit_size - XTS unit size
*
* @CCP_XTS_AES_UNIT_SIZE_16: Unit size of 16 bytes
* @CCP_XTS_AES_UNIT_SIZE_512: Unit size of 512 bytes
* @CCP_XTS_AES_UNIT_SIZE_1024: Unit size of 1024 bytes
* @CCP_XTS_AES_UNIT_SIZE_2048: Unit size of 2048 bytes
* @CCP_XTS_AES_UNIT_SIZE_4096: Unit size of 4096 bytes
*/
enum ccp_xts_aes_unit_size {
CCP_XTS_AES_UNIT_SIZE_16 = 0,
CCP_XTS_AES_UNIT_SIZE_512,
CCP_XTS_AES_UNIT_SIZE_1024,
CCP_XTS_AES_UNIT_SIZE_2048,
CCP_XTS_AES_UNIT_SIZE_4096,
CCP_XTS_AES_UNIT_SIZE__LAST,
};
/**
* struct ccp_xts_aes_engine - CCP XTS AES operation
* @action: AES operation (decrypt/encrypt)
* @unit_size: unit size of the XTS operation
* @key: key to be used for this XTS AES operation
* @key_len: length in bytes of key
* @iv: IV to be used for this XTS AES operation
* @iv_len: length in bytes of iv
* @src: data to be used for this operation
* @dst: data produced by this operation
* @src_len: length in bytes of data used for this operation
* @final: indicates final XTS operation
*
* Variables required to be set when calling ccp_enqueue_cmd():
* - action, unit_size, key, key_len, iv, iv_len, src, dst, src_len, final
*
* The iv variable is used as both input and output. On completion of the
* AES operation the new IV overwrites the old IV.
*/
struct ccp_xts_aes_engine {
enum ccp_aes_action action;
enum ccp_xts_aes_unit_size unit_size;
struct scatterlist *key;
u32 key_len; /* In bytes */
struct scatterlist *iv;
u32 iv_len; /* In bytes */
struct scatterlist *src, *dst;
u64 src_len; /* In bytes */
u32 final;
};
/***** SHA engine *****/
#define CCP_SHA_BLOCKSIZE SHA256_BLOCK_SIZE
#define CCP_SHA_CTXSIZE SHA256_DIGEST_SIZE
/**
* ccp_sha_type - type of SHA operation
*
* @CCP_SHA_TYPE_1: SHA-1 operation
* @CCP_SHA_TYPE_224: SHA-224 operation
* @CCP_SHA_TYPE_256: SHA-256 operation
*/
enum ccp_sha_type {
CCP_SHA_TYPE_1 = 1,
CCP_SHA_TYPE_224,
CCP_SHA_TYPE_256,
CCP_SHA_TYPE__LAST,
};
/**
* struct ccp_sha_engine - CCP SHA operation
* @type: Type of SHA operation
* @ctx: current hash value
* @ctx_len: length in bytes of hash value
* @src: data to be used for this operation
* @src_len: length in bytes of data used for this operation
* @final: indicates final SHA operation
* @msg_bits: total length of the message in bits used in final SHA operation
*
* Variables required to be set when calling ccp_enqueue_cmd():
* - type, ctx, ctx_len, src, src_len, final
* - msg_bits if final is non-zero
*
* The ctx variable is used as both input and output. On completion of the
* SHA operation the new hash value overwrites the old hash value.
*/
struct ccp_sha_engine {
enum ccp_sha_type type;
struct scatterlist *ctx;
u32 ctx_len; /* In bytes */
struct scatterlist *src;
u64 src_len; /* In bytes */
u32 final; /* Indicates final sha cmd */
u64 msg_bits; /* Message length in bits required for
* final sha cmd */
};
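The SHA engine follows the same submission pattern; what differs from the AES sketch above is the final/msg_bits handling documented in the struct comment. A short, hedged helper that fills only the SHA-specific part of a command for the last chunk of a message (the helper name and parameters are illustrative):

#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/types.h>

/* Fill in the SHA-specific part of a ccp_cmd for the last chunk of a
 * message.  Submission and completion handling are identical to the
 * AES sketch shown earlier, so they are omitted here. */
static void ccp_example_fill_sha_final(struct ccp_cmd *cmd,
				       struct scatterlist *ctx_sg,
				       struct scatterlist *src_sg,
				       u64 src_len, u64 total_msg_bits)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->engine = CCP_ENGINE_SHA;
	cmd->u.sha.type = CCP_SHA_TYPE_256;
	cmd->u.sha.ctx = ctx_sg;		/* running hash, input and output */
	cmd->u.sha.ctx_len = CCP_SHA_CTXSIZE;
	cmd->u.sha.src = src_sg;
	cmd->u.sha.src_len = src_len;
	cmd->u.sha.final = 1;			/* last chunk of the message */
	cmd->u.sha.msg_bits = total_msg_bits;	/* only needed when final != 0 */
}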
/***** RSA engine *****/
/**
* struct ccp_rsa_engine - CCP RSA operation
* @key_size: length in bits of RSA key
* @exp: RSA exponent
* @exp_len: length in bytes of exponent
* @mod: RSA modulus
* @mod_len: length in bytes of modulus
* @src: data to be used for this operation
* @dst: data produced by this operation
* @src_len: length in bytes of data used for this operation
*
* Variables required to be set when calling ccp_enqueue_cmd():
* - key_size, exp, exp_len, mod, mod_len, src, dst, src_len
*/
struct ccp_rsa_engine {
u32 key_size; /* In bits */
struct scatterlist *exp;
u32 exp_len; /* In bytes */
struct scatterlist *mod;
u32 mod_len; /* In bytes */
struct scatterlist *src, *dst;
u32 src_len; /* In bytes */
};
/***** Passthru engine *****/
/**
* ccp_passthru_bitwise - type of bitwise passthru operation
*
* @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
* @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
* @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
* @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
* @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
*/
enum ccp_passthru_bitwise {
CCP_PASSTHRU_BITWISE_NOOP = 0,
CCP_PASSTHRU_BITWISE_AND,
CCP_PASSTHRU_BITWISE_OR,
CCP_PASSTHRU_BITWISE_XOR,
CCP_PASSTHRU_BITWISE_MASK,
CCP_PASSTHRU_BITWISE__LAST,
};
/**
* ccp_passthru_byteswap - type of byteswap passthru operation
*
* @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
* @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
* @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
*/
enum ccp_passthru_byteswap {
CCP_PASSTHRU_BYTESWAP_NOOP = 0,
CCP_PASSTHRU_BYTESWAP_32BIT,
CCP_PASSTHRU_BYTESWAP_256BIT,
CCP_PASSTHRU_BYTESWAP__LAST,
};
/**
* struct ccp_passthru_engine - CCP pass-through operation
* @bit_mod: bitwise operation to perform
* @byte_swap: byteswap operation to perform
* @mask: mask to be applied to data
* @mask_len: length in bytes of mask
* @src: data to be used for this operation
* @dst: data produced by this operation
* @src_len: length in bytes of data used for this operation
* @final: indicate final pass-through operation
*
* Variables required to be set when calling ccp_enqueue_cmd():
* - bit_mod, byte_swap, src, dst, src_len
* - mask, mask_len if bit_mod is not CCP_PASSTHRU_BITWISE_NOOP
*/
struct ccp_passthru_engine {
enum ccp_passthru_bitwise bit_mod;
enum ccp_passthru_byteswap byte_swap;
struct scatterlist *mask;
u32 mask_len; /* In bytes */
struct scatterlist *src, *dst;
u64 src_len; /* In bytes */
u32 final;
};
/***** ECC engine *****/
#define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */
#define CCP_ECC_MAX_OPERANDS 6
#define CCP_ECC_MAX_OUTPUTS 3
/**
* ccp_ecc_function - type of ECC function
*
* @CCP_ECC_FUNCTION_MMUL_384BIT: 384-bit modular multiplication
* @CCP_ECC_FUNCTION_MADD_384BIT: 384-bit modular addition
* @CCP_ECC_FUNCTION_MINV_384BIT: 384-bit multiplicative inverse
* @CCP_ECC_FUNCTION_PADD_384BIT: 384-bit point addition
* @CCP_ECC_FUNCTION_PMUL_384BIT: 384-bit point multiplication
* @CCP_ECC_FUNCTION_PDBL_384BIT: 384-bit point doubling
*/
enum ccp_ecc_function {
CCP_ECC_FUNCTION_MMUL_384BIT = 0,
CCP_ECC_FUNCTION_MADD_384BIT,
CCP_ECC_FUNCTION_MINV_384BIT,
CCP_ECC_FUNCTION_PADD_384BIT,
CCP_ECC_FUNCTION_PMUL_384BIT,
CCP_ECC_FUNCTION_PDBL_384BIT,
};
/**
* struct ccp_ecc_modular_math - CCP ECC modular math parameters
* @operand_1: first operand for the modular math operation
* @operand_1_len: length of the first operand
* @operand_2: second operand for the modular math operation
* (not used for CCP_ECC_FUNCTION_MINV_384BIT)
* @operand_2_len: length of the second operand
* (not used for CCP_ECC_FUNCTION_MINV_384BIT)
* @result: result of the modular math operation
* @result_len: length of the supplied result buffer
*/
struct ccp_ecc_modular_math {
struct scatterlist *operand_1;
unsigned int operand_1_len; /* In bytes */
struct scatterlist *operand_2;
unsigned int operand_2_len; /* In bytes */
struct scatterlist *result;
unsigned int result_len; /* In bytes */
};
/**
* struct ccp_ecc_point - CCP ECC point definition
* @x: the x coordinate of the ECC point
* @x_len: the length of the x coordinate
* @y: the y coordinate of the ECC point
* @y_len: the length of the y coordinate
*/
struct ccp_ecc_point {
struct scatterlist *x;
unsigned int x_len; /* In bytes */
struct scatterlist *y;
unsigned int y_len; /* In bytes */
};
/**
* struct ccp_ecc_point_math - CCP ECC point math parameters
* @point_1: the first point of the ECC point math operation
* @point_2: the second point of the ECC point math operation
* (only used for CCP_ECC_FUNCTION_PADD_384BIT)
* @domain_a: the a parameter of the ECC curve
* @domain_a_len: the length of the a parameter
* @scalar: the scalar parameter for the point math operation
* (only used for CCP_ECC_FUNCTION_PMUL_384BIT)
* @scalar_len: the length of the scalar parameter
* (only used for CCP_ECC_FUNCTION_PMUL_384BIT)
* @result: the point resulting from the point math operation
*/
struct ccp_ecc_point_math {
struct ccp_ecc_point point_1;
struct ccp_ecc_point point_2;
struct scatterlist *domain_a;
unsigned int domain_a_len; /* In bytes */
struct scatterlist *scalar;
unsigned int scalar_len; /* In bytes */
struct ccp_ecc_point result;
};
/**
* struct ccp_ecc_engine - CCP ECC operation
* @function: ECC function to perform
* @mod: ECC modulus
* @mod_len: length in bytes of modulus
* @mm: modular math parameters
* @pm: point math parameters
* @ecc_result: result of the ECC operation
*
* Variables required to be set when calling ccp_enqueue_cmd():
* - function, mod, mod_len
* - either the mm or the pm member of the union, as required by the function
* - ecc_result
*/
struct ccp_ecc_engine {
enum ccp_ecc_function function;
struct scatterlist *mod;
u32 mod_len; /* In bytes */
union {
struct ccp_ecc_modular_math mm;
struct ccp_ecc_point_math pm;
} u;
u16 ecc_result;
};
/**
* ccp_engine - CCP operation identifiers
*
* @CCP_ENGINE_AES: AES operation
* @CCP_ENGINE_XTS_AES_128: 128-bit XTS AES operation
* @CCP_ENGINE_RSVD1: unused
* @CCP_ENGINE_SHA: SHA operation
* @CCP_ENGINE_RSA: RSA operation
* @CCP_ENGINE_PASSTHRU: pass-through operation
* @CCP_ENGINE_ZLIB_DECOMPRESS: unused
* @CCP_ENGINE_ECC: ECC operation
*/
enum ccp_engine {
CCP_ENGINE_AES = 0,
CCP_ENGINE_XTS_AES_128,
CCP_ENGINE_RSVD1,
CCP_ENGINE_SHA,
CCP_ENGINE_RSA,
CCP_ENGINE_PASSTHRU,
CCP_ENGINE_ZLIB_DECOMPRESS,
CCP_ENGINE_ECC,
CCP_ENGINE__LAST,
};
/* Flag values for flags member of ccp_cmd */
#define CCP_CMD_MAY_BACKLOG 0x00000001
/**
* struct ccp_cmd - CCP operation request
* @entry: list element (ccp driver use only)
* @work: work element used for callbacks (ccp driver use only)
* @ccp: CCP device to be run on (ccp driver use only)
* @ret: operation return code (ccp driver use only)
* @flags: cmd processing flags
* @engine: CCP operation to perform
* @engine_error: CCP engine return code
* @u: engine specific structures, refer to specific engine struct below
* @callback: operation completion callback function
* @data: parameter value to be supplied to the callback function
*
* Variables required to be set when calling ccp_enqueue_cmd():
* - engine, callback
* - See the operation structures below for what is required for each
* operation.
*/
struct ccp_cmd {
/* The list_head, work_struct, ccp and ret variables are for use
* by the CCP driver only.
*/
struct list_head entry;
struct work_struct work;
struct ccp_device *ccp;
int ret;
u32 flags;
enum ccp_engine engine;
u32 engine_error;
union {
struct ccp_aes_engine aes;
struct ccp_xts_aes_engine xts;
struct ccp_sha_engine sha;
struct ccp_rsa_engine rsa;
struct ccp_passthru_engine passthru;
struct ccp_ecc_engine ecc;
} u;
/* Completion callback support */
void (*callback)(void *data, int err);
void *data;
};
#endif

View file

@ -37,6 +37,9 @@
__asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
(typeof(ptr)) (__ptr + (off)); })
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
#ifdef __CHECKER__
#define __must_be_array(arr) 0
#else

View file

@ -15,6 +15,7 @@
*/
#undef barrier
#undef RELOC_HIDE
#undef OPTIMIZER_HIDE_VAR
#define barrier() __memory_barrier()
@ -23,6 +24,12 @@
__ptr = (unsigned long) (ptr); \
(typeof(ptr)) (__ptr + (off)); })
/* This should act as an optimization barrier on var.
* Given that this compiler does not have inline assembly, a compiler barrier
* is the best we can do.
*/
#define OPTIMIZER_HIDE_VAR(var) barrier()
/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */
#define __must_be_array(a) 0

View file

@ -170,6 +170,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
(typeof(ptr)) (__ptr + (off)); })
#endif
#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif
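These three hunks give every compiler an OPTIMIZER_HIDE_VAR() definition: an empty inline-asm constraint where inline asm is available, and a plain barrier() otherwise. Its purpose is to keep the optimizer from reasoning about a variable's value in constant-time code. A minimal sketch of the kind of loop this protects, in the spirit of crypto_memneq() but not the kernel's actual implementation:

#include <linux/compiler.h>
#include <linux/types.h>

/* Compare two buffers without an early exit.  Without OPTIMIZER_HIDE_VAR()
 * the compiler is free to notice that 'neq' can only accumulate bits and
 * to bail out of the loop as soon as it becomes non-zero, which would leak
 * timing information about where the buffers first differ. */
static unsigned long example_memneq(const void *a, const void *b, size_t size)
{
	const unsigned char *pa = a, *pb = b;
	unsigned long neq = 0;

	while (size--) {
		neq |= *pa++ ^ *pb++;
		OPTIMIZER_HIDE_VAR(neq);
	}
	return neq;	/* zero if equal, non-zero otherwise */
}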
/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)

View file

@ -112,7 +112,7 @@ int padata_do_parallel(struct padata_instance *pinst,
rcu_read_lock_bh();
pd = rcu_dereference(pinst->pd);
pd = rcu_dereference_bh(pinst->pd);
err = -EINVAL;
if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)