crypto: aesni-intel - RFC4106 AES-GCM Driver Using Intel New Instructions
This patch adds an optimized RFC4106 AES-GCM implementation for 64-bit
kernels. It supports the 128-bit AES key size and uses the crypto AEAD
interface type so that the combined AES and GCM operation can be
implemented in assembly code. The assembly code uses the Intel(R) AES
New Instructions and the PCLMULQDQ instruction.

Signed-off-by: Adrian Hoban <adrian.hoban@intel.com>
Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: Gabriele Paoloni <gabriele.paoloni@intel.com>
Signed-off-by: Aidan O'Mahony <aidan.o.mahony@intel.com>
Signed-off-by: Erdinc Ozturk <erdinc.ozturk@intel.com>
Signed-off-by: James Guilford <james.guilford@intel.com>
Signed-off-by: Wajdi Feghali <wajdi.k.feghali@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Parent: 895be15745
Commit: 0bd82f5f63
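For context before the diff: a minimal sketch of how a kernel-side caller of
this era's AEAD API would drive "rfc4106(gcm(aes))". The helper name and the
buffer setup are illustrative only, and a real caller must also install a
completion callback, since the algorithm is registered CRYPTO_ALG_ASYNC and
encrypt may return -EINPROGRESS.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/aead.h>

/* Illustrative sketch only, not part of the patch. The 20-byte key blob
 * is the 16-byte AES key followed by the 4-byte RFC4106 salt; iv8 is the
 * 8-byte per-packet IV carried in the ESP payload. */
static int example_rfc4106_encrypt(struct scatterlist *assoc,
                                   struct scatterlist *src,
                                   struct scatterlist *dst,
                                   unsigned int len, u8 *iv8,
                                   const u8 key20[20])
{
        struct crypto_aead *tfm;
        struct aead_request *req;
        int err;

        tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_aead_setkey(tfm, key20, 20);
        if (!err)
                err = crypto_aead_setauthsize(tfm, 16);
        if (err)
                goto out_tfm;

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_tfm;
        }

        aead_request_set_assoc(req, assoc, 8);          /* ESP AAD */
        aead_request_set_crypt(req, src, dst, len, iv8);
        err = crypto_aead_encrypt(req); /* may be -EINPROGRESS */

        aead_request_free(req);
out_tfm:
        crypto_free_aead(tfm);
        return err;
}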
@@ -5,6 +5,14 @@
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
@@ -21,6 +29,10 @@
#include <crypto/ctr.h>
#include <asm/i387.h>
#include <asm/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
@@ -42,8 +54,31 @@ struct async_aes_ctx {
        struct cryptd_ablkcipher *cryptd_tfm;
};

#define AESNI_ALIGN     16
/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
        u8 hash_subkey[16];
        struct crypto_aes_ctx aes_key_expanded;
        u8 nonce[4];
        struct cryptd_aead *cryptd_tfm;
};

struct aesni_gcm_set_hash_subkey_result {
        int err;
        struct completion completion;
};

struct aesni_hash_subkey_req_data {
        u8 iv[16];
        struct aesni_gcm_set_hash_subkey_result result;
        struct scatterlist sg;
};

#define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
                             unsigned int key_len);
@@ -62,6 +97,57 @@ asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input.
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD).
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *                        is going to be 8 or 12 bytes.
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *                             Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
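The iv layout that both GCM entry points expect is easy to get wrong, so here
is a small self-contained sketch of how the pre-counter block j0 described
above is assembled (hypothetical helper, not part of the patch):

#include <stdint.h>
#include <string.h>

/* Build the 16-byte pre-counter block j0 for RFC4106: 4-byte salt from
 * the SA, 8-byte explicit IV from the ESP payload, then the 32-bit
 * big-endian constant 0x00000001. */
static void rfc4106_build_j0(uint8_t j0[16], const uint8_t salt[4],
                             const uint8_t iv[8])
{
        memcpy(j0, salt, 4);    /* bytes 0..3:   salt            */
        memcpy(j0 + 4, iv, 8);  /* bytes 4..11:  explicit IV     */
        j0[12] = 0x00;          /* bytes 12..15: counter = 1, BE */
        j0[13] = 0x00;
        j0[14] = 0x00;
        j0[15] = 0x01;
}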

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input.
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD).
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *                        to be 8 or 12 bytes.
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *                             Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
        return (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(crypto_aead_tfm(tfm)),
                          AESNI_ALIGN);
}

static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
        unsigned long addr = (unsigned long)raw_ctx;
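Note how the per-tfm context is recovered here: rfc4106_alg below reserves
sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN bytes of context, so
rounding the raw context pointer up with PTR_ALIGN always leaves room for an
aligned struct. A userspace re-creation of the same arithmetic, for
illustration only:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* The rounding PTR_ALIGN() performs: round p up to the next multiple
 * of a, where a is a power of two. */
static void *ptr_align(void *p, uintptr_t a)
{
        return (void *)(((uintptr_t)p + a - 1) & ~(a - 1));
}

int main(void)
{
        /* Over-allocate by the alignment, as cra_ctxsize does below, so a
         * 16-byte-aligned object of the requested size always fits. */
        size_t ctx_size = 64;           /* stand-in for sizeof(ctx) */
        uint8_t *raw = malloc(ctx_size + 16);
        uint8_t *ctx;

        if (!raw)
                return 1;
        ctx = ptr_align(raw, 16);
        printf("raw=%p aligned=%p pad=%zu\n", (void *)raw, (void *)ctx,
               (size_t)(ctx - raw));
        free(raw);
        return 0;
}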
@@ -730,6 +816,422 @@ static struct crypto_alg ablk_xts_alg = {
};
#endif

static int rfc4106_init(struct crypto_tfm *tfm)
{
        struct cryptd_aead *cryptd_tfm;
        struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);

        cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ctx->cryptd_tfm = cryptd_tfm;
        tfm->crt_aead.reqsize = sizeof(struct aead_request)
                + crypto_aead_reqsize(&cryptd_tfm->base);
        return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
        struct aesni_rfc4106_gcm_ctx *ctx =
                (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
        if (!IS_ERR(ctx->cryptd_tfm))
                cryptd_free_aead(ctx->cryptd_tfm);
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
        struct aesni_gcm_set_hash_subkey_result *result = req->data;

        if (err == -EINPROGRESS)
                return;
        result->err = err;
        complete(&result->completion);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
        struct crypto_ablkcipher *ctr_tfm;
        struct ablkcipher_request *req;
        int ret = -EINVAL;
        struct aesni_hash_subkey_req_data *req_data;

        ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
        if (IS_ERR(ctr_tfm))
                return PTR_ERR(ctr_tfm);

        crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

        ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
        if (ret) {
                crypto_free_ablkcipher(ctr_tfm);
                return ret;
        }

        req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_ablkcipher(ctr_tfm);
                return -ENOMEM;
        }

        req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data) {
                ablkcipher_request_free(req);
                crypto_free_ablkcipher(ctr_tfm);
                return -ENOMEM;
        }
        memset(req_data->iv, 0, sizeof(req_data->iv));

        /* Clear the data in the hash sub key container to zero.
         * We want to cipher all zeros to create the hash sub key. */
        memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

        init_completion(&req_data->result.completion);
        sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
        ablkcipher_request_set_tfm(req, ctr_tfm);
        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                        CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        rfc4106_set_hash_subkey_done,
                                        &req_data->result);

        ablkcipher_request_set_crypt(req, &req_data->sg,
                &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

        ret = crypto_ablkcipher_encrypt(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                ret = wait_for_completion_interruptible
                        (&req_data->result.completion);
                if (!ret)
                        ret = req_data->result.err;
        }
        ablkcipher_request_free(req);
        kfree(req_data);
        crypto_free_ablkcipher(ctr_tfm);
        return ret;
}
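The derivation above relies on a small identity: with an all-zero IV, the
first CTR keystream block is AES_K(0^128), so running ctr(aes) over a 16-byte
zero buffer yields exactly the GHASH subkey H = AES_K(0^128). A simpler
synchronous sketch of the same computation, assuming the plain "aes"
single-block cipher is available (this is not how the patch does it):

/* Hedged alternative sketch: derive H = AES_K(0^128) with one ECB block
 * encryption instead of an asynchronous ctr(aes) request. */
static int rfc4106_set_hash_subkey_sync(u8 *hash_subkey, const u8 *key,
                                        unsigned int key_len)
{
        struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
        int ret;

        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
        ret = crypto_cipher_setkey(tfm, key, key_len);
        if (!ret) {
                memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
                /* One block: H = AES_K(0^128) */
                crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);
        }
        crypto_free_cipher(tfm);
        return ret;
}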

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
                           unsigned int key_len)
{
        int ret = 0;
        struct crypto_tfm *tfm = crypto_aead_tfm(parent);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
        u8 *new_key_align, *new_key_mem = NULL;

        if (key_len < 4) {
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        /* Account for the 4 byte nonce at the end. */
        key_len -= 4;
        if (key_len != AES_KEYSIZE_128) {
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
        /* This must be on a 16 byte boundary! */
        if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
                return -EINVAL;

        if ((unsigned long)key % AESNI_ALIGN) {
                /* key is not aligned: use an auxiliary aligned pointer,
                 * keeping the original for kfree() below */
                new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
                if (!new_key_mem)
                        return -ENOMEM;

                new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
                memcpy(new_key_align, key, key_len);
                key = new_key_align;
        }

        if (!irq_fpu_usable())
                ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
                                            key, key_len);
        else {
                kernel_fpu_begin();
                ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
                kernel_fpu_end();
        }
        /* This must be on a 16 byte boundary! */
        if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
                ret = -EINVAL;
                goto exit;
        }
        ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
exit:
        kfree(new_key_mem);
        return ret;
}
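To make the setkey layout concrete: the blob handed to rfc4106_set_key is the
AES key immediately followed by the 4-byte salt, so for AES-128 the caller
passes key_len == 20 and the code above trims it to 16 after saving the
nonce. Hypothetical example values, for illustration only:

/* Hypothetical RFC4106 setkey blob for AES-128: bytes 0..15 are the AES
 * key, bytes 16..19 are the salt rfc4106_set_key() copies to ctx->nonce. */
static const u8 example_rfc4106_key[20] = {
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,        /* AES key */
        0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
        0xca, 0xfe, 0xba, 0xbe,                                 /* salt */
};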

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
                                unsigned int authsize)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
        struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }
        crypto_aead_crt(parent)->authsize = authsize;
        crypto_aead_crt(cryptd_child)->authsize = authsize;
        return 0;
}

static int rfc4106_encrypt(struct aead_request *req)
{
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
                        (struct aead_request *) aead_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_aead_encrypt(cryptd_req);
        } else {
                kernel_fpu_begin();
                ret = cryptd_child->base.crt_aead.encrypt(req);
                kernel_fpu_end();
                return ret;
        }
}

static int rfc4106_decrypt(struct aead_request *req)
{
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
                        (struct aead_request *) aead_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_aead_decrypt(cryptd_req);
        } else {
                kernel_fpu_begin();
                ret = cryptd_child->base.crt_aead.decrypt(req);
                kernel_fpu_end();
                return ret;
        }
}
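Both wrappers above follow the same shape: if the FPU state cannot be touched
in the current context, the request is copied into the request context and
bounced to the cryptd worker, which runs in process context; otherwise the
AES-NI path runs directly between kernel_fpu_begin()/kernel_fpu_end().
Schematically, with hypothetical helper names:

/* Schematic of the dispatch used by rfc4106_encrypt/rfc4106_decrypt
 * (hypothetical helper, not part of the patch). */
static int aesni_simd_dispatch(struct aead_request *req,
                               int (*simd_op)(struct aead_request *),
                               int (*deferred_op)(struct aead_request *))
{
        int ret;

        if (!irq_fpu_usable())
                return deferred_op(req);        /* bounce to cryptd */

        kernel_fpu_begin();                     /* SSE/AES-NI usable now */
        ret = simd_op(req);
        kernel_fpu_end();
        return ret;
}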

static struct crypto_alg rfc4106_alg = {
        .cra_name = "rfc4106(gcm(aes))",
        .cra_driver_name = "rfc4106-gcm-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
        .cra_blocksize = 1,
        .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
        .cra_alignmask = 0,
        .cra_type = &crypto_nivaead_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
        .cra_init = rfc4106_init,
        .cra_exit = rfc4106_exit,
        .cra_u = {
                .aead = {
                        .setkey = rfc4106_set_key,
                        .setauthsize = rfc4106_set_authsize,
                        .encrypt = rfc4106_encrypt,
                        .decrypt = rfc4106_decrypt,
                        .geniv = "seqiv",
                        .ivsize = 8,
                        .maxauthsize = 16,
                },
        },
};

static int __driver_rfc4106_encrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv_tab[16+AESNI_ALIGN];
        u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
        struct scatter_walk src_sg_walk;
        struct scatter_walk assoc_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        /* Assuming we are supporting rfc4106 64-bit extended
         * sequence numbers, we need the AAD length to be
         * 8 or 12 bytes. */
        if (unlikely(req->assoclen != 8 && req->assoclen != 12))
                return -EINVAL;

        /* Build the IV: 4-byte salt, 8-byte per-packet IV, counter = 1. */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
                src = scatterwalk_map(&src_sg_walk, 0);
                assoc = scatterwalk_map(&assoc_sg_walk, 0);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk, 0);
                }

        } else {
                /* Allocate memory for src, dst, assoc */
                src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
                        GFP_ATOMIC);
                if (unlikely(!src))
                        return -ENOMEM;
                assoc = (src + req->cryptlen + auth_tag_len);
                scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
                scatterwalk_map_and_copy(assoc, req->assoc, 0,
                                        req->assoclen, 0);
                dst = src;
        }

        aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
                ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
                + ((unsigned long)req->cryptlen), auth_tag_len);

        /* The authTag (aka the Integrity Check Value) needs to be written
         * back to the packet. */
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst, 0);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
                scatterwalk_unmap(src, 0);
                scatterwalk_unmap(assoc, 0);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, 0,
                        req->cryptlen + auth_tag_len, 1);
                kfree(src);
        }
        return 0;
}

static int __driver_rfc4106_decrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        unsigned long tempCipherLen = 0;
        __be32 counter = cpu_to_be32(1);
        int retval = 0;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv_and_authTag[32+AESNI_ALIGN];
        u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
        u8 *authTag = iv + 16;
        struct scatter_walk src_sg_walk;
        struct scatter_walk assoc_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        /* Assuming we are supporting rfc4106 64-bit extended
         * sequence numbers, we need the AAD length to be
         * 8 or 12 bytes. */
        if (unlikely((req->cryptlen < auth_tag_len) ||
                (req->assoclen != 8 && req->assoclen != 12)))
                return -EINVAL;

        tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
        /* Build the IV: 4-byte salt, 8-byte per-packet IV, counter = 1. */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
                src = scatterwalk_map(&src_sg_walk, 0);
                assoc = scatterwalk_map(&assoc_sg_walk, 0);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk, 0);
                }

        } else {
                /* Allocate memory for src, dst, assoc */
                src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
                if (!src)
                        return -ENOMEM;
                /* The tag is already part of req->cryptlen here, so assoc
                 * starts right after the ciphertext+tag; adding
                 * auth_tag_len again would overrun the buffer. */
                assoc = (src + req->cryptlen);
                scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
                scatterwalk_map_and_copy(assoc, req->assoc, 0,
                                        req->assoclen, 0);
                dst = src;
        }

        aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
                ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
                authTag, auth_tag_len);

        /* Compare generated tag with passed in tag. */
        retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
                -EBADMSG : 0;

        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst, 0);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
                scatterwalk_unmap(src, 0);
                scatterwalk_unmap(assoc, 0);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
                kfree(src);
        }
        return retval;
}
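One caveat about the tag check in __driver_rfc4106_decrypt(): memcmp()
returns as soon as a byte differs, which in principle leaks timing
information about the expected tag. A constant-time comparison (the kernel
later added crypto_memneq() for exactly this purpose) can be sketched as:

/* Constant-time tag comparison sketch: OR together all byte differences
 * so the loop runs the same number of iterations regardless of where a
 * mismatch occurs. Returns 0 when the tags are equal. */
static int tag_memneq(const u8 *a, const u8 *b, unsigned long len)
{
        u8 diff = 0;
        unsigned long i;

        for (i = 0; i < len; i++)
                diff |= a[i] ^ b[i];
        return diff;
}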

static struct crypto_alg __rfc4106_alg = {
        .cra_name = "__gcm-aes-aesni",
        .cra_driver_name = "__driver-gcm-aes-aesni",
        .cra_priority = 0,
        .cra_flags = CRYPTO_ALG_TYPE_AEAD,
        .cra_blocksize = 1,
        .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
        .cra_alignmask = 0,
        .cra_type = &crypto_aead_type,
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(__rfc4106_alg.cra_list),
        .cra_u = {
                .aead = {
                        .encrypt = __driver_rfc4106_encrypt,
                        .decrypt = __driver_rfc4106_decrypt,
                },
        },
};

static int __init aesni_init(void)
{
        int err;

@@ -738,6 +1240,7 @@ static int __init aesni_init(void)
                printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
                return -ENODEV;
        }

        if ((err = crypto_register_alg(&aesni_alg)))
                goto aes_err;
        if ((err = crypto_register_alg(&__aesni_alg)))
@@ -770,10 +1273,19 @@ static int __init aesni_init(void)
        if ((err = crypto_register_alg(&ablk_xts_alg)))
                goto ablk_xts_err;
#endif

        err = crypto_register_alg(&__rfc4106_alg);
        if (err)
                goto __aead_gcm_err;
        err = crypto_register_alg(&rfc4106_alg);
        if (err)
                goto aead_gcm_err;
        return err;

aead_gcm_err:
        crypto_unregister_alg(&__rfc4106_alg);
__aead_gcm_err:
#ifdef HAS_XTS
        crypto_unregister_alg(&ablk_xts_alg);
ablk_xts_err:
#endif
#ifdef HAS_PCBC
@@ -809,6 +1321,8 @@ aes_err:

static void __exit aesni_exit(void)
{
        crypto_unregister_alg(&__rfc4106_alg);
        crypto_unregister_alg(&rfc4106_alg);
#ifdef HAS_XTS
        crypto_unregister_alg(&ablk_xts_alg);
#endif