WSL2-Linux-Kernel/crypto/authenc.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Authenc: Simple AEAD wrapper for IPsec
 *
 * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct authenc_instance_ctx {
        struct crypto_ahash_spawn auth;
        struct crypto_skcipher_spawn enc;
        unsigned int reqoff;
};

struct crypto_authenc_ctx {
        struct crypto_ahash *auth;
        struct crypto_skcipher *enc;
        struct crypto_sync_skcipher *null;
};
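
/*
 * Per-request scratch space.  The src/dst scatterlists are used by
 * scatterwalk_ffwd() to skip over the associated data, and tail[] holds
 * the hash output (and, on decrypt, the ICV copied from the ciphertext)
 * followed, at the instance's reqoff, by the child ahash or skcipher
 * request.
 */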
struct authenc_request_ctx {
        struct scatterlist src[2];
        struct scatterlist dst[2];
        char tail[];
};

static void authenc_request_complete(struct aead_request *req, int err)
{
        if (err != -EINPROGRESS)
                aead_request_complete(req, err);
}
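
/*
 * crypto_authenc_extractkeys - split an authenc key blob into its parts
 *
 * An authenc key is an rtattr of type CRYPTO_AUTHENC_KEYA_PARAM whose
 * payload is a struct crypto_authenc_key_param holding the big-endian
 * encryption key length, immediately followed by the authentication key
 * and then the encryption key.  The pointers stored in keys point into
 * the caller's key buffer; nothing is copied.
 */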
int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
                               unsigned int keylen)
{
        struct rtattr *rta = (struct rtattr *)key;
        struct crypto_authenc_key_param *param;

        if (!RTA_OK(rta, keylen))
                return -EINVAL;
        if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
                return -EINVAL;

        /*
         * RTA_OK() didn't align the rtattr's payload when validating that it
         * fits in the buffer.  Yet, the keys should start on the next 4-byte
         * aligned boundary.  To avoid confusion, require that the rtattr
         * payload be exactly the param struct, which has a 4-byte aligned size.
         */
        if (RTA_PAYLOAD(rta) != sizeof(*param))
                return -EINVAL;
        BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);

        param = RTA_DATA(rta);
        keys->enckeylen = be32_to_cpu(param->enckeylen);

        key += rta->rta_len;
        keylen -= rta->rta_len;

        if (keylen < keys->enckeylen)
                return -EINVAL;

        keys->authkeylen = keylen - keys->enckeylen;
        keys->authkey = key;
        keys->enckey = key + keys->authkeylen;

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys);
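
/*
 * Purely illustrative sketch (not code from this file): a caller building a
 * key blob for setkey on an authenc transform would lay it out roughly as
 *
 *      struct rtattr *rta = (void *)buf;
 *      struct crypto_authenc_key_param *param = RTA_DATA(rta);
 *
 *      rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
 *      rta->rta_len = RTA_LENGTH(sizeof(*param));
 *      param->enckeylen = cpu_to_be32(enckeylen);
 *      memcpy((u8 *)param + sizeof(*param), authkey, authkeylen);
 *      memcpy((u8 *)param + sizeof(*param) + authkeylen, enckey, enckeylen);
 *
 * where buf, authkey/authkeylen and enckey/enckeylen are the caller's own
 * variables, and the total length passed to setkey is
 * RTA_LENGTH(sizeof(*param)) + authkeylen + enckeylen.
 */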

static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
                                 unsigned int keylen)
{
        struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
        struct crypto_ahash *auth = ctx->auth;
        struct crypto_skcipher *enc = ctx->enc;
        struct crypto_authenc_keys keys;
        int err = -EINVAL;

        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto out;

        crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
        crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
                                     CRYPTO_TFM_REQ_MASK);
        err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
        if (err)
                goto out;

        crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
                                       CRYPTO_TFM_REQ_MASK);
        err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);

out:
        memzero_explicit(&keys, sizeof(keys));
        return err;
}

static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
{
        struct aead_request *req = areq->data;
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        struct aead_instance *inst = aead_alg_instance(authenc);
        struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
        struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
        struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);

        if (err)
                goto out;

        scatterwalk_map_and_copy(ahreq->result, req->dst,
                                 req->assoclen + req->cryptlen,
                                 crypto_aead_authsize(authenc), 1);

out:
        aead_request_complete(req, err);
}
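
/*
 * Compute the authentication tag over the associated data and ciphertext
 * already present in req->dst and append it after the ciphertext.  If the
 * hash runs asynchronously, authenc_geniv_ahash_done() finishes the copy.
 */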
static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
{
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        struct aead_instance *inst = aead_alg_instance(authenc);
        struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
        struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
        struct crypto_ahash *auth = ctx->auth;
        struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
        struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
        u8 *hash = areq_ctx->tail;
        int err;

        hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
                           crypto_ahash_alignmask(auth) + 1);

        ahash_request_set_tfm(ahreq, auth);
        ahash_request_set_crypt(ahreq, req->dst, hash,
                                req->assoclen + req->cryptlen);
        ahash_request_set_callback(ahreq, flags,
                                   authenc_geniv_ahash_done, req);

        err = crypto_ahash_digest(ahreq);
        if (err)
                return err;

        scatterwalk_map_and_copy(hash, req->dst, req->assoclen + req->cryptlen,
                                 crypto_aead_authsize(authenc), 1);

        return 0;
}

static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
                                        int err)
{
        struct aead_request *areq = req->data;

        if (err)
                goto out;

        err = crypto_authenc_genicv(areq, 0);

out:
        authenc_request_complete(areq, err);
}
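
/*
 * For the out-of-place case, copy the associated data from req->src to
 * req->dst by running it through the default null skcipher.
 */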
static int crypto_authenc_copy_assoc(struct aead_request *req)
{
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
        SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);

        skcipher_request_set_sync_tfm(skreq, ctx->null);
        skcipher_request_set_callback(skreq, aead_request_flags(req),
                                      NULL, NULL);
        skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
                                   NULL);

        return crypto_skcipher_encrypt(skreq);
}
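
/*
 * Encrypt-then-MAC: encrypt the plaintext (copying the associated data
 * first if src and dst differ), then generate the ICV over the associated
 * data plus ciphertext via crypto_authenc_genicv().
 */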
static int crypto_authenc_encrypt(struct aead_request *req)
{
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        struct aead_instance *inst = aead_alg_instance(authenc);
        struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
        struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
        struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
        struct crypto_skcipher *enc = ctx->enc;
        unsigned int cryptlen = req->cryptlen;
        struct skcipher_request *skreq = (void *)(areq_ctx->tail +
                                                  ictx->reqoff);
        struct scatterlist *src, *dst;
        int err;

        src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
        dst = src;

        if (req->src != req->dst) {
                err = crypto_authenc_copy_assoc(req);
                if (err)
                        return err;

                dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
        }

        skcipher_request_set_tfm(skreq, enc);
        skcipher_request_set_callback(skreq, aead_request_flags(req),
                                      crypto_authenc_encrypt_done, req);
        skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);

        err = crypto_skcipher_encrypt(skreq);
        if (err)
                return err;

        return crypto_authenc_genicv(req, aead_request_flags(req));
}
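
/*
 * Second half of decryption: compare the ICV computed over the received
 * associated data and ciphertext against the ICV carried at the end of
 * req->src (using crypto_memneq() to avoid timing leaks), and only then
 * decrypt the ciphertext into req->dst.
 */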
static int crypto_authenc_decrypt_tail(struct aead_request *req,
                                       unsigned int flags)
{
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        struct aead_instance *inst = aead_alg_instance(authenc);
        struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
        struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
        struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
        struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
        struct skcipher_request *skreq = (void *)(areq_ctx->tail +
                                                  ictx->reqoff);
        unsigned int authsize = crypto_aead_authsize(authenc);
        u8 *ihash = ahreq->result + authsize;
        struct scatterlist *src, *dst;

        scatterwalk_map_and_copy(ihash, req->src, ahreq->nbytes, authsize, 0);

        if (crypto_memneq(ihash, ahreq->result, authsize))
                return -EBADMSG;

        src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
        dst = src;

        if (req->src != req->dst)
                dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);

        skcipher_request_set_tfm(skreq, ctx->enc);
        skcipher_request_set_callback(skreq, aead_request_flags(req),
                                      req->base.complete, req->base.data);
        skcipher_request_set_crypt(skreq, src, dst,
                                   req->cryptlen - authsize, req->iv);

        return crypto_skcipher_decrypt(skreq);
}

static void authenc_verify_ahash_done(struct crypto_async_request *areq,
                                      int err)
{
        struct aead_request *req = areq->data;

        if (err)
                goto out;

        err = crypto_authenc_decrypt_tail(req, 0);

out:
        authenc_request_complete(req, err);
}
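
/*
 * MAC-then-decrypt for incoming data: hash the associated data plus
 * ciphertext (everything except the trailing ICV), then hand off to
 * crypto_authenc_decrypt_tail() to verify the ICV and decrypt.
 */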
static int crypto_authenc_decrypt(struct aead_request *req)
{
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        unsigned int authsize = crypto_aead_authsize(authenc);
        struct aead_instance *inst = aead_alg_instance(authenc);
        struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
        struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
        struct crypto_ahash *auth = ctx->auth;
        struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
        struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
        u8 *hash = areq_ctx->tail;
        int err;

        hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
                           crypto_ahash_alignmask(auth) + 1);

        ahash_request_set_tfm(ahreq, auth);
        ahash_request_set_crypt(ahreq, req->src, hash,
                                req->assoclen + req->cryptlen - authsize);
        ahash_request_set_callback(ahreq, aead_request_flags(req),
                                   authenc_verify_ahash_done, req);

        err = crypto_ahash_digest(ahreq);
        if (err)
                return err;

        return crypto_authenc_decrypt_tail(req, aead_request_flags(req));
}
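
/*
 * Instantiate the child hash and cipher transforms plus the default null
 * skcipher, and size the AEAD request context so that tail[] can hold the
 * digest area followed, at reqoff, by whichever child request is larger.
 */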
static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
{
        struct aead_instance *inst = aead_alg_instance(tfm);
        struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
        struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_ahash *auth;
        struct crypto_skcipher *enc;
        struct crypto_sync_skcipher *null;
        int err;

        auth = crypto_spawn_ahash(&ictx->auth);
        if (IS_ERR(auth))
                return PTR_ERR(auth);

        enc = crypto_spawn_skcipher(&ictx->enc);
        err = PTR_ERR(enc);
        if (IS_ERR(enc))
                goto err_free_ahash;

        null = crypto_get_default_null_skcipher();
        err = PTR_ERR(null);
        if (IS_ERR(null))
                goto err_free_skcipher;

        ctx->auth = auth;
        ctx->enc = enc;
        ctx->null = null;

        crypto_aead_set_reqsize(
                tfm,
                sizeof(struct authenc_request_ctx) +
                ictx->reqoff +
                max_t(unsigned int,
                      crypto_ahash_reqsize(auth) +
                      sizeof(struct ahash_request),
                      sizeof(struct skcipher_request) +
                      crypto_skcipher_reqsize(enc)));

        return 0;

err_free_skcipher:
        crypto_free_skcipher(enc);
err_free_ahash:
        crypto_free_ahash(auth);
        return err;
}

static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
{
        struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);

        crypto_free_ahash(ctx->auth);
        crypto_free_skcipher(ctx->enc);
        crypto_put_default_null_skcipher();
}

static void crypto_authenc_free(struct aead_instance *inst)
{
        struct authenc_instance_ctx *ctx = aead_instance_ctx(inst);

        crypto_drop_skcipher(&ctx->enc);
        crypto_drop_ahash(&ctx->auth);
        kfree(inst);
}
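
/*
 * Template constructor: for "authenc(auth, enc)", grab the named ahash and
 * skcipher algorithms, derive the combined algorithm properties (priority,
 * block size, alignmask, IV size, maximum authsize) and register the new
 * AEAD instance.
 */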
static int crypto_authenc_create(struct crypto_template *tmpl,
                                 struct rtattr **tb)
{
        struct crypto_attr_type *algt;
        u32 mask;
        struct aead_instance *inst;
        struct authenc_instance_ctx *ctx;
        struct hash_alg_common *auth;
        struct crypto_alg *auth_base;
        struct skcipher_alg *enc;
        int err;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
                return -EINVAL;

        mask = crypto_requires_sync(algt->type, algt->mask);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;
        ctx = aead_instance_ctx(inst);

        err = crypto_grab_ahash(&ctx->auth, aead_crypto_instance(inst),
                                crypto_attr_alg_name(tb[1]), 0, mask);
        if (err)
                goto err_free_inst;
        auth = crypto_spawn_ahash_alg(&ctx->auth);
        auth_base = &auth->base;

        err = crypto_grab_skcipher(&ctx->enc, aead_crypto_instance(inst),
                                   crypto_attr_alg_name(tb[2]), 0, mask);
        if (err)
                goto err_free_inst;
        enc = crypto_spawn_skcipher_alg(&ctx->enc);

        ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask,
                            auth_base->cra_alignmask + 1);

        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
                     "authenc(%s,%s)", auth_base->cra_name,
                     enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
                goto err_free_inst;

        if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "authenc(%s,%s)", auth_base->cra_driver_name,
                     enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto err_free_inst;

        inst->alg.base.cra_flags = (auth_base->cra_flags |
                                    enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
        inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
                                      auth_base->cra_priority;
        inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
        inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
                                       enc->base.cra_alignmask;
        inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx);

        inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
        inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
        inst->alg.maxauthsize = auth->digestsize;

        inst->alg.init = crypto_authenc_init_tfm;
        inst->alg.exit = crypto_authenc_exit_tfm;

        inst->alg.setkey = crypto_authenc_setkey;
        inst->alg.encrypt = crypto_authenc_encrypt;
        inst->alg.decrypt = crypto_authenc_decrypt;

        inst->free = crypto_authenc_free;

        err = aead_register_instance(tmpl, inst);
        if (err) {
err_free_inst:
                crypto_authenc_free(inst);
        }

        return err;
}

static struct crypto_template crypto_authenc_tmpl = {
        .name = "authenc",
        .create = crypto_authenc_create,
        .module = THIS_MODULE,
};
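
/*
 * Illustrative sketch only (the template instantiation below is an example,
 * not something this file mandates; error handling is trimmed): an in-kernel
 * user would typically obtain an authenc AEAD with
 *
 *      struct crypto_aead *tfm;
 *
 *      tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *      crypto_aead_setauthsize(tfm, 16);
 *      crypto_aead_setkey(tfm, keyblob, keybloblen);
 *
 * where keyblob is laid out as described above crypto_authenc_extractkeys().
 */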

static int __init crypto_authenc_module_init(void)
{
        return crypto_register_template(&crypto_authenc_tmpl);
}

static void __exit crypto_authenc_module_exit(void)
{
        crypto_unregister_template(&crypto_authenc_tmpl);
}

subsys_initcall(crypto_authenc_module_init);
module_exit(crypto_authenc_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec");
MODULE_ALIAS_CRYPTO("authenc");