// SPDX-License-Identifier: GPL-2.0-only
/*
 * pcrypt - Parallel crypto wrapper.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
 */
|
|
|
|
|
|
|
|
#include <crypto/algapi.h>
|
|
|
|
#include <crypto/internal/aead.h>
|
2015-05-22 15:34:22 +03:00
|
|
|
#include <linux/atomic.h>
|
2010-01-07 07:57:19 +03:00
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/slab.h>
|
2010-07-14 14:34:15 +04:00
|
|
|
#include <linux/kobject.h>
|
2010-07-27 09:18:46 +04:00
|
|
|
#include <linux/cpu.h>
|
2010-01-07 07:57:19 +03:00
|
|
|
#include <crypto/pcrypt.h>
|
|
|
|
|
/*
 * Global padata instances shared by every pcrypt template instance, so a
 * single sysfs tuning interface applies to all of them.
 */
static struct padata_instance *pencrypt;	/* parallel workers for encryption */
static struct padata_instance *pdecrypt;	/* parallel workers for decryption */
static struct kset *pcrypt_kset;		/* sysfs kset the instances live in */
|
2010-01-07 07:57:19 +03:00
|
|
|
|
|
|
|
/*
 * Per-template-instance context.  Each pcrypt instance owns its own
 * padata_shell queues (psenc/psdec) rather than sharing global queues;
 * per the commit log embedded above, this prevents a deadlock when pcrypt
 * is nested inside itself, since padata completes requests in submission
 * order per shell.
 */
struct pcrypt_instance_ctx {
	struct crypto_aead_spawn spawn;	/* the wrapped inner AEAD algorithm */
	struct padata_shell *psenc;	/* per-instance encryption queue */
	struct padata_shell *psdec;	/* per-instance decryption queue */
	atomic_t tfm_count;		/* round-robin counter used to pick a
					 * callback CPU in init_tfm */
};
|
|
|
|
|
|
|
|
/* Per-transform (tfm) context. */
struct pcrypt_aead_ctx {
	struct crypto_aead *child;	/* instantiated inner AEAD transform */
	unsigned int cb_cpu;		/* CPU the serial callback runs on */
};
|
|
|
|
|
/* Map an AEAD transform back to the pcrypt instance context it belongs to. */
static inline struct pcrypt_instance_ctx *pcrypt_tfm_ictx(
	struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);

	return aead_instance_ctx(inst);
}
|
|
|
|
|
2010-01-07 07:57:19 +03:00
|
|
|
static int pcrypt_aead_setkey(struct crypto_aead *parent,
|
|
|
|
const u8 *key, unsigned int keylen)
|
|
|
|
{
|
|
|
|
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
|
|
|
|
|
|
|
|
return crypto_aead_setkey(ctx->child, key, keylen);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
|
|
|
|
unsigned int authsize)
|
|
|
|
{
|
|
|
|
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
|
|
|
|
|
|
|
|
return crypto_aead_setauthsize(ctx->child, authsize);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pcrypt_aead_serial(struct padata_priv *padata)
|
|
|
|
{
|
|
|
|
struct pcrypt_request *preq = pcrypt_padata_request(padata);
|
|
|
|
struct aead_request *req = pcrypt_request_ctx(preq);
|
|
|
|
|
|
|
|
aead_request_complete(req->base.data, padata->info);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
|
|
|
|
{
|
|
|
|
struct aead_request *req = areq->data;
|
|
|
|
struct pcrypt_request *preq = aead_request_ctx(req);
|
|
|
|
struct padata_priv *padata = pcrypt_request_padata(preq);
|
|
|
|
|
|
|
|
padata->info = err;
|
|
|
|
|
|
|
|
padata_do_serial(padata);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pcrypt_aead_enc(struct padata_priv *padata)
|
|
|
|
{
|
|
|
|
struct pcrypt_request *preq = pcrypt_padata_request(padata);
|
|
|
|
struct aead_request *req = pcrypt_request_ctx(preq);
|
2021-10-21 21:30:28 +03:00
|
|
|
int ret;
|
2010-01-07 07:57:19 +03:00
|
|
|
|
2021-10-21 21:30:28 +03:00
|
|
|
ret = crypto_aead_encrypt(req);
|
2010-01-07 07:57:19 +03:00
|
|
|
|
2021-10-21 21:30:28 +03:00
|
|
|
if (ret == -EINPROGRESS)
|
2010-01-07 07:57:19 +03:00
|
|
|
return;
|
|
|
|
|
2021-10-21 21:30:28 +03:00
|
|
|
padata->info = ret;
|
2010-01-07 07:57:19 +03:00
|
|
|
padata_do_serial(padata);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int pcrypt_aead_encrypt(struct aead_request *req)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
struct pcrypt_request *preq = aead_request_ctx(req);
|
|
|
|
struct aead_request *creq = pcrypt_request_ctx(preq);
|
|
|
|
struct padata_priv *padata = pcrypt_request_padata(preq);
|
|
|
|
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
|
|
|
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
|
|
|
|
u32 flags = aead_request_flags(req);
|
crypto: pcrypt - Avoid deadlock by using per-instance padata queues
If the pcrypt template is used multiple times in an algorithm, then a
deadlock occurs because all pcrypt instances share the same
padata_instance, which completes requests in the order submitted. That
is, the inner pcrypt request waits for the outer pcrypt request while
the outer request is already waiting for the inner.
This patch fixes this by allocating a set of queues for each pcrypt
instance instead of using two global queues. In order to maintain
the existing user-space interface, the pinst structure remains global
so any sysfs modifications will apply to every pcrypt instance.
Note that when an update occurs we have to allocate memory for
every pcrypt instance. Should one of the allocations fail we
will abort the update without rolling back changes already made.
The new per-instance data structure is called padata_shell and is
essentially a wrapper around parallel_data.
Reproducer:
#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>
int main()
{
struct sockaddr_alg addr = {
.salg_type = "aead",
.salg_name = "pcrypt(pcrypt(rfc4106-gcm-aesni))"
};
int algfd, reqfd;
char buf[32] = { 0 };
algfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
bind(algfd, (void *)&addr, sizeof(addr));
setsockopt(algfd, SOL_ALG, ALG_SET_KEY, buf, 20);
reqfd = accept(algfd, 0, 0);
write(reqfd, buf, 32);
read(reqfd, buf, 16);
}
Reported-by: syzbot+56c7151cad94eec37c521f0e47d2eee53f9361c4@syzkaller.appspotmail.com
Fixes: 5068c7a883d1 ("crypto: pcrypt - Add pcrypt crypto parallelization wrapper")
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Tested-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2019-11-26 10:58:45 +03:00
|
|
|
struct pcrypt_instance_ctx *ictx;
|
|
|
|
|
|
|
|
ictx = pcrypt_tfm_ictx(aead);
|
2010-01-07 07:57:19 +03:00
|
|
|
|
|
|
|
memset(padata, 0, sizeof(struct padata_priv));
|
|
|
|
|
|
|
|
padata->parallel = pcrypt_aead_enc;
|
|
|
|
padata->serial = pcrypt_aead_serial;
|
|
|
|
|
|
|
|
aead_request_set_tfm(creq, ctx->child);
|
|
|
|
aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
|
|
|
|
pcrypt_aead_done, req);
|
|
|
|
aead_request_set_crypt(creq, req->src, req->dst,
|
|
|
|
req->cryptlen, req->iv);
|
2015-05-28 17:08:00 +03:00
|
|
|
aead_request_set_ad(creq, req->assoclen);
|
2010-01-07 07:57:19 +03:00
|
|
|
|
crypto: pcrypt - Avoid deadlock by using per-instance padata queues
If the pcrypt template is used multiple times in an algorithm, then a
deadlock occurs because all pcrypt instances share the same
padata_instance, which completes requests in the order submitted. That
is, the inner pcrypt request waits for the outer pcrypt request while
the outer request is already waiting for the inner.
This patch fixes this by allocating a set of queues for each pcrypt
instance instead of using two global queues. In order to maintain
the existing user-space interface, the pinst structure remains global
so any sysfs modifications will apply to every pcrypt instance.
Note that when an update occurs we have to allocate memory for
every pcrypt instance. Should one of the allocations fail we
will abort the update without rolling back changes already made.
The new per-instance data structure is called padata_shell and is
essentially a wrapper around parallel_data.
Reproducer:
#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>
int main()
{
struct sockaddr_alg addr = {
.salg_type = "aead",
.salg_name = "pcrypt(pcrypt(rfc4106-gcm-aesni))"
};
int algfd, reqfd;
char buf[32] = { 0 };
algfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
bind(algfd, (void *)&addr, sizeof(addr));
setsockopt(algfd, SOL_ALG, ALG_SET_KEY, buf, 20);
reqfd = accept(algfd, 0, 0);
write(reqfd, buf, 32);
read(reqfd, buf, 16);
}
Reported-by: syzbot+56c7151cad94eec37c521f0e47d2eee53f9361c4@syzkaller.appspotmail.com
Fixes: 5068c7a883d1 ("crypto: pcrypt - Add pcrypt crypto parallelization wrapper")
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Tested-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2019-11-26 10:58:45 +03:00
|
|
|
err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
|
2010-07-07 17:32:02 +04:00
|
|
|
if (!err)
|
|
|
|
return -EINPROGRESS;
|
crypto: pcrypt - Fix hungtask for PADATA_RESET
[ Upstream commit 8f4f68e788c3a7a696546291258bfa5fdb215523 ]
We found a hungtask bug in test_aead_vec_cfg as follows:
INFO: task cryptomgr_test:391009 blocked for more than 120 seconds.
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
Call trace:
__switch_to+0x98/0xe0
__schedule+0x6c4/0xf40
schedule+0xd8/0x1b4
schedule_timeout+0x474/0x560
wait_for_common+0x368/0x4e0
wait_for_completion+0x20/0x30
wait_for_completion+0x20/0x30
test_aead_vec_cfg+0xab4/0xd50
test_aead+0x144/0x1f0
alg_test_aead+0xd8/0x1e0
alg_test+0x634/0x890
cryptomgr_test+0x40/0x70
kthread+0x1e0/0x220
ret_from_fork+0x10/0x18
Kernel panic - not syncing: hung_task: blocked tasks
For padata_do_parallel, when the return err is 0 or -EBUSY, it will call
wait_for_completion(&wait->completion) in test_aead_vec_cfg. In normal
case, aead_request_complete() will be called in pcrypt_aead_serial and the
return err is 0 for padata_do_parallel. But, when pinst->flags is
PADATA_RESET, the return err is -EBUSY for padata_do_parallel, and it
won't call aead_request_complete(). Therefore, test_aead_vec_cfg will
hung at wait_for_completion(&wait->completion), which will cause
hungtask.
The problem comes as following:
(padata_do_parallel) |
rcu_read_lock_bh(); |
err = -EINVAL; | (padata_replace)
| pinst->flags |= PADATA_RESET;
err = -EBUSY |
if (pinst->flags & PADATA_RESET) |
rcu_read_unlock_bh() |
return err
In order to resolve the problem, we replace the return err -EBUSY with
-EAGAIN, which means parallel_data is changing, and the caller should call
it again.
v3:
remove retry and just change the return err.
v2:
introduce padata_try_do_parallel() in pcrypt_aead_encrypt and
pcrypt_aead_decrypt to solve the hungtask.
Signed-off-by: Lu Jialin <lujialin4@huawei.com>
Signed-off-by: Guo Zihua <guozihua@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
2023-09-04 16:33:41 +03:00
|
|
|
if (err == -EBUSY)
|
|
|
|
return -EAGAIN;
|
2010-01-07 07:57:19 +03:00
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pcrypt_aead_dec(struct padata_priv *padata)
|
|
|
|
{
|
|
|
|
struct pcrypt_request *preq = pcrypt_padata_request(padata);
|
|
|
|
struct aead_request *req = pcrypt_request_ctx(preq);
|
2021-10-21 21:30:28 +03:00
|
|
|
int ret;
|
2010-01-07 07:57:19 +03:00
|
|
|
|
2021-10-21 21:30:28 +03:00
|
|
|
ret = crypto_aead_decrypt(req);
|
2010-01-07 07:57:19 +03:00
|
|
|
|
2021-10-21 21:30:28 +03:00
|
|
|
if (ret == -EINPROGRESS)
|
2010-01-07 07:57:19 +03:00
|
|
|
return;
|
|
|
|
|
2021-10-21 21:30:28 +03:00
|
|
|
padata->info = ret;
|
2010-01-07 07:57:19 +03:00
|
|
|
padata_do_serial(padata);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int pcrypt_aead_decrypt(struct aead_request *req)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
struct pcrypt_request *preq = aead_request_ctx(req);
|
|
|
|
struct aead_request *creq = pcrypt_request_ctx(preq);
|
|
|
|
struct padata_priv *padata = pcrypt_request_padata(preq);
|
|
|
|
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
|
|
|
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
|
|
|
|
u32 flags = aead_request_flags(req);
|
crypto: pcrypt - Avoid deadlock by using per-instance padata queues
If the pcrypt template is used multiple times in an algorithm, then a
deadlock occurs because all pcrypt instances share the same
padata_instance, which completes requests in the order submitted. That
is, the inner pcrypt request waits for the outer pcrypt request while
the outer request is already waiting for the inner.
This patch fixes this by allocating a set of queues for each pcrypt
instance instead of using two global queues. In order to maintain
the existing user-space interface, the pinst structure remains global
so any sysfs modifications will apply to every pcrypt instance.
Note that when an update occurs we have to allocate memory for
every pcrypt instance. Should one of the allocations fail we
will abort the update without rolling back changes already made.
The new per-instance data structure is called padata_shell and is
essentially a wrapper around parallel_data.
Reproducer:
#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>
int main()
{
struct sockaddr_alg addr = {
.salg_type = "aead",
.salg_name = "pcrypt(pcrypt(rfc4106-gcm-aesni))"
};
int algfd, reqfd;
char buf[32] = { 0 };
algfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
bind(algfd, (void *)&addr, sizeof(addr));
setsockopt(algfd, SOL_ALG, ALG_SET_KEY, buf, 20);
reqfd = accept(algfd, 0, 0);
write(reqfd, buf, 32);
read(reqfd, buf, 16);
}
Reported-by: syzbot+56c7151cad94eec37c521f0e47d2eee53f9361c4@syzkaller.appspotmail.com
Fixes: 5068c7a883d1 ("crypto: pcrypt - Add pcrypt crypto parallelization wrapper")
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Tested-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2019-11-26 10:58:45 +03:00
|
|
|
struct pcrypt_instance_ctx *ictx;
|
|
|
|
|
|
|
|
ictx = pcrypt_tfm_ictx(aead);
|
2010-01-07 07:57:19 +03:00
|
|
|
|
|
|
|
memset(padata, 0, sizeof(struct padata_priv));
|
|
|
|
|
|
|
|
padata->parallel = pcrypt_aead_dec;
|
|
|
|
padata->serial = pcrypt_aead_serial;
|
|
|
|
|
|
|
|
aead_request_set_tfm(creq, ctx->child);
|
|
|
|
aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
|
|
|
|
pcrypt_aead_done, req);
|
|
|
|
aead_request_set_crypt(creq, req->src, req->dst,
|
|
|
|
req->cryptlen, req->iv);
|
2015-05-28 17:08:00 +03:00
|
|
|
aead_request_set_ad(creq, req->assoclen);
|
2010-01-07 07:57:19 +03:00
|
|
|
|
crypto: pcrypt - Avoid deadlock by using per-instance padata queues
If the pcrypt template is used multiple times in an algorithm, then a
deadlock occurs because all pcrypt instances share the same
padata_instance, which completes requests in the order submitted. That
is, the inner pcrypt request waits for the outer pcrypt request while
the outer request is already waiting for the inner.
This patch fixes this by allocating a set of queues for each pcrypt
instance instead of using two global queues. In order to maintain
the existing user-space interface, the pinst structure remains global
so any sysfs modifications will apply to every pcrypt instance.
Note that when an update occurs we have to allocate memory for
every pcrypt instance. Should one of the allocations fail we
will abort the update without rolling back changes already made.
The new per-instance data structure is called padata_shell and is
essentially a wrapper around parallel_data.
Reproducer:
#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>
int main()
{
struct sockaddr_alg addr = {
.salg_type = "aead",
.salg_name = "pcrypt(pcrypt(rfc4106-gcm-aesni))"
};
int algfd, reqfd;
char buf[32] = { 0 };
algfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
bind(algfd, (void *)&addr, sizeof(addr));
setsockopt(algfd, SOL_ALG, ALG_SET_KEY, buf, 20);
reqfd = accept(algfd, 0, 0);
write(reqfd, buf, 32);
read(reqfd, buf, 16);
}
Reported-by: syzbot+56c7151cad94eec37c521f0e47d2eee53f9361c4@syzkaller.appspotmail.com
Fixes: 5068c7a883d1 ("crypto: pcrypt - Add pcrypt crypto parallelization wrapper")
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Tested-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2019-11-26 10:58:45 +03:00
|
|
|
err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
|
2010-07-07 17:32:02 +04:00
|
|
|
if (!err)
|
|
|
|
return -EINPROGRESS;
|
crypto: pcrypt - Fix hungtask for PADATA_RESET
[ Upstream commit 8f4f68e788c3a7a696546291258bfa5fdb215523 ]
We found a hungtask bug in test_aead_vec_cfg as follows:
INFO: task cryptomgr_test:391009 blocked for more than 120 seconds.
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
Call trace:
__switch_to+0x98/0xe0
__schedule+0x6c4/0xf40
schedule+0xd8/0x1b4
schedule_timeout+0x474/0x560
wait_for_common+0x368/0x4e0
wait_for_completion+0x20/0x30
wait_for_completion+0x20/0x30
test_aead_vec_cfg+0xab4/0xd50
test_aead+0x144/0x1f0
alg_test_aead+0xd8/0x1e0
alg_test+0x634/0x890
cryptomgr_test+0x40/0x70
kthread+0x1e0/0x220
ret_from_fork+0x10/0x18
Kernel panic - not syncing: hung_task: blocked tasks
For padata_do_parallel, when the return err is 0 or -EBUSY, it will call
wait_for_completion(&wait->completion) in test_aead_vec_cfg. In normal
case, aead_request_complete() will be called in pcrypt_aead_serial and the
return err is 0 for padata_do_parallel. But, when pinst->flags is
PADATA_RESET, the return err is -EBUSY for padata_do_parallel, and it
won't call aead_request_complete(). Therefore, test_aead_vec_cfg will
hung at wait_for_completion(&wait->completion), which will cause
hungtask.
The problem comes as following:
(padata_do_parallel) |
rcu_read_lock_bh(); |
err = -EINVAL; | (padata_replace)
| pinst->flags |= PADATA_RESET;
err = -EBUSY |
if (pinst->flags & PADATA_RESET) |
rcu_read_unlock_bh() |
return err
In order to resolve the problem, we replace the return err -EBUSY with
-EAGAIN, which means parallel_data is changing, and the caller should call
it again.
v3:
remove retry and just change the return err.
v2:
introduce padata_try_do_parallel() in pcrypt_aead_encrypt and
pcrypt_aead_decrypt to solve the hungtask.
Signed-off-by: Lu Jialin <lujialin4@huawei.com>
Signed-off-by: Guo Zihua <guozihua@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
2023-09-04 16:33:41 +03:00
|
|
|
if (err == -EBUSY)
|
|
|
|
return -EAGAIN;
|
2010-01-07 07:57:19 +03:00
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2015-05-28 17:08:00 +03:00
|
|
|
static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
|
2010-01-07 07:57:19 +03:00
|
|
|
{
|
|
|
|
int cpu, cpu_index;
|
2015-05-28 17:08:00 +03:00
|
|
|
struct aead_instance *inst = aead_alg_instance(tfm);
|
|
|
|
struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
|
|
|
|
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
2010-01-07 07:57:19 +03:00
|
|
|
struct crypto_aead *cipher;
|
|
|
|
|
2015-05-22 15:34:22 +03:00
|
|
|
cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
|
|
|
|
cpumask_weight(cpu_online_mask);
|
2010-01-07 07:57:19 +03:00
|
|
|
|
2012-03-28 10:51:03 +04:00
|
|
|
ctx->cb_cpu = cpumask_first(cpu_online_mask);
|
2010-01-07 07:57:19 +03:00
|
|
|
for (cpu = 0; cpu < cpu_index; cpu++)
|
2012-03-28 10:51:03 +04:00
|
|
|
ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);
|
2010-01-07 07:57:19 +03:00
|
|
|
|
2015-05-28 17:08:00 +03:00
|
|
|
cipher = crypto_spawn_aead(&ictx->spawn);
|
2010-01-07 07:57:19 +03:00
|
|
|
|
|
|
|
if (IS_ERR(cipher))
|
|
|
|
return PTR_ERR(cipher);
|
|
|
|
|
|
|
|
ctx->child = cipher;
|
2015-05-28 17:08:00 +03:00
|
|
|
crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) +
|
|
|
|
sizeof(struct aead_request) +
|
|
|
|
crypto_aead_reqsize(cipher));
|
2010-01-07 07:57:19 +03:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-05-28 17:08:00 +03:00
|
|
|
static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
|
2010-01-07 07:57:19 +03:00
|
|
|
{
|
2015-05-28 17:08:00 +03:00
|
|
|
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
2010-01-07 07:57:19 +03:00
|
|
|
|
|
|
|
crypto_free_aead(ctx->child);
|
|
|
|
}
|
|
|
|
|
2017-12-21 01:28:25 +03:00
|
|
|
static void pcrypt_free(struct aead_instance *inst)
|
|
|
|
{
|
|
|
|
struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
|
|
|
|
|
|
|
|
crypto_drop_aead(&ctx->spawn);
|
crypto: pcrypt - Avoid deadlock by using per-instance padata queues
If the pcrypt template is used multiple times in an algorithm, then a
deadlock occurs because all pcrypt instances share the same
padata_instance, which completes requests in the order submitted. That
is, the inner pcrypt request waits for the outer pcrypt request while
the outer request is already waiting for the inner.
This patch fixes this by allocating a set of queues for each pcrypt
instance instead of using two global queues. In order to maintain
the existing user-space interface, the pinst structure remains global
so any sysfs modifications will apply to every pcrypt instance.
Note that when an update occurs we have to allocate memory for
every pcrypt instance. Should one of the allocations fail we
will abort the update without rolling back changes already made.
The new per-instance data structure is called padata_shell and is
essentially a wrapper around parallel_data.
Reproducer:
#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>
int main()
{
struct sockaddr_alg addr = {
.salg_type = "aead",
.salg_name = "pcrypt(pcrypt(rfc4106-gcm-aesni))"
};
int algfd, reqfd;
char buf[32] = { 0 };
algfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
bind(algfd, (void *)&addr, sizeof(addr));
setsockopt(algfd, SOL_ALG, ALG_SET_KEY, buf, 20);
reqfd = accept(algfd, 0, 0);
write(reqfd, buf, 32);
read(reqfd, buf, 16);
}
Reported-by: syzbot+56c7151cad94eec37c521f0e47d2eee53f9361c4@syzkaller.appspotmail.com
Fixes: 5068c7a883d1 ("crypto: pcrypt - Add pcrypt crypto parallelization wrapper")
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Tested-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2019-11-26 10:58:45 +03:00
|
|
|
padata_free_shell(ctx->psdec);
|
|
|
|
padata_free_shell(ctx->psenc);
|
2017-12-21 01:28:25 +03:00
|
|
|
kfree(inst);
|
|
|
|
}
|
|
|
|
|
2015-05-21 10:10:58 +03:00
|
|
|
static int pcrypt_init_instance(struct crypto_instance *inst,
|
|
|
|
struct crypto_alg *alg)
|
2010-01-07 07:57:19 +03:00
|
|
|
{
|
|
|
|
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
|
|
|
"pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
|
2015-05-21 10:10:58 +03:00
|
|
|
return -ENAMETOOLONG;
|
2010-01-07 07:57:19 +03:00
|
|
|
|
|
|
|
memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
|
|
|
|
|
|
|
|
inst->alg.cra_priority = alg->cra_priority + 100;
|
|
|
|
inst->alg.cra_blocksize = alg->cra_blocksize;
|
|
|
|
inst->alg.cra_alignmask = alg->cra_alignmask;
|
|
|
|
|
2015-05-21 10:10:58 +03:00
|
|
|
return 0;
|
2010-01-07 07:57:19 +03:00
|
|
|
}
|
|
|
|
|
2015-05-28 17:08:00 +03:00
|
|
|
static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
|
2020-07-10 09:20:38 +03:00
|
|
|
struct crypto_attr_type *algt)
|
2010-01-07 07:57:19 +03:00
|
|
|
{
|
2015-05-21 10:10:58 +03:00
|
|
|
struct pcrypt_instance_ctx *ctx;
|
2015-05-28 17:08:00 +03:00
|
|
|
struct aead_instance *inst;
|
|
|
|
struct aead_alg *alg;
|
2020-07-10 09:20:38 +03:00
|
|
|
u32 mask = crypto_algt_inherited_mask(algt);
|
2015-05-21 10:10:58 +03:00
|
|
|
int err;
|
|
|
|
|
|
|
|
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
|
|
|
if (!inst)
|
2015-05-28 17:08:00 +03:00
|
|
|
return -ENOMEM;
|
2015-05-21 10:10:58 +03:00
|
|
|
|
crypto: pcrypt - Avoid deadlock by using per-instance padata queues
If the pcrypt template is used multiple times in an algorithm, then a
deadlock occurs because all pcrypt instances share the same
padata_instance, which completes requests in the order submitted. That
is, the inner pcrypt request waits for the outer pcrypt request while
the outer request is already waiting for the inner.
This patch fixes this by allocating a set of queues for each pcrypt
instance instead of using two global queues. In order to maintain
the existing user-space interface, the pinst structure remains global
so any sysfs modifications will apply to every pcrypt instance.
Note that when an update occurs we have to allocate memory for
every pcrypt instance. Should one of the allocations fail we
will abort the update without rolling back changes already made.
The new per-instance data structure is called padata_shell and is
essentially a wrapper around parallel_data.
Reproducer:
#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>
int main()
{
struct sockaddr_alg addr = {
.salg_type = "aead",
.salg_name = "pcrypt(pcrypt(rfc4106-gcm-aesni))"
};
int algfd, reqfd;
char buf[32] = { 0 };
algfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
bind(algfd, (void *)&addr, sizeof(addr));
setsockopt(algfd, SOL_ALG, ALG_SET_KEY, buf, 20);
reqfd = accept(algfd, 0, 0);
write(reqfd, buf, 32);
read(reqfd, buf, 16);
}
Reported-by: syzbot+56c7151cad94eec37c521f0e47d2eee53f9361c4@syzkaller.appspotmail.com
Fixes: 5068c7a883d1 ("crypto: pcrypt - Add pcrypt crypto parallelization wrapper")
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Tested-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2019-11-26 10:58:45 +03:00
|
|
|
err = -ENOMEM;
|
|
|
|
|
2015-05-28 17:08:00 +03:00
|
|
|
ctx = aead_instance_ctx(inst);
|
crypto: pcrypt - Avoid deadlock by using per-instance padata queues
If the pcrypt template is used multiple times in an algorithm, then a
deadlock occurs because all pcrypt instances share the same
padata_instance, which completes requests in the order submitted. That
is, the inner pcrypt request waits for the outer pcrypt request while
the outer request is already waiting for the inner.
This patch fixes this by allocating a set of queues for each pcrypt
instance instead of using two global queues. In order to maintain
the existing user-space interface, the pinst structure remains global
so any sysfs modifications will apply to every pcrypt instance.
Note that when an update occurs we have to allocate memory for
every pcrypt instance. Should one of the allocations fail we
will abort the update without rolling back changes already made.
The new per-instance data structure is called padata_shell and is
essentially a wrapper around parallel_data.
Reproducer:
#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>
int main()
{
struct sockaddr_alg addr = {
.salg_type = "aead",
.salg_name = "pcrypt(pcrypt(rfc4106-gcm-aesni))"
};
int algfd, reqfd;
char buf[32] = { 0 };
algfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
bind(algfd, (void *)&addr, sizeof(addr));
setsockopt(algfd, SOL_ALG, ALG_SET_KEY, buf, 20);
reqfd = accept(algfd, 0, 0);
write(reqfd, buf, 32);
read(reqfd, buf, 16);
}
Reported-by: syzbot+56c7151cad94eec37c521f0e47d2eee53f9361c4@syzkaller.appspotmail.com
Fixes: 5068c7a883d1 ("crypto: pcrypt - Add pcrypt crypto parallelization wrapper")
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Tested-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2019-11-26 10:58:45 +03:00
|
|
|
ctx->psenc = padata_alloc_shell(pencrypt);
|
|
|
|
if (!ctx->psenc)
|
2020-02-26 07:59:22 +03:00
|
|
|
goto err_free_inst;
|
crypto: pcrypt - Avoid deadlock by using per-instance padata queues
If the pcrypt template is used multiple times in an algorithm, then a
deadlock occurs because all pcrypt instances share the same
padata_instance, which completes requests in the order submitted. That
is, the inner pcrypt request waits for the outer pcrypt request while
the outer request is already waiting for the inner.
This patch fixes this by allocating a set of queues for each pcrypt
instance instead of using two global queues. In order to maintain
the existing user-space interface, the pinst structure remains global
so any sysfs modifications will apply to every pcrypt instance.
Note that when an update occurs we have to allocate memory for
every pcrypt instance. Should one of the allocations fail we
will abort the update without rolling back changes already made.
The new per-instance data structure is called padata_shell and is
essentially a wrapper around parallel_data.
Reproducer:
#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>
int main()
{
struct sockaddr_alg addr = {
.salg_type = "aead",
.salg_name = "pcrypt(pcrypt(rfc4106-gcm-aesni))"
};
int algfd, reqfd;
char buf[32] = { 0 };
algfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
bind(algfd, (void *)&addr, sizeof(addr));
setsockopt(algfd, SOL_ALG, ALG_SET_KEY, buf, 20);
reqfd = accept(algfd, 0, 0);
write(reqfd, buf, 32);
read(reqfd, buf, 16);
}
Reported-by: syzbot+56c7151cad94eec37c521f0e47d2eee53f9361c4@syzkaller.appspotmail.com
Fixes: 5068c7a883d1 ("crypto: pcrypt - Add pcrypt crypto parallelization wrapper")
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Tested-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2019-11-26 10:58:45 +03:00
|
|
|
|
|
|
|
ctx->psdec = padata_alloc_shell(pdecrypt);
|
|
|
|
if (!ctx->psdec)
|
2020-02-26 07:59:22 +03:00
|
|
|
goto err_free_inst;
|
crypto: pcrypt - Avoid deadlock by using per-instance padata queues
If the pcrypt template is used multiple times in an algorithm, then a
deadlock occurs because all pcrypt instances share the same
padata_instance, which completes requests in the order submitted. That
is, the inner pcrypt request waits for the outer pcrypt request while
the outer request is already waiting for the inner.
This patch fixes this by allocating a set of queues for each pcrypt
instance instead of using two global queues. In order to maintain
the existing user-space interface, the pinst structure remains global
so any sysfs modifications will apply to every pcrypt instance.
Note that when an update occurs we have to allocate memory for
every pcrypt instance. Should one of the allocations fail we
will abort the update without rolling back changes already made.
The new per-instance data structure is called padata_shell and is
essentially a wrapper around parallel_data.
Reproducer:
#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>
int main()
{
struct sockaddr_alg addr = {
.salg_type = "aead",
.salg_name = "pcrypt(pcrypt(rfc4106-gcm-aesni))"
};
int algfd, reqfd;
char buf[32] = { 0 };
algfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
bind(algfd, (void *)&addr, sizeof(addr));
setsockopt(algfd, SOL_ALG, ALG_SET_KEY, buf, 20);
reqfd = accept(algfd, 0, 0);
write(reqfd, buf, 32);
read(reqfd, buf, 16);
}
Reported-by: syzbot+56c7151cad94eec37c521f0e47d2eee53f9361c4@syzkaller.appspotmail.com
Fixes: 5068c7a883d1 ("crypto: pcrypt - Add pcrypt crypto parallelization wrapper")
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Tested-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2019-11-26 10:58:45 +03:00
|
|
|
|
2020-01-03 06:58:46 +03:00
|
|
|
err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst),
|
2020-07-10 09:20:38 +03:00
|
|
|
crypto_attr_alg_name(tb[1]), 0, mask);
|
2015-05-21 10:10:58 +03:00
|
|
|
if (err)
|
2020-02-26 07:59:22 +03:00
|
|
|
goto err_free_inst;
|
2010-01-07 07:57:19 +03:00
|
|
|
|
2015-05-28 17:08:00 +03:00
|
|
|
alg = crypto_spawn_aead_alg(&ctx->spawn);
|
|
|
|
err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
|
2015-05-21 10:10:58 +03:00
|
|
|
if (err)
|
2020-02-26 07:59:22 +03:00
|
|
|
goto err_free_inst;
|
2010-01-07 07:57:19 +03:00
|
|
|
|
2020-07-10 09:20:38 +03:00
|
|
|
inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC;
|
2015-07-09 02:17:18 +03:00
|
|
|
|
2015-05-28 17:08:00 +03:00
|
|
|
inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
|
|
|
|
inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
|
2010-01-07 07:57:19 +03:00
|
|
|
|
2015-05-28 17:08:00 +03:00
|
|
|
inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);
|
2010-01-07 07:57:19 +03:00
|
|
|
|
2015-05-28 17:08:00 +03:00
|
|
|
inst->alg.init = pcrypt_aead_init_tfm;
|
|
|
|
inst->alg.exit = pcrypt_aead_exit_tfm;
|
2010-01-07 07:57:19 +03:00
|
|
|
|
2015-05-28 17:08:00 +03:00
|
|
|
inst->alg.setkey = pcrypt_aead_setkey;
|
|
|
|
inst->alg.setauthsize = pcrypt_aead_setauthsize;
|
|
|
|
inst->alg.encrypt = pcrypt_aead_encrypt;
|
|
|
|
inst->alg.decrypt = pcrypt_aead_decrypt;
|
2010-01-07 07:57:19 +03:00
|
|
|
|
2017-12-21 01:28:25 +03:00
|
|
|
inst->free = pcrypt_free;
|
|
|
|
|
2015-05-28 17:08:00 +03:00
|
|
|
err = aead_register_instance(tmpl, inst);
|
2020-02-26 07:59:22 +03:00
|
|
|
if (err) {
|
|
|
|
err_free_inst:
|
|
|
|
pcrypt_free(inst);
|
|
|
|
}
|
2015-05-28 17:08:00 +03:00
|
|
|
return err;
|
2010-01-07 07:57:19 +03:00
|
|
|
}
|
|
|
|
|
2015-05-28 17:08:00 +03:00
|
|
|
static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
|
2010-01-07 07:57:19 +03:00
|
|
|
{
|
|
|
|
struct crypto_attr_type *algt;
|
|
|
|
|
|
|
|
algt = crypto_get_attr_type(tb);
|
|
|
|
if (IS_ERR(algt))
|
2015-05-28 17:08:00 +03:00
|
|
|
return PTR_ERR(algt);
|
2010-01-07 07:57:19 +03:00
|
|
|
|
|
|
|
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
|
|
|
|
case CRYPTO_ALG_TYPE_AEAD:
|
2020-07-10 09:20:38 +03:00
|
|
|
return pcrypt_create_aead(tmpl, tb, algt);
|
2010-01-07 07:57:19 +03:00
|
|
|
}
|
|
|
|
|
2015-05-28 17:08:00 +03:00
|
|
|
return -EINVAL;
|
2010-01-07 07:57:19 +03:00
|
|
|
}
|
|
|
|
|
2010-07-14 14:34:15 +04:00
|
|
|
static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
pinst->kobj.kset = pcrypt_kset;
|
2018-10-27 17:49:26 +03:00
|
|
|
ret = kobject_add(&pinst->kobj, NULL, "%s", name);
|
2010-07-14 14:34:15 +04:00
|
|
|
if (!ret)
|
|
|
|
kobject_uevent(&pinst->kobj, KOBJ_ADD);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-09-06 04:40:25 +03:00
|
|
|
static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
|
2010-07-14 14:31:57 +04:00
|
|
|
{
|
|
|
|
int ret = -ENOMEM;
|
|
|
|
|
2020-07-14 23:13:55 +03:00
|
|
|
*pinst = padata_alloc(name);
|
2019-09-06 04:40:25 +03:00
|
|
|
if (!*pinst)
|
|
|
|
return ret;
|
2010-07-14 14:31:57 +04:00
|
|
|
|
2019-09-06 04:40:25 +03:00
|
|
|
ret = pcrypt_sysfs_add(*pinst, name);
|
2010-07-14 14:34:15 +04:00
|
|
|
if (ret)
|
2019-09-06 04:40:25 +03:00
|
|
|
padata_free(*pinst);
|
2010-07-14 14:34:15 +04:00
|
|
|
|
2010-07-14 14:31:57 +04:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2010-01-07 07:57:19 +03:00
|
|
|
/* Template registration: "pcrypt(...)" instances are built by pcrypt_create(). */
static struct crypto_template pcrypt_tmpl = {
	.name = "pcrypt",
	.create = pcrypt_create,
	.module = THIS_MODULE,
};
|
|
|
|
|
|
|
|
/*
 * Module init: create the "pcrypt" sysfs kset, the shared pencrypt and
 * pdecrypt padata instances, and finally register the template.
 *
 * Fix: the original returned crypto_register_template()'s result
 * directly, leaking pencrypt, pdecrypt and the kset when registration
 * failed.  Unwind through the goto chain instead.
 */
static int __init pcrypt_init(void)
{
	int err = -ENOMEM;

	pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
	if (!pcrypt_kset)
		goto err;

	err = pcrypt_init_padata(&pencrypt, "pencrypt");
	if (err)
		goto err_unreg_kset;

	err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
	if (err)
		goto err_deinit_pencrypt;

	err = crypto_register_template(&pcrypt_tmpl);
	if (err)
		goto err_deinit_pdecrypt;

	return 0;

err_deinit_pdecrypt:
	padata_free(pdecrypt);
err_deinit_pencrypt:
	padata_free(pencrypt);
err_unreg_kset:
	kset_unregister(pcrypt_kset);
err:
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Module exit: tear down in reverse order of pcrypt_init().  The
 * template is unregistered first so no new pcrypt instances can be
 * created while the padata instances are being freed.
 */
static void __exit pcrypt_exit(void)
{
	crypto_unregister_template(&pcrypt_tmpl);

	padata_free(pencrypt);
	padata_free(pdecrypt);

	kset_unregister(pcrypt_kset);
}
|
|
|
|
|
2019-04-12 07:57:42 +03:00
|
|
|
/* subsys_initcall: presumably registered early so the template is
 * available before dependent crypto users initialise — TODO confirm. */
subsys_initcall(pcrypt_init);
module_exit(pcrypt_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");
MODULE_ALIAS_CRYPTO("pcrypt");
|