crypto: x86/chacha - avoid sleeping under kernel_fpu_begin()
Passing atomic=true to skcipher_walk_virt() only makes the later
skcipher_walk_done() calls use atomic memory allocations, not
skcipher_walk_virt() itself.  Thus, we have to move it outside of the
preemption-disabled region (kernel_fpu_begin()/kernel_fpu_end()).

(skcipher_walk_virt() only allocates memory for certain layouts of the
input scatterlist, which is why I didn't notice this earlier...)
Reported-by: syzbot+9bf843c33f782d73ae7d@syzkaller.appspotmail.com
Fixes: 4af7826187 ("crypto: x86/chacha20 - add XChaCha20 support")
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
Parent: c9613335bf
Commit: f9c9bdb513
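In short, the skcipher walk must be set up before entering the FPU
region, and the initialized walk is then passed into the helper.  For
reference, this is chacha_simd() as it reads after the change (taken
from the diff below; the comments are annotations added here, not part
of the patch).  xchacha_simd() follows the same pattern:

	static int chacha_simd(struct skcipher_request *req)
	{
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
		struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
		struct skcipher_walk walk;
		int err;

		if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
			return crypto_chacha_crypt(req);

		/*
		 * May allocate memory and thus sleep, so it must run while
		 * preemption is still enabled.
		 */
		err = skcipher_walk_virt(&walk, req, true);
		if (err)
			return err;

		kernel_fpu_begin();	/* preemption disabled from here... */
		err = chacha_simd_stream_xor(&walk, ctx, req->iv);
		kernel_fpu_end();	/* ...to here */
		return err;
	}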
@@ -127,30 +127,27 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
 	}
 }
 
-static int chacha_simd_stream_xor(struct skcipher_request *req,
+static int chacha_simd_stream_xor(struct skcipher_walk *walk,
 				  struct chacha_ctx *ctx, u8 *iv)
 {
 	u32 *state, state_buf[16 + 2] __aligned(8);
-	struct skcipher_walk walk;
 	int next_yield = 4096; /* bytes until next FPU yield */
-	int err;
+	int err = 0;
 
 	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
 	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
 
-	err = skcipher_walk_virt(&walk, req, true);
-
 	crypto_chacha_init(state, ctx, iv);
 
-	while (walk.nbytes > 0) {
-		unsigned int nbytes = walk.nbytes;
+	while (walk->nbytes > 0) {
+		unsigned int nbytes = walk->nbytes;
 
-		if (nbytes < walk.total) {
-			nbytes = round_down(nbytes, walk.stride);
+		if (nbytes < walk->total) {
+			nbytes = round_down(nbytes, walk->stride);
 			next_yield -= nbytes;
 		}
 
-		chacha_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
+		chacha_dosimd(state, walk->dst.virt.addr, walk->src.virt.addr,
 			      nbytes, ctx->nrounds);
 
 		if (next_yield <= 0) {
@@ -160,7 +157,7 @@ static int chacha_simd_stream_xor(struct skcipher_request *req,
 			next_yield = 4096;
 		}
 
-		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+		err = skcipher_walk_done(walk, walk->nbytes - nbytes);
 	}
 
 	return err;
@@ -170,13 +167,18 @@ static int chacha_simd(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	int err;
 
 	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
 		return crypto_chacha_crypt(req);
 
+	err = skcipher_walk_virt(&walk, req, true);
+	if (err)
+		return err;
+
 	kernel_fpu_begin();
-	err = chacha_simd_stream_xor(req, ctx, req->iv);
+	err = chacha_simd_stream_xor(&walk, ctx, req->iv);
 	kernel_fpu_end();
 	return err;
 }
@@ -185,6 +187,7 @@ static int xchacha_simd(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	struct chacha_ctx subctx;
 	u32 *state, state_buf[16 + 2] __aligned(8);
 	u8 real_iv[16];
@@ -193,6 +196,10 @@ static int xchacha_simd(struct skcipher_request *req)
 	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
 		return crypto_xchacha_crypt(req);
 
+	err = skcipher_walk_virt(&walk, req, true);
+	if (err)
+		return err;
+
 	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
 	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
 	crypto_chacha_init(state, ctx, req->iv);
@@ -204,7 +211,7 @@ static int xchacha_simd(struct skcipher_request *req)
 
 	memcpy(&real_iv[0], req->iv + 24, 8);
 	memcpy(&real_iv[8], req->iv + 16, 8);
-	err = chacha_simd_stream_xor(req, &subctx, real_iv);
+	err = chacha_simd_stream_xor(&walk, &subctx, real_iv);
 
 	kernel_fpu_end();
 