crypto: arm64-aes - fix encryption of unaligned data
cryptsetup fails on arm64 when using kernel encryption via an AF_ALG socket. See https://bugzilla.redhat.com/show_bug.cgi?id=1122937

The bug is caused by incorrect handling of unaligned data in arch/arm64/crypto/aes-glue.c. Cryptsetup creates a buffer that is aligned on 8 bytes, but not on 16 bytes. It opens an AF_ALG socket and uses the socket to encrypt data in the buffer. The arm64 crypto accelerator causes data corruption or crashes in scatterwalk_pagedone.

This patch fixes the bug by passing the number of residue bytes that were not processed as the last parameter to blkcipher_walk_done.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Parent: e052dbf554
Commit: f960d2093f
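For context, and not part of the upstream patch: the failure can be driven from userspace through an AF_ALG skcipher socket when the data buffer is aligned to 8 bytes but not to 16, which is essentially what cryptsetup does. The program below is a minimal, hypothetical sketch of that scenario; the cipher name cbc(aes), the all-zero key and IV, and the 512-byte payload are illustrative choices rather than cryptsetup's actual parameters.

/*
 * Hypothetical minimal reproducer (not cryptsetup code): drives the kernel
 * skcipher through AF_ALG using a buffer that is 8-byte but not 16-byte
 * aligned.  Build with: gcc -o afalg-repro afalg-repro.c
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",
	};
	unsigned char key[16] = { 0 };
	unsigned char iv[16]  = { 0 };
	size_t len = 512;

	/* 8-byte aligned, deliberately NOT 16-byte aligned data pointer. */
	unsigned char *page = aligned_alloc(16, 4096 + 16);
	unsigned char *buf  = page + 8;
	memset(buf, 0xaa, len);

	int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	int op = accept(tfm, NULL, NULL);

	/* Operation type and IV travel as ancillary data on the request. */
	union {
		char buf[CMSG_SPACE(sizeof(int)) +
			 CMSG_SPACE(offsetof(struct af_alg_iv, iv) + sizeof(iv))];
		struct cmsghdr align;
	} ctrl = { 0 };
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov        = &iov,
		.msg_iovlen     = 1,
		.msg_control    = ctrl.buf,
		.msg_controllen = sizeof(ctrl.buf),
	};

	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_OP;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
	*(int *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_IV;
	cmsg->cmsg_len   = CMSG_LEN(offsetof(struct af_alg_iv, iv) + sizeof(iv));
	struct af_alg_iv *alg_iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	alg_iv->ivlen = sizeof(iv);
	memcpy(alg_iv->iv, iv, sizeof(iv));

	/* Send the misaligned plaintext, read the ciphertext back in place. */
	if (sendmsg(op, &msg, 0) != (ssize_t)len ||
	    read(op, buf, len) != (ssize_t)len)
		perror("af_alg");

	close(op);
	close(tfm);
	free(page);
	return 0;
}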
@@ -106,7 +106,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -128,7 +128,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_dec, rounds, blocks, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -151,7 +151,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
 				first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -174,7 +174,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_dec, rounds, blocks, walk.iv,
 				first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -243,7 +243,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key1.key_enc, rounds, blocks,
 				(u8 *)ctx->key2.key_enc, walk.iv, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();

@@ -267,7 +267,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key1.key_dec, rounds, blocks,
 				(u8 *)ctx->key2.key_enc, walk.iv, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();

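Not part of the patch, but for readers of the hunks above: walk.nbytes is not guaranteed to be a multiple of AES_BLOCK_SIZE when the caller's buffers are misaligned, and the third argument of blkcipher_walk_done() is the number of bytes left unprocessed in the current chunk. The sketch below (ecb_encrypt shown; the other five loops have the same shape) annotates the fixed pattern; it is an illustration of the walk contract, not a verbatim copy of aes-glue.c.

	/* Illustrative sketch of the corrected loop (ecb_encrypt variant). */
	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		/* Process only the whole AES blocks in this chunk. */
		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key_enc, rounds, blocks, first);
		/*
		 * Report the partial tail (walk.nbytes % AES_BLOCK_SIZE) as
		 * unprocessed so the walk code carries it into the next
		 * iteration instead of treating it as already consumed.
		 */
		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
	}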