Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:

 - fix new compiler warnings in cavium

 - set post-op IV properly in caam (this fixes chaining)

 - fix potential use-after-free in atmel in case of EBUSY

 - fix sleeping in softirq path in chcr

 - disable buggy sha1-avx2 driver (may overread and page fault)

 - fix use-after-free on signals in caam

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: cavium - make several functions static
  crypto: chcr - Avoid algo allocation in softirq.
  crypto: caam - properly set IV after {en,de}crypt
  crypto: atmel - only treat EBUSY as transient if backlog
  crypto: af_alg - Avoid sock_graft call warning
  crypto: caam - fix signals handling
  crypto: sha1-ssse3 - Disable avx2
Commit dcf903d0c9
@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
 
 static bool avx2_usable(void)
 {
-        if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+        if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
                 && boot_cpu_has(X86_FEATURE_BMI1)
                 && boot_cpu_has(X86_FEATURE_BMI2))
                 return true;
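This hunk is in the SHA-1 SSSE3/AVX2 glue code (arch/x86/crypto/sha1_ssse3_glue.c). Prefixing the condition with "false &&" makes avx2_usable() unconditionally return false, so the buggy AVX2 transform, which may read past the end of its input and page-fault, can never be selected, while the rest of the driver compiles unchanged. A minimal userspace sketch of the same short-circuit disable pattern; avx_usable() and has_avx2() below are stand-in stubs, not the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's CPU feature checks. */
static bool avx_usable(void) { return true; }
static bool has_avx2(void)   { return true; }

/*
 * C's short-circuit evaluation guarantees the feature checks after
 * "false &&" are never evaluated, so the function constant-folds to
 * false and the disabled code path is unreachable.
 */
static bool avx2_usable(void)
{
        if (false && avx_usable() && has_avx2())
                return true;
        return false;
}

int main(void)
{
        printf("avx2 usable: %d\n", avx2_usable()); /* always 0 */
        return 0;
}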
@@ -287,7 +287,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
                goto unlock;
 
        sock_init_data(newsock, sk2);
-       sock_graft(sk2, newsock);
+       security_sock_graft(sk2, newsock);
        security_sk_clone(sk, sk2);
 
        err = type->accept(ask->private, sk2);
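For context: sock_init_data() on the line above already attaches sk2 to newsock, so repeating the full sock_graft() is redundant and, as the commit title says, triggers a warning; only the LSM notification half of the graft is still needed, which security_sock_graft() provides directly. A freestanding sketch of that split, using hypothetical names (init_data, lsm_graft_hook) that merely mirror the kernel calls:

#include <stdio.h>

struct sock;
struct socket { struct sock *sk; };
struct sock   { struct socket *parent; };

/* analog of sock_init_data(): already wires the sock into the socket */
static void init_data(struct socket *parent, struct sock *sk)
{
        parent->sk = sk;
        sk->parent = parent;
}

/* analog of security_sock_graft(): just the LSM notification */
static void lsm_graft_hook(struct sock *sk, struct socket *parent)
{
        (void)sk; (void)parent;
        printf("LSM notified\n");
}

int main(void)
{
        struct socket newsock = { 0 };
        struct sock sk2 = { 0 };

        init_data(&newsock, &sk2);      /* attachment happens exactly once */
        lsm_graft_hook(&sk2, &newsock); /* no second full graft, no warning */
        return 0;
}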
@@ -1204,7 +1204,9 @@ static int atmel_sha_finup(struct ahash_request *req)
        ctx->flags |= SHA_FLAGS_FINUP;
 
        err1 = atmel_sha_update(req);
-       if (err1 == -EINPROGRESS || err1 == -EBUSY)
+       if (err1 == -EINPROGRESS ||
+           (err1 == -EBUSY && (ahash_request_flags(req) &
+                               CRYPTO_TFM_REQ_MAY_BACKLOG)))
                return err1;
 
        /*
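The one-line condition becomes three because -EBUSY only means "request queued, completion pending" when the caller set CRYPTO_TFM_REQ_MAY_BACKLOG; without that flag it is a hard failure, and returning early here would let the request be completed (and freed) behind the caller's back. A runnable sketch of the convention, with REQ_MAY_BACKLOG as a stand-in for the kernel flag:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define REQ_MAY_BACKLOG 0x1     /* stand-in for CRYPTO_TFM_REQ_MAY_BACKLOG */

/*
 * -EINPROGRESS always means "accepted, completion arrives later";
 * -EBUSY means the same thing only if the caller allowed backlogging.
 * Otherwise -EBUSY is a plain failure, and treating it as in-flight
 * risks a use-after-free of the request.
 */
static bool request_in_flight(int err, unsigned int flags)
{
        return err == -EINPROGRESS ||
               (err == -EBUSY && (flags & REQ_MAY_BACKLOG));
}

int main(void)
{
        printf("%d\n", request_in_flight(-EBUSY, 0));               /* 0: failure */
        printf("%d\n", request_in_flight(-EBUSY, REQ_MAY_BACKLOG)); /* 1: queued  */
        return 0;
}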
@@ -882,10 +882,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 {
        struct ablkcipher_request *req = context;
        struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 
+#ifdef DEBUG
        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
@@ -904,6 +904,14 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 #endif
 
        ablkcipher_unmap(jrdev, edesc, req);
+
+       /*
+        * The crypto API expects us to set the IV (req->info) to the last
+        * ciphertext block. This is used e.g. by the CTS mode.
+        */
+       scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+                                ivsize, 0);
+
        kfree(edesc);
 
        ablkcipher_request_complete(req, err);
@@ -914,10 +922,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 {
        struct ablkcipher_request *req = context;
        struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 
+#ifdef DEBUG
        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
@@ -935,6 +943,14 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 #endif
 
        ablkcipher_unmap(jrdev, edesc, req);
+
+       /*
+        * The crypto API expects us to set the IV (req->info) to the last
+        * ciphertext block.
+        */
+       scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+                                ivsize, 0);
+
        kfree(edesc);
 
        ablkcipher_request_complete(req, err);
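Both caam completion callbacks (drivers/crypto/caam/caamalg.c) gain the same postamble because the crypto API contract requires req->info to hold the last ciphertext block after the operation completes, so a follow-on request can chain (CBC chaining, CTS). On encryption that block sits at the tail of req->dst; on decryption the ciphertext was the input, hence req->src. A userspace sketch with flat buffers in place of scatterlists (save_chain_iv is a hypothetical name):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define IVSIZE 16

/*
 * After a CBC-style operation, copy the last ciphertext block into the
 * IV buffer so the next request continues the chain -- the flat-buffer
 * equivalent of the scatterwalk_map_and_copy() calls above.
 */
static void save_chain_iv(unsigned char *iv,
                          const unsigned char *ciphertext, size_t len)
{
        memcpy(iv, ciphertext + len - IVSIZE, IVSIZE);
}

int main(void)
{
        unsigned char ct[32] = { 0 }, iv[IVSIZE];

        ct[31] = 0xab;                  /* pretend last ciphertext byte */
        save_chain_iv(iv, ct, sizeof(ct));
        printf("iv[15] = 0x%02x\n", iv[15]);    /* 0xab */
        return 0;
}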
@@ -396,7 +396,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
        ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
        if (!ret) {
                /* in progress */
-               wait_for_completion_interruptible(&result.completion);
+               wait_for_completion(&result.completion);
                ret = result.err;
 #ifdef DEBUG
                print_hex_dump(KERN_ERR,
@@ -149,7 +149,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
        ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
        if (!ret) {
                /* in progress */
-               wait_for_completion_interruptible(&result.completion);
+               wait_for_completion(&result.completion);
                ret = result.err;
 #ifdef DEBUG
                print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
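In both call sites the result structure lives on the caller's stack and is written by the job-ring completion callback. wait_for_completion_interruptible() can return early on a signal, after which the stack frame is reused while the hardware still owns the descriptor; the callback then writes through a dangling pointer. The uninterruptible wait closes that window. A runnable userspace analog of the hazard and the fix (the names here are illustrative, not kernel APIs; build with -lpthread):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

/* A stack-allocated result block filled in by an asynchronous worker
 * (the job-ring callback). If the waiter gave up early -- what the
 * interruptible wait does on a signal -- the worker would write through
 * a dangling pointer. The unconditional sem_wait() mirrors
 * wait_for_completion(): stay until the worker is definitely done. */
struct result {
        sem_t done;
        int err;
};

static void *worker(void *arg)
{
        struct result *res = arg;       /* points into the waiter's stack */
        res->err = 0;                   /* "hardware" reports its status  */
        sem_post(&res->done);
        return NULL;
}

int main(void)
{
        struct result res;              /* on-stack, like split_key_result */
        pthread_t t;

        sem_init(&res.done, 0, 0);
        pthread_create(&t, NULL, worker, &res);
        sem_wait(&res.done);            /* must not return early */
        pthread_join(t, NULL);
        printf("err = %d\n", res.err);
        return 0;
}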
@@ -222,17 +222,17 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
        return -EINPROGRESS;
 }
 
-int cvm_encrypt(struct ablkcipher_request *req)
+static int cvm_encrypt(struct ablkcipher_request *req)
 {
        return cvm_enc_dec(req, true);
 }
 
-int cvm_decrypt(struct ablkcipher_request *req)
+static int cvm_decrypt(struct ablkcipher_request *req)
 {
        return cvm_enc_dec(req, false);
 }
 
-int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                   u32 keylen)
 {
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
@@ -336,7 +336,7 @@ static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
        return cvm_setkey(cipher, key, keylen, DES3_ECB);
 }
 
-int cvm_enc_dec_init(struct crypto_tfm *tfm)
+static int cvm_enc_dec_init(struct crypto_tfm *tfm)
 {
        struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
 
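These cavium functions are referenced only through the driver's crypto_alg callback tables in the same file, so giving them internal linkage is correct and silences the new compiler warnings about externally visible functions lacking a prototype. A compact illustration of the pattern; cvm_encrypt_stub and encrypt_op are hypothetical stand-ins:

#include <stdio.h>

/* "static" gives a file-scope function internal linkage: the compiler
 * sees every caller, so it no longer warns about an externally visible
 * function that is declared in no header. */
static int cvm_encrypt_stub(int req)            /* hypothetical stand-in */
{
        return req + 1;
}

/* callback-table entry, analogous to .encrypt = cvm_encrypt in the
 * driver's alg template -- the only way the function is ever reached */
static int (*const encrypt_op)(int) = cvm_encrypt_stub;

int main(void)
{
        printf("%d\n", encrypt_op(41));         /* prints 42 */
        return 0;
}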
@@ -898,26 +898,20 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
        u8 *key;
        unsigned int keylen;
 
-       cipher = crypto_alloc_cipher("aes-generic", 0, 0);
+       cipher = ablkctx->aes_generic;
        memcpy(iv, req->info, AES_BLOCK_SIZE);
 
-       if (IS_ERR(cipher)) {
-               ret = -ENOMEM;
-               goto out;
-       }
        keylen = ablkctx->enckey_len / 2;
        key = ablkctx->key + keylen;
        ret = crypto_cipher_setkey(cipher, key, keylen);
        if (ret)
-               goto out1;
+               goto out;
 
        crypto_cipher_encrypt_one(cipher, iv, iv);
        for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++)
                gf128mul_x_ble((le128 *)iv, (le128 *)iv);
 
        crypto_cipher_decrypt_one(cipher, iv, iv);
-out1:
-       crypto_free_cipher(cipher);
 out:
        return ret;
 }
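chcr_update_tweak() runs in the request-completion path (softirq context), where crypto_alloc_cipher(), which may sleep, is not allowed; the next hunk moves the allocation into chcr_cra_init(), and this one merely reuses the preallocated handle. The arithmetic itself is standard XTS tweak recomputation: encrypt the IV with the second key half, then multiply by x in GF(2^128) once per already-processed block. A freestanding sketch of that doubling step, the role gf128mul_x_ble() plays here, assuming the usual little-endian XTS convention:

#include <stdint.h>
#include <stdio.h>

/* Multiply a 128-bit XTS tweak by x in GF(2^128), little-endian bytes:
 * shift the whole value left one bit; on carry out of the top bit,
 * fold the field polynomial back in as 0x87 on the low byte. */
static void xts_mul_x(uint8_t b[16])
{
        uint8_t carry = 0;
        for (int i = 0; i < 16; i++) {
                uint8_t next = b[i] >> 7;
                b[i] = (uint8_t)((b[i] << 1) | carry);
                carry = next;
        }
        if (carry)
                b[0] ^= 0x87;
}

int main(void)
{
        uint8_t tweak[16] = { 1 };      /* the encrypted IV would go here */

        for (int i = 0; i < 3; i++)     /* once per processed block */
                xts_mul_x(tweak);       /* cf. gf128mul_x_ble() */
        printf("tweak[0] = 0x%02x\n", tweak[0]);        /* 0x08 */
        return 0;
}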
@@ -1261,6 +1255,17 @@ static int chcr_cra_init(struct crypto_tfm *tfm)
                pr_err("failed to allocate fallback for %s\n", alg->cra_name);
                return PTR_ERR(ablkctx->sw_cipher);
        }
+
+       if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
+               /* To update tweak*/
+               ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
+               if (IS_ERR(ablkctx->aes_generic)) {
+                       pr_err("failed to allocate aes cipher for tweak\n");
+                       return PTR_ERR(ablkctx->aes_generic);
+               }
+       } else
+               ablkctx->aes_generic = NULL;
+
        tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
        return chcr_device_init(crypto_tfm_ctx(tfm));
 }
@@ -1291,6 +1296,8 @@ static void chcr_cra_exit(struct crypto_tfm *tfm)
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 
        crypto_free_skcipher(ablkctx->sw_cipher);
+       if (ablkctx->aes_generic)
+               crypto_free_cipher(ablkctx->aes_generic);
 }
 
 static int get_alg_config(struct algo_param *params,
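Taken together, the chcr hunks move the cipher's lifetime out of the hot path: allocate once in chcr_cra_init() (only for XTS, NULL otherwise), use it in chcr_update_tweak(), free it in chcr_cra_exit() behind a NULL check. A runnable sketch of that allocate-once/use-in-atomic-context/free-at-teardown pattern, with malloc standing in for crypto_alloc_cipher():

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical context mirroring ablk_ctx: the expensive resource is
 * created once in process context and only *used* in the completion
 * path, which must not sleep. */
struct ctx {
        void *aes_generic;              /* stands in for the cipher handle */
};

static int ctx_init(struct ctx *c)      /* may sleep, like chcr_cra_init() */
{
        c->aes_generic = malloc(64);
        return c->aes_generic ? 0 : -1;
}

static void hot_path(const struct ctx *c) /* atomic: no allocation here */
{
        printf("using preallocated handle %p\n", c->aes_generic);
}

static void ctx_exit(struct ctx *c)     /* like chcr_cra_exit(); the kernel
                                           path NULL-checks before freeing */
{
        free(c->aes_generic);
}

int main(void)
{
        struct ctx c;

        if (ctx_init(&c))
                return 1;
        hot_path(&c);
        ctx_exit(&c);
        return 0;
}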
@@ -155,6 +155,7 @@
 
 struct ablk_ctx {
        struct crypto_skcipher *sw_cipher;
+       struct crypto_cipher *aes_generic;
        __be32 key_ctx_hdr;
        unsigned int enckey_len;
        unsigned char ciph_mode;