Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
 "This fixes the following issues:
   - Zero-length DMA mapping in caam
   - Invalidly mapping stack memory for DMA in talitos
   - Use after free in cavium/nitrox
   - Key parsing in authenc
   - Undefined shift in sm3
   - Bogus completion call in authencesn
   - SHA support detection in caam"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: sm3 - fix undefined shift by >= width of value
  crypto: talitos - fix ablkcipher for CONFIG_VMAP_STACK
  crypto: talitos - reorder code in talitos_edesc_alloc()
  crypto: adiantum - initialize crypto_spawn::inst
  crypto: cavium/nitrox - Use after free in process_response_list()
  crypto: authencesn - Avoid twice completion call in decrypt path
  crypto: caam - fix SHA support detection
  crypto: caam - fix zero-length buffer DMA mapping
  crypto: ccree - convert to use crypto_authenc_extractkeys()
  crypto: bcm - convert to use crypto_authenc_extractkeys()
  crypto: authenc - fix parsing key with misaligned rta_len
Linus Torvalds 2019-01-19 05:48:43 +12:00
Parent 6e434bf2e3 d45a90cb5d
Commit dc6fef2cc5
13 changed files with 80 additions and 82 deletions

crypto/adiantum.c

@@ -539,6 +539,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 	ictx = skcipher_instance_ctx(inst);
 
 	/* Stream cipher, e.g. "xchacha12" */
+	crypto_set_skcipher_spawn(&ictx->streamcipher_spawn,
+				  skcipher_crypto_instance(inst));
 	err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name,
 				   0, crypto_requires_sync(algt->type,
 							   algt->mask));
@@ -547,6 +549,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 	streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
 
 	/* Block cipher, e.g. "aes" */
+	crypto_set_spawn(&ictx->blockcipher_spawn,
+			 skcipher_crypto_instance(inst));
 	err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name,
 				CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK);
 	if (err)
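
Note (not part of the diff): crypto_grab_skcipher()/crypto_grab_spawn() link the spawn into the target algorithm's user list but do not set the spawn's instance backpointer, so a spawn grabbed with spawn->inst uninitialized leaves a garbage pointer for later instance/algorithm teardown to dereference. The required ordering, sketched:

	/* Sketch only: record the owning instance first, then grab the alg. */
	crypto_set_skcipher_spawn(&spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(&spawn, name, 0, mask);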

crypto/authenc.c

@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
 		return -EINVAL;
 	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
 		return -EINVAL;
-	if (RTA_PAYLOAD(rta) < sizeof(*param))
+
+	/*
+	 * RTA_OK() didn't align the rtattr's payload when validating that it
+	 * fits in the buffer.  Yet, the keys should start on the next 4-byte
+	 * aligned boundary.  To avoid confusion, require that the rtattr
+	 * payload be exactly the param struct, which has a 4-byte aligned size.
+	 */
+	if (RTA_PAYLOAD(rta) != sizeof(*param))
 		return -EINVAL;
+	BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
 
 	param = RTA_DATA(rta);
 	keys->enckeylen = be32_to_cpu(param->enckeylen);
 
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
+	key += rta->rta_len;
+	keylen -= rta->rta_len;
 
 	if (keylen < keys->enckeylen)
 		return -EINVAL;
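
Note (not part of the diff): an authenc() key blob is an rtattr-framed buffer: a CRYPTO_AUTHENC_KEYA_PARAM attribute whose payload is struct crypto_authenc_key_param (a single big-endian enckeylen word), followed by the authentication key and then the encryption key. A hypothetical builder mirroring the parser above (names illustrative, not kernel API):

	#include <linux/rtnetlink.h>
	#include <crypto/authenc.h>

	static int build_authenc_key(u8 *buf, unsigned int buflen,
				     const u8 *authkey, unsigned int authkeylen,
				     const u8 *enckey, unsigned int enckeylen)
	{
		struct rtattr *rta = (struct rtattr *)buf;
		struct crypto_authenc_key_param *param;

		if (buflen < RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen)
			return -EINVAL;

		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
		rta->rta_len = RTA_LENGTH(sizeof(*param));	/* 4-byte aligned size */
		param = RTA_DATA(rta);
		param->enckeylen = cpu_to_be32(enckeylen);

		memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
		memcpy(buf + RTA_SPACE(sizeof(*param)) + authkeylen,
		       enckey, enckeylen);
		return 0;
	}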

crypto/authencesn.c

@@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
 	struct aead_request *req = areq->data;
 
 	err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
-	aead_request_complete(req, err);
+	authenc_esn_request_complete(req, err);
 }
 
 static int crypto_authenc_esn_decrypt(struct aead_request *req)
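
Note (not part of the diff): the helper already defined earlier in crypto/authencesn.c swallows -EINPROGRESS, so the AEAD request is completed exactly once, when the asynchronous operation really finishes:

	static void authenc_esn_request_complete(struct aead_request *req, int err)
	{
		if (err != -EINPROGRESS)
			aead_request_complete(req, err);
	}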

crypto/sm3_generic.c

@@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m)
 
 	for (i = 0; i <= 63; i++) {
 
-		ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
+		ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
 
 		ss2 = ss1 ^ rol32(a, 12);
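
Note (not part of the diff): i runs up to 63, and shifting a 32-bit value by 32 or more is undefined behaviour in C. Rotation is periodic mod 32, so masking the count with i & 31 keeps both shift amounts in range without changing the result. A fully defined rotate, as a sketch (hypothetical name):

	static inline u32 rol32_safe(u32 word, unsigned int shift)
	{
		shift &= 31;	/* rotating by n and by n mod 32 are identical */
		return (word << shift) | (word >> ((32 - shift) & 31));
	}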

drivers/crypto/Kconfig

@@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU
 	depends on ARCH_BCM_IPROC
 	depends on MAILBOX
 	default m
+	select CRYPTO_AUTHENC
 	select CRYPTO_DES
 	select CRYPTO_MD5
 	select CRYPTO_SHA1

drivers/crypto/bcm/cipher.c

@@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 	struct spu_hw *spu = &iproc_priv.spu;
 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
 	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
-	struct rtattr *rta = (void *)key;
-	struct crypto_authenc_key_param *param;
-	const u8 *origkey = key;
-	const unsigned int origkeylen = keylen;
-	int ret = 0;
+	struct crypto_authenc_keys keys;
+	int ret;
 
 	flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
 		 keylen);
 	flow_dump("  key: ", key, keylen);
 
-	if (!RTA_OK(rta, keylen))
-		goto badkey;
-	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-		goto badkey;
-	if (RTA_PAYLOAD(rta) < sizeof(*param))
+	ret = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (ret)
 		goto badkey;
 
-	param = RTA_DATA(rta);
-	ctx->enckeylen = be32_to_cpu(param->enckeylen);
-
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
-
-	if (keylen < ctx->enckeylen)
-		goto badkey;
-	if (ctx->enckeylen > MAX_KEY_SIZE)
+	if (keys.enckeylen > MAX_KEY_SIZE ||
+	    keys.authkeylen > MAX_KEY_SIZE)
 		goto badkey;
 
-	ctx->authkeylen = keylen - ctx->enckeylen;
-
-	if (ctx->authkeylen > MAX_KEY_SIZE)
-		goto badkey;
+	ctx->enckeylen = keys.enckeylen;
+	ctx->authkeylen = keys.authkeylen;
 
-	memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
+	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
 	/* May end up padding auth key. So make sure it's zeroed. */
 	memset(ctx->authkey, 0, sizeof(ctx->authkey));
-	memcpy(ctx->authkey, key, ctx->authkeylen);
+	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
 
 	switch (ctx->alg->cipher_info.alg) {
 	case CIPHER_ALG_DES:
@@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 			u32 tmp[DES_EXPKEY_WORDS];
 			u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
 
-			if (des_ekey(tmp, key) == 0) {
+			if (des_ekey(tmp, keys.enckey) == 0) {
 				if (crypto_aead_get_flags(cipher) &
 				    CRYPTO_TFM_REQ_WEAK_KEY) {
 					crypto_aead_set_flags(cipher, flags);
@@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 		break;
 	case CIPHER_ALG_3DES:
 		if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
-			const u32 *K = (const u32 *)key;
+			const u32 *K = (const u32 *)keys.enckey;
 			u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
 
 			if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
@@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
 		ctx->fallback_cipher->base.crt_flags |=
 		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
-		ret =
-		    crypto_aead_setkey(ctx->fallback_cipher, origkey,
-				       origkeylen);
+		ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
 		if (ret) {
 			flow_log("  fallback setkey() returned:%d\n", ret);
 			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
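
Note (not part of the diff): crypto_authenc_extractkeys() fills struct crypto_authenc_keys (from include/crypto/authenc.h); the pointers alias the caller's key buffer, so drivers must copy the material out before the buffer goes away:

	struct crypto_authenc_keys {
		const u8 *authkey;
		const u8 *enckey;
		unsigned int authkeylen;
		unsigned int enckeylen;
	};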

drivers/crypto/caam/caamalg.c

@@ -3476,7 +3476,7 @@ static int __init caam_algapi_init(void)
 		 * Skip algorithms requiring message digests
 		 * if MD or MD size is not supported by device.
 		 */
-		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
+		if (is_mdha(c2_alg_sel) &&
 		    (!md_inst || t_alg->aead.maxauthsize > md_limit))
 			continue;

drivers/crypto/caam/caamhash.c

@@ -1072,13 +1072,16 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 	desc = edesc->hw_desc;
 
-	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, state->buf_dma)) {
-		dev_err(jrdev, "unable to map src\n");
-		goto unmap;
-	}
+	if (buflen) {
+		state->buf_dma = dma_map_single(jrdev, buf, buflen,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, state->buf_dma)) {
+			dev_err(jrdev, "unable to map src\n");
+			goto unmap;
+		}
 
-	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+	}
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);

drivers/crypto/caam/desc.h

@@ -1155,6 +1155,7 @@
 #define OP_ALG_ALGSEL_DES	(0x20 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_3DES	(0x21 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_ARC4	(0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_CHA_MDHA		(0x40 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_MD5	(0x40 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_SHA1	(0x41 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_SHA224	(0x42 << OP_ALG_ALGSEL_SHIFT)

drivers/crypto/caam/error.h

@@ -7,6 +7,9 @@
 #ifndef CAAM_ERROR_H
 #define CAAM_ERROR_H
+
+#include "desc.h"
+
 #define CAAM_ERROR_STR_MAX 302
 
 void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
@@ -17,4 +20,10 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
 void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
 		  int rowsize, int groupsize, struct scatterlist *sg,
 		  size_t tlen, bool ascii);
+
+static inline bool is_mdha(u32 algtype)
+{
+	return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) ==
+	       OP_ALG_CHA_MDHA;
+}
 #endif /* CAAM_ERROR_H */
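
Note (not part of the diff): OP_ALG_CHA_MDHA names the selector class shared by all MDHA (message digest hardware accelerator) algorithms, replacing the bare 0x40 previously compared in caamalg.c. Assuming OP_ALG_ALGSEL_SUBMASK covers the low selector bits that distinguish family members, the helper behaves like:

	is_mdha(OP_ALG_ALGSEL_SHA256);	/* true: SHA-256 is in the 0x40 hash class */
	is_mdha(OP_ALG_ALGSEL_AES);	/* false: AES is a cipher selector */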

drivers/crypto/cavium/nitrox/nitrox_reqmgr.c

@@ -567,10 +567,10 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
 
 		/* ORH error code */
 		err = READ_ONCE(*sr->resp.orh) & 0xff;
-		softreq_destroy(sr);
 
 		if (sr->callback)
 			sr->callback(sr->cb_arg, err);
+		softreq_destroy(sr);
 
 		req_completed++;
 	}
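
Note (not part of the diff): sr->callback and sr->cb_arg live inside *sr, so reading them after softreq_destroy(sr) has freed the request is a use-after-free. In generic form:

	/* buggy: kfree(obj); if (obj->cb) obj->cb(obj->arg);  -- use-after-free
	 * fixed: if (obj->cb) obj->cb(obj->arg); kfree(obj);  -- free last
	 */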

drivers/crypto/ccree/cc_aead.c

@@ -549,13 +549,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 			  unsigned int keylen)
 {
 	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct rtattr *rta = (struct rtattr *)key;
 	struct cc_crypto_req cc_req = {};
-	struct crypto_authenc_key_param *param;
 	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
-	int rc = -EINVAL;
 	unsigned int seq_len = 0;
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	const u8 *enckey, *authkey;
+	int rc;
 
 	dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
 		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
@@ -563,35 +562,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 	/* STAT_PHASE_0: Init and sanity checks */
 
 	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
-		if (!RTA_OK(rta, keylen))
+		struct crypto_authenc_keys keys;
+
+		rc = crypto_authenc_extractkeys(&keys, key, keylen);
+		if (rc)
 			goto badkey;
-		if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-			goto badkey;
-		if (RTA_PAYLOAD(rta) < sizeof(*param))
-			goto badkey;
-		param = RTA_DATA(rta);
-		ctx->enc_keylen = be32_to_cpu(param->enckeylen);
-		key += RTA_ALIGN(rta->rta_len);
-		keylen -= RTA_ALIGN(rta->rta_len);
-		if (keylen < ctx->enc_keylen)
-			goto badkey;
-		ctx->auth_keylen = keylen - ctx->enc_keylen;
+		enckey = keys.enckey;
+		authkey = keys.authkey;
+		ctx->enc_keylen = keys.enckeylen;
+		ctx->auth_keylen = keys.authkeylen;
 
 		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
 			/* the nonce is stored in bytes at end of key */
+			rc = -EINVAL;
 			if (ctx->enc_keylen <
 			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
 				goto badkey;
 			/* Copy nonce from last 4 bytes in CTR key to
 			 *  first 4 bytes in CTR IV
 			 */
-			memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
-			       ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
-			       CTR_RFC3686_NONCE_SIZE);
+			memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
+			       CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
 			/* Set CTR key size */
 			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
 		}
 	} else { /* non-authenc - has just one key */
+		enckey = key;
+		authkey = NULL;
 		ctx->enc_keylen = keylen;
 		ctx->auth_keylen = 0;
 	}
@@ -603,13 +600,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 	/* STAT_PHASE_1: Copy key to ctx */
 
 	/* Get key material */
-	memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
+	memcpy(ctx->enckey, enckey, ctx->enc_keylen);
 	if (ctx->enc_keylen == 24)
 		memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
 	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
-		memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
+		memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
+		       ctx->auth_keylen);
 	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
-		rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+		rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
 		if (rc)
 			goto badkey;
 	}

drivers/crypto/talitos.c

@@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	bool is_sec1 = has_ftr_sec1(priv);
 	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
-	void *err;
 
 	if (cryptlen + authsize > max_len) {
 		dev_err(dev, "length exceeds h/w max limit\n");
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (ivsize)
-		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
-
 	if (!dst || dst == src) {
 		src_len = assoclen + cryptlen + authsize;
 		src_nents = sg_nents_for_len(src, src_len);
 		if (src_nents < 0) {
 			dev_err(dev, "Invalid number of src SG.\n");
-			err = ERR_PTR(-EINVAL);
-			goto error_sg;
+			return ERR_PTR(-EINVAL);
 		}
 		src_nents = (src_nents == 1) ? 0 : src_nents;
 		dst_nents = dst ? src_nents : 0;
@@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 		src_nents = sg_nents_for_len(src, src_len);
 		if (src_nents < 0) {
 			dev_err(dev, "Invalid number of src SG.\n");
-			err = ERR_PTR(-EINVAL);
-			goto error_sg;
+			return ERR_PTR(-EINVAL);
 		}
 		src_nents = (src_nents == 1) ? 0 : src_nents;
 		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
 		dst_nents = sg_nents_for_len(dst, dst_len);
 		if (dst_nents < 0) {
 			dev_err(dev, "Invalid number of dst SG.\n");
-			err = ERR_PTR(-EINVAL);
-			goto error_sg;
+			return ERR_PTR(-EINVAL);
 		}
 		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
 	}
@@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 	/* if its a ahash, add space for a second desc next to the first one */
 	if (is_sec1 && !dst)
 		alloc_len += sizeof(struct talitos_desc);
+	alloc_len += ivsize;
 
 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
-	if (!edesc) {
-		err = ERR_PTR(-ENOMEM);
-		goto error_sg;
-	}
+	if (!edesc)
+		return ERR_PTR(-ENOMEM);
+	if (ivsize) {
+		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
+		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+	}
 
 	memset(&edesc->desc, 0, sizeof(edesc->desc));
@@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 					     DMA_BIDIRECTIONAL);
 	}
 	return edesc;
-error_sg:
-	if (iv_dma)
-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
-	return err;
 }
 
 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
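
Note (not part of the diff): with CONFIG_VMAP_STACK the kernel stack may be vmalloc()ed, and vmalloc memory must not be handed to dma_map_single(). The fix therefore grows the kmalloc()ed (DMA-able) edesc by ivsize, copies the IV into that tail, and maps the copy; deferring the mapping until after allocation also lets the early error paths return directly instead of unwinding through error_sg. The bounce pattern, sketched with hypothetical names:

	static dma_addr_t map_iv_bounced(struct device *dev, void *bounce_tail,
					 const u8 *iv, unsigned int ivsize)
	{
		/* bounce_tail must be kmalloc()ed memory, never stack memory */
		memcpy(bounce_tail, iv, ivsize);
		return dma_map_single(dev, bounce_tail, ivsize, DMA_TO_DEVICE);
	}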