crypto: mediatek - regroup functions by usage

This patch only regroups functions by usage; there is no functional
change. This will make it easier to integrate the later GCM support
patch by arranging the code sections that will be shared, such as the
common code reused by GCM, the AES mode setting, and the DMA transfer.

Signed-off-by: Ryder Lee <ryder.lee@mediatek.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Ryder Lee authored on 2017-01-20 13:41:12 +08:00, committed by Herbert Xu
Parent: 87421984b4
Commit: 0abc271494
1 changed file with 141 additions and 131 deletions

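Editorial note: the heart of the cleanup is the new mtk_aes_set_mode() helper (second hunk below), which replaces the open-coded mode bookkeeping in mtk_aes_start(). Instead of masking mode bits in and out with AES_FLAGS_MODE_MSK, it keeps only the persistent BUSY flag and takes all mode bits from the request context. A minimal, self-contained userspace sketch of that flag logic follows; the ECB/CBC/ENCRYPT values are the ones from the diff, while the AES_FLAGS_BUSY value and the pared-down structs are assumptions made here purely for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_ENCRYPT	BIT(2)
#define AES_FLAGS_BUSY		BIT(3)	/* assumed value, for illustration */

struct mtk_aes_rec { uint32_t flags; };		/* reduced to the one field used here */
struct mtk_aes_reqctx { uint32_t mode; };

/* Clear all but persistent flags and set request flags (as in the patch). */
static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}

int main(void)
{
	/* Record is busy and still carries a stale ECB mode bit. */
	struct mtk_aes_rec aes = { .flags = AES_FLAGS_BUSY | AES_FLAGS_ECB };
	struct mtk_aes_reqctx rctx = { .mode = AES_FLAGS_CBC | AES_FLAGS_ENCRYPT };

	mtk_aes_set_mode(&aes, &rctx);

	/* BUSY survives; ECB is gone; CBC|ENCRYPT is now set. */
	assert(aes.flags == (AES_FLAGS_BUSY | AES_FLAGS_CBC | AES_FLAGS_ENCRYPT));
	printf("flags = 0x%x\n", (unsigned int)aes.flags);
	return 0;
}

Since rctx->mode only ever carries mode and direction bits, masking with the one persistent flag makes the separate AES_FLAGS_MODE_MSK definition unnecessary, which is why the first hunk deletes it.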

drivers/crypto/mediatek/mtk-aes.c

@@ -42,7 +42,6 @@
 #define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)
 
 /* AES flags */
-#define AES_FLAGS_MODE_MSK	0x7
 #define AES_FLAGS_ECB		BIT(0)
 #define AES_FLAGS_CBC		BIT(1)
 #define AES_FLAGS_ENCRYPT	BIT(2)
@@ -170,65 +169,28 @@ static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
 	return false;
 }
 
-/* Initialize and map transform information of AES */
-static int mtk_aes_info_map(struct mtk_cryp *cryp,
-			    struct mtk_aes_rec *aes,
-			    size_t len)
+static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
+				    const struct mtk_aes_reqctx *rctx)
 {
-	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
-	struct mtk_aes_base_ctx *ctx = aes->ctx;
+	/* Clear all but persistent flags and set request flags. */
+	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
+}
 
-	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
-	ctx->ct.cmd[0] = AES_CMD0 | cpu_to_le32(len);
-	ctx->ct.cmd[1] = AES_CMD1;
+static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
+{
+	struct scatterlist *sg = dma->sg;
+	int nents = dma->nents;
 
-	if (aes->flags & AES_FLAGS_ENCRYPT)
-		ctx->tfm.ctrl[0] = AES_TFM_BASIC_OUT;
-	else
-		ctx->tfm.ctrl[0] = AES_TFM_BASIC_IN;
+	if (!dma->remainder)
+		return;
 
-	if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_128))
-		ctx->tfm.ctrl[0] |= AES_TFM_128BITS;
-	else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_256))
-		ctx->tfm.ctrl[0] |= AES_TFM_256BITS;
-	else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_192))
-		ctx->tfm.ctrl[0] |= AES_TFM_192BITS;
+	while (--nents > 0 && sg)
+		sg = sg_next(sg);
 
-	if (aes->flags & AES_FLAGS_CBC) {
-		const u32 *iv = (const u32 *)req->info;
-		u32 *iv_state = ctx->tfm.state + ctx->keylen;
-		int i;
+	if (!sg)
+		return;
 
-		ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen +
-				    SIZE_IN_WORDS(AES_BLOCK_SIZE));
-		ctx->tfm.ctrl[1] = AES_TFM_CBC | AES_TFM_FULL_IV;
-
-		for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
-			iv_state[i] = cpu_to_le32(iv[i]);
-
-		ctx->ct.cmd[2] = AES_CMD2;
-		ctx->ct_size = AES_CT_SIZE_CBC;
-	} else if (aes->flags & AES_FLAGS_ECB) {
-		ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen);
-		ctx->tfm.ctrl[1] = AES_TFM_ECB;
-		ctx->ct_size = AES_CT_SIZE_ECB;
-	}
-
-	ctx->ct_dma = dma_map_single(cryp->dev, &ctx->ct, sizeof(ctx->ct),
-				     DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
-		return -EINVAL;
-
-	ctx->tfm_dma = dma_map_single(cryp->dev, &ctx->tfm, sizeof(ctx->tfm),
-				      DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(cryp->dev, ctx->tfm_dma))) {
-		dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm),
-				 DMA_TO_DEVICE);
-		return -EINVAL;
-	}
-	return 0;
+	sg->length += dma->remainder;
 }
 
 /*
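Editorial note: mtk_aes_restore_sg() is unchanged by this hunk; it merely moves up so that the regrouped DMA helpers below can call it. Its job is to walk to the last scatterlist entry handed to the hardware and re-add the bytes that were trimmed off for block alignment. The following self-contained sketch shows the same walk against a mock scatterlist; the struct scatterlist and sg_next() below are simplified stand-ins, not the kernel definitions.

#include <assert.h>
#include <stddef.h>

/* Mock scatterlist: just enough to show the walk. */
struct scatterlist {
	unsigned int length;
	struct scatterlist *next;
};

static struct scatterlist *sg_next(struct scatterlist *sg)
{
	return sg->next;
}

struct mtk_aes_dma {
	struct scatterlist *sg;
	int nents;		/* entries handed to the hardware */
	unsigned int remainder;	/* bytes trimmed off the last entry */
};

/* Same walk as mtk_aes_restore_sg(): find the last used entry and
 * give it back the bytes that were trimmed for alignment. */
static void restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}

int main(void)
{
	struct scatterlist last = { .length = 16, .next = NULL };
	struct scatterlist first = { .length = 32, .next = &last };
	/* Pretend 4 trailing bytes were trimmed before DMA mapping. */
	struct mtk_aes_dma dma = { .sg = &first, .nents = 2, .remainder = 4 };

	restore_sg(&dma);
	assert(last.length == 20);	/* remainder restored */
	return 0;
}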
@@ -288,24 +250,134 @@ static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 	return -EINPROGRESS;
 }
 
-static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
+static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 {
-	struct scatterlist *sg = dma->sg;
-	int nents = dma->nents;
+	struct mtk_aes_base_ctx *ctx = aes->ctx;
 
-	if (!dma->remainder)
-		return;
+	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct),
+			 DMA_TO_DEVICE);
+	dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm),
+			 DMA_TO_DEVICE);
 
-	while (--nents > 0 && sg)
-		sg = sg_next(sg);
+	if (aes->src.sg == aes->dst.sg) {
+		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
+			     DMA_BIDIRECTIONAL);
 
-	if (!sg)
-		return;
+		if (aes->src.sg != &aes->aligned_sg)
+			mtk_aes_restore_sg(&aes->src);
+	} else {
+		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
+			     DMA_FROM_DEVICE);
 
-	sg->length += dma->remainder;
+		if (aes->dst.sg != &aes->aligned_sg)
+			mtk_aes_restore_sg(&aes->dst);
+
+		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
+			     DMA_TO_DEVICE);
+
+		if (aes->src.sg != &aes->aligned_sg)
+			mtk_aes_restore_sg(&aes->src);
+	}
+
+	if (aes->dst.sg == &aes->aligned_sg)
+		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
+				    aes->buf, aes->total);
 }
 
-static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
+static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
+{
+	struct mtk_aes_base_ctx *ctx = aes->ctx;
+
+	ctx->ct_dma = dma_map_single(cryp->dev, &ctx->ct, sizeof(ctx->ct),
+				     DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
+		return -EINVAL;
+
+	ctx->tfm_dma = dma_map_single(cryp->dev, &ctx->tfm, sizeof(ctx->tfm),
+				      DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(cryp->dev, ctx->tfm_dma)))
+		goto tfm_map_err;
+
+	if (aes->src.sg == aes->dst.sg) {
+		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
+					     aes->src.nents,
+					     DMA_BIDIRECTIONAL);
+		aes->dst.sg_len = aes->src.sg_len;
+		if (unlikely(!aes->src.sg_len))
+			goto sg_map_err;
+	} else {
+		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
+					     aes->src.nents, DMA_TO_DEVICE);
+		if (unlikely(!aes->src.sg_len))
+			goto sg_map_err;
+
+		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
+					     aes->dst.nents, DMA_FROM_DEVICE);
+		if (unlikely(!aes->dst.sg_len)) {
+			dma_unmap_sg(cryp->dev, aes->src.sg,
+				     aes->src.nents, DMA_TO_DEVICE);
+			goto sg_map_err;
+		}
+	}
+
+	return mtk_aes_xmit(cryp, aes);
+
+sg_map_err:
+	dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm),
+			 DMA_TO_DEVICE);
+tfm_map_err:
+	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct),
+			 DMA_TO_DEVICE);
+
+	return -EINVAL;
+}
+
+/* Initialize transform information of CBC/ECB mode */
+static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
+			      size_t len)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
+	struct mtk_aes_base_ctx *ctx = aes->ctx;
+
+	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
+	ctx->ct.cmd[0] = AES_CMD0 | cpu_to_le32(len);
+	ctx->ct.cmd[1] = AES_CMD1;
+
+	if (aes->flags & AES_FLAGS_ENCRYPT)
+		ctx->tfm.ctrl[0] = AES_TFM_BASIC_OUT;
+	else
+		ctx->tfm.ctrl[0] = AES_TFM_BASIC_IN;
+
+	if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_128))
+		ctx->tfm.ctrl[0] |= AES_TFM_128BITS;
+	else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_256))
+		ctx->tfm.ctrl[0] |= AES_TFM_256BITS;
+	else
+		ctx->tfm.ctrl[0] |= AES_TFM_192BITS;
+
+	if (aes->flags & AES_FLAGS_CBC) {
+		const u32 *iv = (const u32 *)req->info;
+		u32 *iv_state = ctx->tfm.state + ctx->keylen;
+		int i;
+
+		ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen +
+				    SIZE_IN_WORDS(AES_BLOCK_SIZE));
+		ctx->tfm.ctrl[1] = AES_TFM_CBC | AES_TFM_FULL_IV;
+
+		for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
+			iv_state[i] = cpu_to_le32(iv[i]);
+
+		ctx->ct.cmd[2] = AES_CMD2;
+		ctx->ct_size = AES_CT_SIZE_CBC;
+	} else if (aes->flags & AES_FLAGS_ECB) {
+		ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen);
+		ctx->tfm.ctrl[1] = AES_TFM_ECB;
+		ctx->ct_size = AES_CT_SIZE_ECB;
+	}
+}
+
+static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
 		       struct scatterlist *src, struct scatterlist *dst,
 		       size_t len)
 {
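Editorial note: the new mtk_aes_map() above unwinds errors with fall-through gotos: each mapping stage that fails jumps to a label that releases only the stages already completed, and the labels fall through so cleanup runs in reverse order of setup. A generic, runnable sketch of this pattern, with hypothetical map_*/unmap_* stand-ins rather than the real DMA calls:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the three mapping stages. */
static bool map_ct(void)  { puts("map ct");  return true; }
static bool map_tfm(void) { puts("map tfm"); return true; }
static bool map_sg(void)  { puts("map sg");  return false; }	/* fails */

static void unmap_ct(void)  { puts("unmap ct"); }
static void unmap_tfm(void) { puts("unmap tfm"); }

static int setup(void)
{
	if (!map_ct())
		return -1;		/* nothing to undo yet */

	if (!map_tfm())
		goto tfm_map_err;

	if (!map_sg())
		goto sg_map_err;

	return 0;

sg_map_err:
	unmap_tfm();	/* falls through so earlier stages unwind too */
tfm_map_err:
	unmap_ct();
	return -1;
}

int main(void)
{
	/* Prints: map ct, map tfm, map sg, unmap tfm, unmap ct. */
	return setup() ? 1 : 0;
}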
@@ -346,28 +418,9 @@ static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
 		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
 	}
 
-	if (aes->src.sg == aes->dst.sg) {
-		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
-					     aes->src.nents, DMA_BIDIRECTIONAL);
-		aes->dst.sg_len = aes->src.sg_len;
-		if (unlikely(!aes->src.sg_len))
-			return -EFAULT;
-	} else {
-		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
-					     aes->src.nents, DMA_TO_DEVICE);
-		if (unlikely(!aes->src.sg_len))
-			return -EFAULT;
-
-		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
-					     aes->dst.nents, DMA_FROM_DEVICE);
-		if (unlikely(!aes->dst.sg_len)) {
-			dma_unmap_sg(cryp->dev, aes->src.sg,
-				     aes->src.nents, DMA_TO_DEVICE);
-			return -EFAULT;
-		}
-	}
+	mtk_aes_info_init(cryp, aes, len + padlen);
 
-	return mtk_aes_info_map(cryp, aes, len + padlen);
+	return mtk_aes_map(cryp, aes);
 }
 
 static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
@@ -419,54 +472,11 @@ static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 {
 	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
 	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
-	int err;
 
-	rctx = ablkcipher_request_ctx(req);
-	rctx->mode &= AES_FLAGS_MODE_MSK;
-	aes->flags = (aes->flags & ~AES_FLAGS_MODE_MSK) | rctx->mode;
-
+	mtk_aes_set_mode(aes, rctx);
 	aes->resume = mtk_aes_complete;
 
-	err = mtk_aes_map(cryp, aes, req->src, req->dst, req->nbytes);
-	if (err)
-		return err;
-
-	return mtk_aes_xmit(cryp, aes);
-}
-
-static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
-{
-	struct mtk_aes_base_ctx *ctx = aes->ctx;
-
-	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct),
-			 DMA_TO_DEVICE);
-	dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm),
-			 DMA_TO_DEVICE);
-
-	if (aes->src.sg == aes->dst.sg) {
-		dma_unmap_sg(cryp->dev, aes->src.sg,
-			     aes->src.nents, DMA_BIDIRECTIONAL);
-
-		if (aes->src.sg != &aes->aligned_sg)
-			mtk_aes_restore_sg(&aes->src);
-	} else {
-		dma_unmap_sg(cryp->dev, aes->dst.sg,
-			     aes->dst.nents, DMA_FROM_DEVICE);
-
-		if (aes->dst.sg != &aes->aligned_sg)
-			mtk_aes_restore_sg(&aes->dst);
-
-		dma_unmap_sg(cryp->dev, aes->src.sg,
-			     aes->src.nents, DMA_TO_DEVICE);
-
-		if (aes->src.sg != &aes->aligned_sg)
-			mtk_aes_restore_sg(&aes->src);
-	}
-
-	if (aes->dst.sg == &aes->aligned_sg)
-		sg_copy_from_buffer(aes->real_dst,
-				    sg_nents(aes->real_dst),
-				    aes->buf, aes->total);
-}
+	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
+}
 
 /* Check and set the AES key to transform state buffer */