staging: ccree: Fix alignment issues in ssi_buffer_mgr.c

Fixes checkpatch.pl alignment warnings.

Signed-off-by: Simon Sandström <simon@nikanor.nu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
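For context: the warnings in question come from checkpatch.pl's "Alignment should match open parenthesis" check, which fires when the continuation lines of a wrapped function call are not indented to the column just past the call's opening parenthesis. The snippet below is a minimal, self-contained sketch of that style; the helper name hypothetical_map and its arguments are made up for illustration and do not appear in the driver.

#include <stdio.h>

/*
 * Hypothetical helper with a long parameter list, used only to
 * demonstrate the indentation style checkpatch.pl expects: the
 * second parameter line starts right under the opening parenthesis.
 */
static int hypothetical_map(void *dev, void *sgl, unsigned int nents,
			    unsigned int max_entries, unsigned int *mapped)
{
	(void)dev;	/* unused in this sketch */
	(void)sgl;
	*mapped = (nents < max_entries) ? nents : max_entries;
	return 0;
}

int main(void)
{
	unsigned int mapped = 0;

	/*
	 * Wrapped call: the continuation argument is aligned to the
	 * column after the '(' rather than to an arbitrary tab stop.
	 * This is the pattern the hunks below apply throughout
	 * ssi_buffer_mgr.c.
	 */
	hypothetical_map(NULL, NULL, 4,
			 16, &mapped);

	printf("mapped=%u\n", mapped);
	return 0;
}

The patch itself is formatting-only (whitespace and one merged string literal), so driver behaviour is unchanged; re-running checkpatch.pl on the modified file should simply no longer report the alignment checks.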
Authored by Simon Sandström on 2017-07-02 01:25:47 +02:00; committed by Greg Kroah-Hartman
Parent: b7e607bf33
Commit: 5dff9f5e69
1 changed file with 164 additions and 131 deletions

--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c

@@ -434,9 +434,8 @@ ssi_aead_handle_config_buf(struct device *dev,
 	if (assoclen > 0) {
 		ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
						     &areq_ctx->ccm_adata_sg,
-						     (AES_BLOCK_SIZE +
-						      areq_ctx->ccm_hdr_size), 0,
-						     false, NULL);
+						     (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
+						     0, false, NULL);
 	}
 	return 0;
 }
@@ -498,16 +497,12 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
			      req_ctx->mlli_params.mlli_dma_addr);
 	}
-	dma_unmap_sg(dev, src, req_ctx->in_nents,
-		     DMA_BIDIRECTIONAL);
-	SSI_LOG_DEBUG("Unmapped req->src=%pK\n",
-		      sg_virt(src));
+	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+	SSI_LOG_DEBUG("Unmapped req->src=%pK\n", sg_virt(src));
 	if (src != dst) {
-		dma_unmap_sg(dev, dst, req_ctx->out_nents,
-			     DMA_BIDIRECTIONAL);
-		SSI_LOG_DEBUG("Unmapped req->dst=%pK\n",
-			      sg_virt(dst));
+		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+		SSI_LOG_DEBUG("Unmapped req->dst=%pK\n", sg_virt(dst));
 	}
 }
@@ -556,8 +551,10 @@ int ssi_buffer_mgr_map_blkcipher_request(
 	/* Map the src SGL */
 	rc = ssi_buffer_mgr_map_scatterlist(dev, src,
-		nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
-		LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+					    nbytes, DMA_BIDIRECTIONAL,
+					    &req_ctx->in_nents,
+					    LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+					    &mapped_nents);
 	if (unlikely(rc != 0)) {
 		rc = -ENOMEM;
 		goto ablkcipher_exit;
@@ -570,8 +567,10 @@ int ssi_buffer_mgr_map_blkcipher_request(
 		if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
 			req_ctx->out_nents = 0;
 			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
-				req_ctx->in_nents, src,
-				nbytes, 0, true, &req_ctx->in_mlli_nents);
+							     req_ctx->in_nents,
+							     src, nbytes, 0,
+							     true,
+							     &req_ctx->in_mlli_nents);
 		}
 	} else {
 		/* Map the dst sg */
@@ -588,12 +587,14 @@ int ssi_buffer_mgr_map_blkcipher_request(
 		if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
 			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
-				req_ctx->in_nents, src,
-				nbytes, 0, true,
+							     req_ctx->in_nents,
+							     src, nbytes, 0,
+							     true,
							     &req_ctx->in_mlli_nents);
 			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
-				req_ctx->out_nents, dst,
-				nbytes, 0, true,
+							     req_ctx->out_nents,
+							     dst, nbytes, 0,
+							     true,
							     &req_ctx->out_mlli_nents);
 		}
 	}
@@ -691,7 +692,11 @@ void ssi_buffer_mgr_unmap_aead_request(
 	if (unlikely(req->src != req->dst)) {
 		SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
			      sg_virt(req->dst));
-		dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst, size_to_unmap, &dummy, &chained),
+		dma_unmap_sg(dev, req->dst,
+			     ssi_buffer_mgr_get_sgl_nents(req->dst,
+							  size_to_unmap,
+							  &dummy,
+							  &chained),
			     DMA_BIDIRECTIONAL);
 	}
 	if (drvdata->coherent &&
@@ -867,8 +872,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
 	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 		if (unlikely((mapped_nents + 1) >
			     LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
-			SSI_LOG_ERR("CCM case.Too many fragments. "
-				    "Current %d max %d\n",
+			SSI_LOG_ERR("CCM case.Too many fragments. Current %d max %d\n",
				    (areq_ctx->assoc.nents + 1),
				    LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
@@ -950,12 +954,17 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
 	if (likely(req->src == req->dst)) {
 		/*INPLACE*/
 		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
-			areq_ctx->src.nents, areq_ctx->srcSgl,
-			areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
+						     areq_ctx->src.nents,
+						     areq_ctx->srcSgl,
+						     areq_ctx->cryptlen,
+						     areq_ctx->srcOffset,
+						     is_last_table,
						     &areq_ctx->src.mlli_nents);
 		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
-			areq_ctx->src.nents, authsize, *src_last_bytes,
+							      areq_ctx->src.nents,
+							      authsize,
+							      *src_last_bytes,
							      &areq_ctx->is_icv_fragmented);
 		if (unlikely(icv_nents < 0)) {
			rc = -ENOTSUPP;
@@ -1004,16 +1013,24 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
 	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 		/*NON-INPLACE and DECRYPT*/
 		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
-			areq_ctx->src.nents, areq_ctx->srcSgl,
-			areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
+						     areq_ctx->src.nents,
+						     areq_ctx->srcSgl,
+						     areq_ctx->cryptlen,
+						     areq_ctx->srcOffset,
+						     is_last_table,
						     &areq_ctx->src.mlli_nents);
 		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
-			areq_ctx->dst.nents, areq_ctx->dstSgl,
-			areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
+						     areq_ctx->dst.nents,
+						     areq_ctx->dstSgl,
+						     areq_ctx->cryptlen,
+						     areq_ctx->dstOffset,
+						     is_last_table,
						     &areq_ctx->dst.mlli_nents);
 		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
-			areq_ctx->src.nents, authsize, *src_last_bytes,
+							      areq_ctx->src.nents,
+							      authsize,
+							      *src_last_bytes,
							      &areq_ctx->is_icv_fragmented);
 		if (unlikely(icv_nents < 0)) {
			rc = -ENOTSUPP;
@@ -1048,16 +1065,24 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
 	} else {
 		/*NON-INPLACE and ENCRYPT*/
 		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
-			areq_ctx->dst.nents, areq_ctx->dstSgl,
-			areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
+						     areq_ctx->dst.nents,
+						     areq_ctx->dstSgl,
+						     areq_ctx->cryptlen,
+						     areq_ctx->dstOffset,
+						     is_last_table,
						     &areq_ctx->dst.mlli_nents);
 		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
-			areq_ctx->src.nents, areq_ctx->srcSgl,
-			areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
+						     areq_ctx->src.nents,
+						     areq_ctx->srcSgl,
+						     areq_ctx->cryptlen,
+						     areq_ctx->srcOffset,
+						     is_last_table,
						     &areq_ctx->src.mlli_nents);
 		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dstSgl,
-			areq_ctx->dst.nents, authsize, *dst_last_bytes,
+							      areq_ctx->dst.nents,
+							      authsize,
+							      *dst_last_bytes,
							      &areq_ctx->is_icv_fragmented);
 		if (unlikely(icv_nents < 0)) {
			rc = -ENOTSUPP;
@@ -1150,8 +1175,10 @@ static inline int ssi_buffer_mgr_aead_chain_data(
			size_for_map += crypto_aead_ivsize(tfm);
 		rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
-			DMA_BIDIRECTIONAL, &(areq_ctx->dst.nents),
-			LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
+						    DMA_BIDIRECTIONAL,
+						    &(areq_ctx->dst.nents),
+						    LLI_MAX_NUM_OF_DATA_ENTRIES,
+						    &dst_last_bytes,
						    &dst_mapped_nents);
 		if (unlikely(rc != 0)) {
			rc = -ENOMEM;
@@ -1186,8 +1213,11 @@ static inline int ssi_buffer_mgr_aead_chain_data(
	    (dst_mapped_nents > 1) ||
	    do_chain) {
 		areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
-		rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req, sg_data,
-			&src_last_bytes, &dst_last_bytes, is_last_table);
+		rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req,
+							   sg_data,
+							   &src_last_bytes,
+							   &dst_last_bytes,
+							   is_last_table);
 	} else {
 		areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
 		ssi_buffer_mgr_prepare_aead_data_dlli(
@@ -1315,7 +1345,8 @@ int ssi_buffer_mgr_map_aead_request(
			goto aead_map_failure;
 		}
 		if (ssi_aead_handle_config_buf(dev, areq_ctx,
-			areq_ctx->ccm_config, &sg_data, req->assoclen) != 0) {
+					       areq_ctx->ccm_config, &sg_data,
+					       req->assoclen) != 0) {
			rc = -ENOMEM;
			goto aead_map_failure;
 		}
@@ -1493,12 +1524,12 @@ int ssi_buffer_mgr_map_hash_request_final(
 	}
 	if (src && (nbytes > 0) && do_update) {
-		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
-							    nbytes,
+		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src, nbytes,
							    DMA_TO_DEVICE,
							    &areq_ctx->in_nents,
							    LLI_MAX_NUM_OF_DATA_ENTRIES,
-							    &dummy, &mapped_nents))){
+							    &dummy,
+							    &mapped_nents))){
			goto unmap_curr_buff;
 		}
 		if (src && (mapped_nents == 1)
@@ -1519,9 +1550,8 @@ int ssi_buffer_mgr_map_hash_request_final(
 		/* add the src data to the sg_data */
 		ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
						     areq_ctx->in_nents,
-						     src,
-						     nbytes, 0,
-						     true, &areq_ctx->mlli_nents);
+						     src, nbytes, 0, true,
+						     &areq_ctx->mlli_nents);
 		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
							  mlli_params) != 0)) {
			goto fail_unmap_din;
@@ -1629,7 +1659,8 @@ int ssi_buffer_mgr_map_hash_request_update(
							    DMA_TO_DEVICE,
							    &areq_ctx->in_nents,
							    LLI_MAX_NUM_OF_DATA_ENTRIES,
-							    &dummy, &mapped_nents))){
+							    &dummy,
+							    &mapped_nents))){
			goto unmap_curr_buff;
 		}
 		if ((mapped_nents == 1)
@@ -1651,8 +1682,10 @@ int ssi_buffer_mgr_map_hash_request_update(
 		ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
						     areq_ctx->in_nents,
						     src,
-						     (update_data_len - *curr_buff_cnt), 0,
-						     true, &areq_ctx->mlli_nents);
+						     (update_data_len - *curr_buff_cnt),
+						     0,
+						     true,
+						     &areq_ctx->mlli_nents);
 		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
							  mlli_params) != 0)) {
			goto fail_unmap_din;