crypto: qat - extend buffer list interface

The compression service requires an additional pre-allocated buffer for
each destination scatter list.
Extend the function qat_bl_sgl_to_bufl() to take an additional
structure that contains the DMA address and the size of the extra
buffer, which will be appended to the destination FW SGL.
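
For illustration only, the new call shape looks as follows (a minimal
sketch; dc_extra_buff and DC_EXTRA_BUFF_SZ are hypothetical names for a
compression caller's pre-allocated, already DMA-mapped spare buffer and
are not part of this patch):

	struct qat_sgl_to_bufl_params params = {
		.extra_dst_buff = dc_extra_buff,	/* dma_addr_t of spare buffer */
		.sz_extra_dst_buff = DC_EXTRA_BUFF_SZ,	/* its size in bytes */
	};

	/* Build the FW SGLs; the extra buffer is appended after the
	 * DMA-mapped entries of the destination scatterlist.
	 */
	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
				 &qat_req->buf, &params, f);

Callers that do not need the extra buffer pass NULL in place of the
params pointer, as the aead and skcipher paths below do.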

The logic that unmaps buffers in qat_bl_free_bufl() has been changed to
start unmapping from buffer 0 instead of skipping the initial
(num_bufs - num_mapped_bufs) buffers, as that functionality was not used
in the code.
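
Put differently, num_bufs now counts every entry in the destination FW
SGL (including the appended extra buffer), while num_mapped_bufs counts
only the entries that were DMA-mapped from the scatterlist, so the free
path can simply walk the mapped range. A sketch of the bookkeeping, with
the two-statement update from the diff folded into one line:

	/* map path: the extra buffer, if any, sits past the mapped entries */
	buflout->num_bufs = sg_nctr + extra_buff;
	buflout->num_mapped_bufs = sg_nctr;

	/* free path: unmap only what was mapped here, never the
	 * caller-owned extra buffer at the tail
	 */
	for (i = 0; i < blout->num_mapped_bufs; i++)
		dma_unmap_single(dev, blout->bufers[i].addr,
				 blout->bufers[i].len, DMA_FROM_DEVICE);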

Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
Reviewed-by: Adam Guerin <adam.guerin@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Giovanni Cabiddu 2022-11-28 12:21:16 +00:00, committed by Herbert Xu
Parent: 36ebc7472a
Commit: cf692906bd
3 changed files with 54 additions and 18 deletions

--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c

@@ -800,7 +800,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
 		return -EINVAL;
 
 	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
-				 &qat_req->buf, f);
+				 &qat_req->buf, NULL, f);
 	if (unlikely(ret))
 		return ret;
@@ -844,7 +844,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
 		return -EINVAL;
 
 	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
-				 &qat_req->buf, f);
+				 &qat_req->buf, NULL, f);
 	if (unlikely(ret))
 		return ret;
@@ -1030,7 +1030,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
 		return 0;
 
 	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
-				 &qat_req->buf, f);
+				 &qat_req->buf, NULL, f);
 	if (unlikely(ret))
 		return ret;
@@ -1097,7 +1097,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 		return 0;
 
 	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
-				 &qat_req->buf, f);
+				 &qat_req->buf, NULL, f);
 	if (unlikely(ret))
 		return ret;

--- a/drivers/crypto/qat/qat_common/qat_bl.c
+++ b/drivers/crypto/qat/qat_common/qat_bl.c

@@ -35,10 +35,7 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
 		kfree(bl);
 
 	if (blp != blpout) {
 		/* If out of place operation dma unmap only data */
-		int bufless = blout->num_bufs - blout->num_mapped_bufs;
-
-		for (i = bufless; i < blout->num_bufs; i++) {
+		for (i = 0; i < blout->num_mapped_bufs; i++) {
 			dma_unmap_single(dev, blout->bufers[i].addr,
 					 blout->bufers[i].len,
 					 DMA_FROM_DEVICE);
@@ -50,11 +47,13 @@
 	}
 }
 
-int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
-		       struct scatterlist *sgl,
-		       struct scatterlist *sglout,
-		       struct qat_request_buffs *buf,
-		       gfp_t flags)
+static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+				struct scatterlist *sgl,
+				struct scatterlist *sglout,
+				struct qat_request_buffs *buf,
+				dma_addr_t extra_dst_buff,
+				size_t sz_extra_dst_buff,
+				gfp_t flags)
 {
 	struct device *dev = &GET_DEV(accel_dev);
 	int i, sg_nctr = 0;
@@ -86,7 +85,7 @@ int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
 
-	for_each_sg(sgl, sg, n, i)
+	for (i = 0; i < n; i++)
 		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
 
 	for_each_sg(sgl, sg, n, i) {
@@ -113,8 +112,10 @@ int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 	/* Handle out of place operation */
 	if (sgl != sglout) {
 		struct qat_alg_buf *bufers;
+		int extra_buff = extra_dst_buff ? 1 : 0;
+		int n_sglout = sg_nents(sglout);
 
-		n = sg_nents(sglout);
+		n = n_sglout + extra_buff;
 		sz_out = struct_size(buflout, bufers, n);
 		sg_nctr = 0;
@@ -129,10 +130,10 @@ int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 		}
 
 		bufers = buflout->bufers;
-		for_each_sg(sglout, sg, n, i)
+		for (i = 0; i < n; i++)
 			bufers[i].addr = DMA_MAPPING_ERROR;
 
-		for_each_sg(sglout, sg, n, i) {
+		for_each_sg(sglout, sg, n_sglout, i) {
 			int y = sg_nctr;
 
 			if (!sg->length)
@@ -146,7 +147,13 @@ int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 			bufers[y].len = sg->length;
 			sg_nctr++;
 		}
+
+		if (extra_buff) {
+			bufers[sg_nctr].addr = extra_dst_buff;
+			bufers[sg_nctr].len = sz_extra_dst_buff;
+		}
 		buflout->num_bufs = sg_nctr;
+		buflout->num_bufs += extra_buff;
 		buflout->num_mapped_bufs = sg_nctr;
 		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(dev, bloutp)))
@@ -166,11 +173,14 @@ err_out:
 		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
 
 	n = sg_nents(sglout);
-	for (i = 0; i < n; i++)
+	for (i = 0; i < n; i++) {
+		if (buflout->bufers[i].addr == extra_dst_buff)
+			break;
 		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
 			dma_unmap_single(dev, buflout->bufers[i].addr,
 					 buflout->bufers[i].len,
 					 DMA_FROM_DEVICE);
+	}
 
 	if (!buf->sgl_dst_valid)
 		kfree(buflout);
@@ -192,3 +202,23 @@ err_in:
 	dev_err(dev, "Failed to map buf for dma\n");
 	return -ENOMEM;
 }
+
+int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+		       struct scatterlist *sgl,
+		       struct scatterlist *sglout,
+		       struct qat_request_buffs *buf,
+		       struct qat_sgl_to_bufl_params *params,
+		       gfp_t flags)
+{
+	dma_addr_t extra_dst_buff = 0;
+	size_t sz_extra_dst_buff = 0;
+
+	if (params) {
+		extra_dst_buff = params->extra_dst_buff;
+		sz_extra_dst_buff = params->sz_extra_dst_buff;
+	}
+
+	return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
+				    extra_dst_buff, sz_extra_dst_buff,
+				    flags);
+}
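
Note that the NULL-tolerant wrapper above keeps existing callers source
compatible: with params == NULL, extra_dst_buff stays 0, so extra_buff
evaluates to 0 and the destination FW SGL is built exactly as before.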

--- a/drivers/crypto/qat/qat_common/qat_bl.h
+++ b/drivers/crypto/qat/qat_common/qat_bl.h

@@ -38,12 +38,18 @@ struct qat_request_buffs {
 	struct qat_alg_fixed_buf_list sgl_dst;
 };
 
+struct qat_sgl_to_bufl_params {
+	dma_addr_t extra_dst_buff;
+	size_t sz_extra_dst_buff;
+};
+
 void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
 		      struct qat_request_buffs *buf);
 int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 		       struct scatterlist *sgl,
 		       struct scatterlist *sglout,
 		       struct qat_request_buffs *buf,
+		       struct qat_sgl_to_bufl_params *params,
 		       gfp_t flags);
 
 #endif