crypto: caam - refactor ahash_done callbacks
Create two common ahash_done_* functions that take the DMA direction as a parameter. The existing callbacks then call them with the proper direction for unmapping.

Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com>
Reviewed-by: Horia Geanta <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Parent: b7f17fe281
Commit: c3f7394eb9
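As a loose illustration of the pattern described in the commit message, here is a minimal standalone sketch (not the caamhash.c code; done_common, unmap_ctx and the printed labels are stand-ins): a single completion helper takes the DMA direction as a parameter, and the per-callback wrappers shrink to one line each.

#include <stdio.h>

/* Stand-in for the kernel's enum dma_data_direction values. */
enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_FROM_DEVICE   = 2,
};

/* Stand-in for ahash_unmap_ctx(): only the direction matters here. */
static void unmap_ctx(const char *who, enum dma_data_direction dir)
{
	printf("%s: unmap with dir=%d\n", who, dir);
}

/* Common completion helper, analogous to ahash_done_cpy()/ahash_done_switch(). */
static inline void done_common(const char *who, enum dma_data_direction dir)
{
	unmap_ctx(who, dir);
	/* ...copy the result / cached buffer, then complete the request... */
}

/* Thin wrappers keep the original callback names and pass the right direction. */
static void done(void)         { done_common("done", DMA_FROM_DEVICE); }
static void done_ctx_src(void) { done_common("done_ctx_src", DMA_BIDIRECTIONAL); }

int main(void)
{
	done();
	done_ctx_src();
	return 0;
}

In the actual patch the shared helpers are static inline and the original callbacks keep their names and job-ring callback signature, so only the duplicated unmap/copy/complete logic is removed.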
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -565,8 +565,8 @@ static inline void ahash_unmap_ctx(struct device *dev,
 	ahash_unmap(dev, edesc, req, dst_len);
 }
 
-static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
-		       void *context)
+static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
+				  void *context, enum dma_data_direction dir)
 {
 	struct ahash_request *req = context;
 	struct ahash_edesc *edesc;
@@ -582,7 +582,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
 	if (err)
 		ecode = caam_jr_strstatus(jrdev, err);
 
-	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
 	memcpy(req->result, state->caam_ctx, digestsize);
 	kfree(edesc);
 
@@ -593,76 +593,20 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
 	req->base.complete(&req->base, ecode);
 }
 
-static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
-			  void *context)
+static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
+		       void *context)
 {
-	struct ahash_request *req = context;
-	struct ahash_edesc *edesc;
-	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-	struct caam_hash_state *state = ahash_request_ctx(req);
-	int digestsize = crypto_ahash_digestsize(ahash);
-	int ecode = 0;
-
-	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-
-	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
-	if (err)
-		ecode = caam_jr_strstatus(jrdev, err);
-
-	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
-	kfree(edesc);
-
-	scatterwalk_map_and_copy(state->buf, req->src,
-				 req->nbytes - state->next_buflen,
-				 state->next_buflen, 0);
-	state->buflen = state->next_buflen;
-
-	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
-			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
-			     state->buflen, 1);
-
-	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
-			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
-			     ctx->ctx_len, 1);
-	if (req->result)
-		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
-				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-				     digestsize, 1);
-
-	req->base.complete(&req->base, ecode);
+	ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
 }
 
 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
 			       void *context)
 {
-	struct ahash_request *req = context;
-	struct ahash_edesc *edesc;
-	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-	int digestsize = crypto_ahash_digestsize(ahash);
-	struct caam_hash_state *state = ahash_request_ctx(req);
-	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-	int ecode = 0;
-
-	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-
-	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
-	if (err)
-		ecode = caam_jr_strstatus(jrdev, err);
-
-	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
-	memcpy(req->result, state->caam_ctx, digestsize);
-	kfree(edesc);
-
-	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
-			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
-			     ctx->ctx_len, 1);
-
-	req->base.complete(&req->base, ecode);
+	ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
 }
 
-static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
-			       void *context)
+static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
+				     void *context, enum dma_data_direction dir)
 {
 	struct ahash_request *req = context;
 	struct ahash_edesc *edesc;
@@ -678,7 +622,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
 	if (err)
 		ecode = caam_jr_strstatus(jrdev, err);
 
-	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
 	kfree(edesc);
 
 	scatterwalk_map_and_copy(state->buf, req->src,
@@ -701,6 +645,18 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
 	req->base.complete(&req->base, ecode);
 }
 
+static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
+			  void *context)
+{
+	ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
+}
+
+static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
+			       void *context)
+{
+	ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
+}
+
 /*
  * Allocate an enhanced descriptor, which contains the hardware descriptor
  * and space for hardware scatter table containing sg_num entries.