crypto: inside-secure - fix request allocations in invalidation path
The invalidation paths declared bare request structures directly on the
stack, which leaves no room for the per-request driver context and so
corrupts adjacent stack memory when that context is written. This patch
makes use of the SKCIPHER_REQUEST_ON_STACK and AHASH_REQUEST_ON_STACK
helpers to allocate enough memory to contain both the crypto request
structures and their embedded context (__ctx).
Fixes: 1b44c5a60c ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Suggested-by: Ofer Heifetz <oferh@marvell.com>
Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
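
For reference, a sketch of what the two on-stack helpers expand to,
based on include/crypto/skcipher.h and include/crypto/hash.h of this
kernel generation (exact definitions may differ between versions): the
backing char array is sized for the request structure plus the
transform's per-request context, which is exactly what a bare struct
declaration on the stack lacks.

	/* Sketch (assumption: definitions as in contemporary mainline
	 * headers). The array reserves sizeof(request) plus
	 * crypto_*_reqsize() bytes, so the trailing __ctx region is
	 * backed by real stack memory.
	 */
	#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
		char __##name##_desc[sizeof(struct skcipher_request) + \
			crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
		struct skcipher_request *name = (void *)__##name##_desc

	#define AHASH_REQUEST_ON_STACK(name, ahash) \
		char __##name##_desc[sizeof(struct ahash_request) + \
			crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \
		struct ahash_request *name = (void *)__##name##_desc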
@@ -422,25 +422,25 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	struct skcipher_request req;
-	struct safexcel_cipher_req *sreq = skcipher_request_ctx(&req);
+	SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
 	struct safexcel_inv_result result = {};
 	int ring = ctx->base.ring;
 
-	memset(&req, 0, sizeof(struct skcipher_request));
+	memset(req, 0, sizeof(struct skcipher_request));
 
 	/* create invalidation request */
 	init_completion(&result.completion);
-	skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				      safexcel_inv_complete, &result);
 
-	skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
-	ctx = crypto_tfm_ctx(req.base.tfm);
+	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
 	ctx->base.exit_inv = true;
 	sreq->needs_inv = true;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
-	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
 	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
 	if (!priv->ring[ring].need_dequeue)
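
Since req is now a pointer produced by the macro, the remaining accesses
change mechanically: &req becomes req and req.base becomes req->base.
The helper can only reserve the right amount of space because the driver
declares its per-request context size at transform init time; a
simplified sketch of that registration (assumption: the driver's actual
cra_init also does other setup):

	static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
	{
		/* Declare how much __ctx space each request needs, so
		 * SKCIPHER_REQUEST_ON_STACK() can size its backing array. */
		crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
					    sizeof(struct safexcel_cipher_req));
		return 0;
	}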
@@ -450,25 +450,25 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 {
 	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	struct ahash_request req;
-	struct safexcel_ahash_req *rctx = ahash_request_ctx(&req);
+	AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
 	struct safexcel_inv_result result = {};
 	int ring = ctx->base.ring;
 
-	memset(&req, 0, sizeof(struct ahash_request));
+	memset(req, 0, sizeof(struct ahash_request));
 
 	/* create invalidation request */
 	init_completion(&result.completion);
-	ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				   safexcel_inv_complete, &result);
 
-	ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
-	ctx = crypto_tfm_ctx(req.base.tfm);
+	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
 	ctx->base.exit_inv = true;
 	rctx->needs_inv = true;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
-	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
 	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
 	if (!priv->ring[ring].need_dequeue)
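
The ahash path relies on the matching registration; again a simplified
sketch under the same assumption about the driver's init routine. Note
also that the memset() in both hunks clears only the fixed part of the
request; the __ctx region is initialised by the code that uses it.

	static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
	{
		/* Reserve sizeof(struct safexcel_ahash_req) bytes of
		 * per-request context, consumed by AHASH_REQUEST_ON_STACK(). */
		crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
					 sizeof(struct safexcel_ahash_req));
		return 0;
	}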