crypto: mediatek - rework interrupt handler

This patch removes the redundant tasklets that used to handle the
interrupts from the ring manager, so that a single tasklet/handler
can be shared. It also uses aes->id and sha->id to distinguish the
interrupt sources.

Signed-off-by: Ryder Lee <ryder.lee@mediatek.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
Ryder Lee 2017-03-09 10:11:12 +08:00, committed by Herbert Xu
Parent 43ec540e6f
Commit 132c57caef
3 changed files with 49 additions and 101 deletions
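The rework follows a common kernel idiom for rings that share identical interrupt logic: register the same handler for every IRQ line, pass the per-ring record as dev_id, and let the record carry both its ring index and a back-pointer to the parent device. Below is a minimal sketch of that idiom, assuming simplified stand-in names (struct my_dev, struct ring_rec, ring_read/ring_write, RING_STAT and RING_BUSY are illustrative, not this driver's real helpers):

#include <linux/bits.h>
#include <linux/interrupt.h>

#define RING_BUSY BIT(0)

struct my_dev;

struct ring_rec {
        struct my_dev *dev;          /* back-pointer to the parent device */
        u8 id;                       /* ring index, selects this ring's registers */
        unsigned long flags;         /* RING_BUSY is set while a request is running */
        struct tasklet_struct task;  /* bottom half doing the completion work */
};

static irqreturn_t ring_irq(int irq, void *dev_id)
{
        /* dev_id is the record itself, so no aes[0]/aes[1]-style lookup */
        struct ring_rec *rec = dev_id;
        struct my_dev *dev = rec->dev;
        u32 val = ring_read(dev, RING_STAT(rec->id));

        ring_write(dev, RING_STAT(rec->id), val);  /* ack this ring's status */

        if (likely(rec->flags & RING_BUSY))        /* defer completion to the tasklet */
                tasklet_schedule(&rec->task);
        return IRQ_HANDLED;
}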

drivers/crypto/mediatek/mtk-aes.c

@@ -1092,55 +1092,26 @@ static struct aead_alg aes_gcm_alg = {
 	},
 };
 
-static void mtk_aes_enc_task(unsigned long data)
+static void mtk_aes_done_task(unsigned long data)
 {
-	struct mtk_cryp *cryp = (struct mtk_cryp *)data;
-	struct mtk_aes_rec *aes = cryp->aes[0];
+	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
+	struct mtk_cryp *cryp = aes->cryp;
 
 	mtk_aes_unmap(cryp, aes);
 	aes->resume(cryp, aes);
 }
 
-static void mtk_aes_dec_task(unsigned long data)
+static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
 {
-	struct mtk_cryp *cryp = (struct mtk_cryp *)data;
-	struct mtk_aes_rec *aes = cryp->aes[1];
+	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
+	struct mtk_cryp *cryp = aes->cryp;
+	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));
 
-	mtk_aes_unmap(cryp, aes);
-	aes->resume(cryp, aes);
-}
-
-static irqreturn_t mtk_aes_enc_irq(int irq, void *dev_id)
-{
-	struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
-	struct mtk_aes_rec *aes = cryp->aes[0];
-	u32 val = mtk_aes_read(cryp, RDR_STAT(RING0));
-
-	mtk_aes_write(cryp, RDR_STAT(RING0), val);
+	mtk_aes_write(cryp, RDR_STAT(aes->id), val);
 
 	if (likely(AES_FLAGS_BUSY & aes->flags)) {
-		mtk_aes_write(cryp, RDR_PROC_COUNT(RING0), MTK_CNT_RST);
-		mtk_aes_write(cryp, RDR_THRESH(RING0),
-			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
-
-		tasklet_schedule(&aes->task);
-	} else {
-		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
-	}
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t mtk_aes_dec_irq(int irq, void *dev_id)
-{
-	struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
-	struct mtk_aes_rec *aes = cryp->aes[1];
-	u32 val = mtk_aes_read(cryp, RDR_STAT(RING1));
-
-	mtk_aes_write(cryp, RDR_STAT(RING1), val);
-
-	if (likely(AES_FLAGS_BUSY & aes->flags)) {
-		mtk_aes_write(cryp, RDR_PROC_COUNT(RING1), MTK_CNT_RST);
-		mtk_aes_write(cryp, RDR_THRESH(RING1),
+		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
+		mtk_aes_write(cryp, RDR_THRESH(aes->id),
 			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
 
 		tasklet_schedule(&aes->task);
@@ -1171,14 +1142,18 @@ static int mtk_aes_record_init(struct mtk_cryp *cryp)
 		if (!aes[i]->buf)
 			goto err_cleanup;
 
-		aes[i]->id = i;
+		aes[i]->cryp = cryp;
 
 		spin_lock_init(&aes[i]->lock);
 		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);
+
+		tasklet_init(&aes[i]->task, mtk_aes_done_task,
+			     (unsigned long)aes[i]);
 	}
 
-	tasklet_init(&aes[0]->task, mtk_aes_enc_task, (unsigned long)cryp);
-	tasklet_init(&aes[1]->task, mtk_aes_dec_task, (unsigned long)cryp);
+	/* Link to ring0 and ring1 respectively */
+	aes[0]->id = RING0;
+	aes[1]->id = RING1;
 
 	return 0;
@@ -1246,19 +1221,17 @@ int mtk_cipher_alg_register(struct mtk_cryp *cryp)
 	if (ret)
 		goto err_record;
 
-	/* Ring0 is use by encryption record */
-	ret = devm_request_irq(cryp->dev, cryp->irq[RING0], mtk_aes_enc_irq,
-			       IRQF_TRIGGER_LOW, "mtk-aes", cryp);
+	ret = devm_request_irq(cryp->dev, cryp->irq[RING0], mtk_aes_irq,
+			       0, "mtk-aes", cryp->aes[0]);
 	if (ret) {
-		dev_err(cryp->dev, "unable to request AES encryption irq.\n");
+		dev_err(cryp->dev, "unable to request AES irq.\n");
 		goto err_res;
 	}
 
-	/* Ring1 is use by decryption record */
-	ret = devm_request_irq(cryp->dev, cryp->irq[RING1], mtk_aes_dec_irq,
-			       IRQF_TRIGGER_LOW, "mtk-aes", cryp);
+	ret = devm_request_irq(cryp->dev, cryp->irq[RING1], mtk_aes_irq,
+			       0, "mtk-aes", cryp->aes[1]);
 	if (ret) {
-		dev_err(cryp->dev, "unable to request AES decryption irq.\n");
+		dev_err(cryp->dev, "unable to request AES irq.\n");
 		goto err_res;
 	}
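Two details of the registration change are worth noting. The dev_id argument is now the record (cryp->aes[0] / cryp->aes[1]) rather than the shared cryp, which is what lets one handler serve both rings; and the hard-coded IRQF_TRIGGER_LOW is dropped in favour of 0, which leaves the trigger type to whatever the platform's interrupt description (e.g. the device tree) already configured. The tasklet side mirrors this: both records run the same callback, distinguished only by the pointer passed at tasklet_init() time. A hedged sketch of that callback shape, reusing the stand-in names from the sketch above (ring_unmap and ring_complete stand in for the driver's unmap/resume pair):

static void ring_done_task(unsigned long data)
{
        /* data is the record passed to tasklet_init(), not the parent device */
        struct ring_rec *rec = (struct ring_rec *)data;
        struct my_dev *dev = rec->dev;   /* parent recovered via the back-pointer */

        ring_unmap(dev, rec);            /* release the DMA mappings */
        ring_complete(dev, rec);         /* continue or complete the request */
}

/* one init per record, same callback for every ring: */
tasklet_init(&rec->task, ring_done_task, (unsigned long)rec);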

drivers/crypto/mediatek/mtk-platform.h

@@ -125,6 +125,7 @@ typedef int (*mtk_aes_fn)(struct mtk_cryp *cryp, struct mtk_aes_rec *aes);
 
 /**
  * struct mtk_aes_rec - AES operation record
+ * @cryp:	pointer to Cryptographic device
  * @queue:	crypto request queue
  * @areq:	pointer to async request
  * @task:	the tasklet is use in AES interrupt
@@ -143,6 +144,7 @@ typedef int (*mtk_aes_fn)(struct mtk_cryp *cryp, struct mtk_aes_rec *aes);
  * Structure used to record AES execution state.
  */
 struct mtk_aes_rec {
+	struct mtk_cryp *cryp;
 	struct crypto_queue queue;
 	struct crypto_async_request *areq;
 	struct tasklet_struct task;
@@ -166,6 +168,7 @@ struct mtk_aes_rec {
 
 /**
  * struct mtk_sha_rec - SHA operation record
+ * @cryp:	pointer to Cryptographic device
  * @queue:	crypto request queue
  * @req:	pointer to ahash request
  * @task:	the tasklet is use in SHA interrupt
@@ -176,6 +179,7 @@ struct mtk_aes_rec {
  * Structure used to record SHA execution state.
  */
 struct mtk_sha_rec {
+	struct mtk_cryp *cryp;
 	struct crypto_queue queue;
 	struct ahash_request *req;
 	struct tasklet_struct task;
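The header change is the piece that makes the sharing possible: each record gains a @cryp back-pointer, so any context that holds only the record (the ISR via dev_id, the tasklet via its data argument) can reach the parent device, while the @id field records which hardware ring the record owns and is what the shared handler uses to index the per-ring RDR_* registers. A sketch of the initialization that ties the two together, again with the illustrative stand-in names:

static void ring_rec_init(struct my_dev *dev, struct ring_rec *rec, u8 ring)
{
        rec->dev = dev;    /* link the record back to its parent device */
        rec->id = ring;    /* bind the record to one hardware ring */
        tasklet_init(&rec->task, ring_done_task, (unsigned long)rec);
}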

drivers/crypto/mediatek/mtk-sha.c

@@ -1216,60 +1216,31 @@ static struct ahash_alg algs_sha384_sha512[] = {
 	},
 };
 
-static void mtk_sha_task0(unsigned long data)
+static void mtk_sha_done_task(unsigned long data)
 {
-	struct mtk_cryp *cryp = (struct mtk_cryp *)data;
-	struct mtk_sha_rec *sha = cryp->sha[0];
+	struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
+	struct mtk_cryp *cryp = sha->cryp;
 
 	mtk_sha_unmap(cryp, sha);
 	mtk_sha_complete(cryp, sha);
 }
 
-static void mtk_sha_task1(unsigned long data)
+static irqreturn_t mtk_sha_irq(int irq, void *dev_id)
 {
-	struct mtk_cryp *cryp = (struct mtk_cryp *)data;
-	struct mtk_sha_rec *sha = cryp->sha[1];
+	struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id;
+	struct mtk_cryp *cryp = sha->cryp;
+	u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id));
 
-	mtk_sha_unmap(cryp, sha);
-	mtk_sha_complete(cryp, sha);
-}
-
-static irqreturn_t mtk_sha_ring2_irq(int irq, void *dev_id)
-{
-	struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
-	struct mtk_sha_rec *sha = cryp->sha[0];
-	u32 val = mtk_sha_read(cryp, RDR_STAT(RING2));
-
-	mtk_sha_write(cryp, RDR_STAT(RING2), val);
+	mtk_sha_write(cryp, RDR_STAT(sha->id), val);
 
 	if (likely((SHA_FLAGS_BUSY & sha->flags))) {
-		mtk_sha_write(cryp, RDR_PROC_COUNT(RING2), MTK_CNT_RST);
-		mtk_sha_write(cryp, RDR_THRESH(RING2),
+		mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST);
+		mtk_sha_write(cryp, RDR_THRESH(sha->id),
 			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
 		tasklet_schedule(&sha->task);
 	} else {
-		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
-	}
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t mtk_sha_ring3_irq(int irq, void *dev_id)
-{
-	struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
-	struct mtk_sha_rec *sha = cryp->sha[1];
-	u32 val = mtk_sha_read(cryp, RDR_STAT(RING3));
-
-	mtk_sha_write(cryp, RDR_STAT(RING3), val);
-
-	if (likely((SHA_FLAGS_BUSY & sha->flags))) {
-		mtk_sha_write(cryp, RDR_PROC_COUNT(RING3), MTK_CNT_RST);
-		mtk_sha_write(cryp, RDR_THRESH(RING3),
-			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
-		tasklet_schedule(&sha->task);
-	} else {
-		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
+		dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
 	}
 	return IRQ_HANDLED;
 }
@@ -1288,14 +1259,18 @@ static int mtk_sha_record_init(struct mtk_cryp *cryp)
 		if (!sha[i])
 			goto err_cleanup;
 
-		sha[i]->id = i + RING2;
+		sha[i]->cryp = cryp;
 
 		spin_lock_init(&sha[i]->lock);
 		crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);
+
+		tasklet_init(&sha[i]->task, mtk_sha_done_task,
+			     (unsigned long)sha[i]);
 	}
 
-	tasklet_init(&sha[0]->task, mtk_sha_task0, (unsigned long)cryp);
-	tasklet_init(&sha[1]->task, mtk_sha_task1, (unsigned long)cryp);
+	/* Link to ring2 and ring3 respectively */
+	sha[0]->id = RING2;
+	sha[1]->id = RING3;
 
 	cryp->rec = 1;
@@ -1368,19 +1343,15 @@ int mtk_hash_alg_register(struct mtk_cryp *cryp)
 	if (err)
 		goto err_record;
 
-	/* Ring2 is use by SHA record0 */
-	err = devm_request_irq(cryp->dev, cryp->irq[RING2],
-			       mtk_sha_ring2_irq, IRQF_TRIGGER_LOW,
-			       "mtk-sha", cryp);
+	err = devm_request_irq(cryp->dev, cryp->irq[RING2], mtk_sha_irq,
+			       0, "mtk-sha", cryp->sha[0]);
 	if (err) {
 		dev_err(cryp->dev, "unable to request sha irq0.\n");
 		goto err_res;
 	}
 
-	/* Ring3 is use by SHA record1 */
-	err = devm_request_irq(cryp->dev, cryp->irq[RING3],
-			       mtk_sha_ring3_irq, IRQF_TRIGGER_LOW,
-			       "mtk-sha", cryp);
+	err = devm_request_irq(cryp->dev, cryp->irq[RING3], mtk_sha_irq,
+			       0, "mtk-sha", cryp->sha[1]);
 	if (err) {
 		dev_err(cryp->dev, "unable to request sha irq1.\n");
 		goto err_res;