crypto: rockchip - add debugfs
This patch enables access to usage stats for each algorithm.

Reviewed-by: John Keeping <john@metanate.com>
Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Parent: 6d55c4a206
Commit: 48d904d428
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
@@ -686,6 +686,16 @@ config CRYPTO_DEV_ROCKCHIP
 	  This driver interfaces with the hardware crypto accelerator.
 	  Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
 
+config CRYPTO_DEV_ROCKCHIP_DEBUG
+	bool "Enable Rockchip crypto stats"
+	depends on CRYPTO_DEV_ROCKCHIP
+	depends on DEBUG_FS
+	help
+	  Say y to enable Rockchip crypto debug stats.
+	  This will create /sys/kernel/debug/rk3288_crypto/stats for displaying
+	  the number of requests per algorithm and other internal stats.
+
 config CRYPTO_DEV_ZYNQMP_AES
 	tristate "Support for Xilinx ZynqMP AES hw accelerator"
 	depends on ZYNQMP_FIRMWARE || COMPILE_TEST
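The help text above is the whole user interface: once the option is enabled, the stats are plain text behind a debugfs file. As a minimal sketch (not part of this patch), a userspace reader could look like the following; the path comes from the help text, and the output format follows the seq_printf() calls added later in this patch:

	/* Minimal sketch (not part of this patch): dump the stats file from
	 * userspace. Assumes debugfs is mounted at /sys/kernel/debug and the
	 * kernel was built with CRYPTO_DEV_ROCKCHIP_DEBUG=y. */
	#include <stdio.h>

	int main(void)
	{
		char buf[256];
		FILE *f = fopen("/sys/kernel/debug/rk3288_crypto/stats", "r");

		if (!f) {
			perror("open stats");
			return 1;
		}
		/* Lines look like "<cra_driver_name> <cra_name> reqs=N fallback=M",
		 * per the _show callback below. */
		while (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout);
		fclose(f);
		return 0;
	}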
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -95,6 +95,41 @@ static struct rk_crypto_tmp *rk_cipher_algs[] = {
 	&rk_ahash_md5,
 };
 
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
+		if (!rk_cipher_algs[i]->dev)
+			continue;
+		switch (rk_cipher_algs[i]->type) {
+		case CRYPTO_ALG_TYPE_SKCIPHER:
+			seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
+				   rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name,
+				   rk_cipher_algs[i]->alg.skcipher.base.cra_name,
+				   rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb);
+			seq_printf(seq, "\tfallback due to length: %lu\n",
+				   rk_cipher_algs[i]->stat_fb_len);
+			seq_printf(seq, "\tfallback due to alignment: %lu\n",
+				   rk_cipher_algs[i]->stat_fb_align);
+			seq_printf(seq, "\tfallback due to SGs: %lu\n",
+				   rk_cipher_algs[i]->stat_fb_sgdiff);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
+				   rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name,
+				   rk_cipher_algs[i]->alg.hash.halg.base.cra_name,
+				   rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb);
+			break;
+		}
+	}
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs);
+#endif
+
 static int rk_crypto_register(struct rk_crypto_info *crypto_info)
 {
 	unsigned int i, k;
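A note on the DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs) line: it is the seq_file helper macro from <linux/seq_file.h> that generates the open callback and file_operations around the _show function. Its expansion is roughly the following (simplified sketch, shown for illustration):

	/* Approximate expansion of DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs),
	 * per the helper in <linux/seq_file.h> (simplified for illustration). */
	static int rk_crypto_debugfs_open(struct inode *inode, struct file *file)
	{
		return single_open(file, rk_crypto_debugfs_show, inode->i_private);
	}

	static const struct file_operations rk_crypto_debugfs_fops = {
		.owner		= THIS_MODULE,
		.open		= rk_crypto_debugfs_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

This is where the rk_crypto_debugfs_fops passed to debugfs_create_file() in the probe path below comes from.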
@@ -246,6 +281,15 @@ static int rk_crypto_probe(struct platform_device *pdev)
 		goto err_register_alg;
 	}
 
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+	/* Ignore error of debugfs */
+	crypto_info->dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
+	crypto_info->dbgfs_stats = debugfs_create_file("stats", 0444,
+						       crypto_info->dbgfs_dir,
+						       crypto_info,
+						       &rk_crypto_debugfs_fops);
+#endif
+
 	dev_info(dev, "Crypto Accelerator successfully registered\n");
 	return 0;
@@ -260,6 +304,9 @@ static int rk_crypto_remove(struct platform_device *pdev)
 {
 	struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
 
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+	debugfs_remove_recursive(crypto_tmp->dbgfs_dir);
+#endif
 	rk_crypto_unregister();
 	rk_crypto_disable_clk(crypto_tmp);
 	crypto_engine_exit(crypto_tmp->engine);
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -7,6 +7,7 @@
 #include <crypto/algapi.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
+#include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/scatterlist.h>
 #include <crypto/engine.h>
@@ -199,6 +200,10 @@ struct rk_crypto_info {
 	struct crypto_engine *engine;
 	struct completion complete;
 	int status;
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+	struct dentry *dbgfs_dir;
+	struct dentry *dbgfs_stats;
+#endif
 };
 
 /* the private variable of hash */
@@ -239,6 +244,12 @@ struct rk_crypto_tmp {
 		struct skcipher_alg skcipher;
 		struct ahash_alg hash;
 	} alg;
+	unsigned long stat_req;
+	unsigned long stat_fb;
+	unsigned long stat_fb_len;
+	unsigned long stat_fb_sglen;
+	unsigned long stat_fb_align;
+	unsigned long stat_fb_sgdiff;
 };
 
 extern struct rk_crypto_tmp rk_ecb_aes_alg;
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -39,6 +39,10 @@ static int rk_ahash_digest_fb(struct ahash_request *areq)
 	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);
+	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+	struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+
+	algt->stat_fb++;
 
 	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
 	rctx->fallback_req.base.flags = areq->base.flags &
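The same two-line pattern (recover the struct rk_crypto_tmp wrapper from the embedded algorithm, then bump a counter) repeats in every hot path this patch touches. It works because struct rk_crypto_tmp embeds the skcipher_alg/ahash_alg as a member, so container_of() can walk back from the embedded member to its wrapper. A standalone sketch of the idiom, with illustrative userspace stand-ins rather than kernel code:

	#include <stddef.h>
	#include <stdio.h>

	/* Illustrative stand-in for the kernel's container_of() helper. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct ahash_alg { int dummy; };		/* stand-in */

	struct rk_crypto_tmp {				/* simplified stand-in */
		unsigned long stat_req;
		union {
			struct ahash_alg hash;
		} alg;
	};

	int main(void)
	{
		struct rk_crypto_tmp tmp = { .stat_req = 0 };
		struct ahash_alg *alg = &tmp.alg.hash;

		/* Recover the wrapping rk_crypto_tmp from the embedded alg,
		 * as rk_ahash_digest_fb() does, then bump a counter. */
		struct rk_crypto_tmp *algt =
			container_of(alg, struct rk_crypto_tmp, alg.hash);
		algt->stat_req++;
		printf("stat_req = %lu\n", algt->stat_req);
		return 0;
	}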
@@ -249,6 +253,8 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
 	struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+	struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
 	struct scatterlist *sg = areq->src;
 	int err = 0;
 	int i;
@@ -256,6 +262,8 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
 
 	rctx->mode = 0;
 
+	algt->stat_req++;
+
 	switch (crypto_ahash_digestsize(tfm)) {
 	case SHA1_DIGEST_SIZE:
 		rctx->mode = RK_CRYPTO_HASH_SHA1;
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -18,6 +18,8 @@ static int rk_cipher_need_fallback(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	unsigned int bs = crypto_skcipher_blocksize(tfm);
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
 	struct scatterlist *sgs, *sgd;
 	unsigned int stodo, dtodo, len;
@@ -29,20 +31,25 @@ static int rk_cipher_need_fallback(struct skcipher_request *req)
 	sgd = req->dst;
 	while (sgs && sgd) {
 		if (!IS_ALIGNED(sgs->offset, sizeof(u32))) {
+			algt->stat_fb_align++;
 			return true;
 		}
 		if (!IS_ALIGNED(sgd->offset, sizeof(u32))) {
+			algt->stat_fb_align++;
 			return true;
 		}
 		stodo = min(len, sgs->length);
 		if (stodo % bs) {
+			algt->stat_fb_len++;
 			return true;
 		}
 		dtodo = min(len, sgd->length);
 		if (dtodo % bs) {
+			algt->stat_fb_len++;
 			return true;
 		}
 		if (stodo != dtodo) {
+			algt->stat_fb_sgdiff++;
 			return true;
 		}
 		len -= stodo;
@@ -57,8 +64,12 @@ static int rk_cipher_fallback(struct skcipher_request *areq)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
 	struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
 	struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
 	int err;
 
+	algt->stat_fb++;
+
 	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
 	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
 				      areq->base.complete, areq->base.data);
@@ -324,6 +335,10 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
 	u8 *ivtouse = areq->iv;
 	unsigned int len = areq->cryptlen;
 	unsigned int todo;
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+
+	algt->stat_req++;
 
 	ivsize = crypto_skcipher_ivsize(tfm);
 	if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {