Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (57 commits)
  crypto: aes - Precompute tables
  crypto: talitos - Ack done interrupt in isr instead of tasklet
  crypto: testmgr - Correct comment about deflate parameters
  crypto: salsa20 - Remove private wrappers around various operations
  crypto: des3_ede - permit weak keys unless REQ_WEAK_KEY set
  crypto: sha512 - Switch to shash
  crypto: sha512 - Move message schedule W[80] to static percpu area
  crypto: michael_mic - Switch to shash
  crypto: wp512 - Switch to shash
  crypto: tgr192 - Switch to shash
  crypto: sha256 - Switch to shash
  crypto: md5 - Switch to shash
  crypto: md4 - Switch to shash
  crypto: sha1 - Switch to shash
  crypto: rmd320 - Switch to shash
  crypto: rmd256 - Switch to shash
  crypto: rmd160 - Switch to shash
  crypto: rmd128 - Switch to shash
  crypto: null - Switch to shash
  crypto: hash - Make setkey optional
  ...
commit e14e61e967
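Most of the churn below comes from the "Switch to shash" commits: synchronous hashes move from the old crypto_tfm digest hooks (and the ahash wrapper) to struct shash_alg, whose init/update/final take a struct shash_desc and return int, with per-call state kept in .descsize bytes behind the descriptor. As a reading aid, here is a minimal sketch of the target interface, modeled on the conversions in this merge — the "demo" algorithm, its state layout, and its checksum are made up, not part of any commit here:

    #include <crypto/internal/hash.h>
    #include <linux/module.h>

    struct demo_desc_ctx {
    	u32 sum;			/* hypothetical running state */
    };

    static int demo_init(struct shash_desc *desc)
    {
    	struct demo_desc_ctx *ctx = shash_desc_ctx(desc);

    	ctx->sum = 0;
    	return 0;			/* shash hooks report errors via int */
    }

    static int demo_update(struct shash_desc *desc, const u8 *data,
    		       unsigned int len)
    {
    	struct demo_desc_ctx *ctx = shash_desc_ctx(desc);

    	while (len--)
    		ctx->sum += *data++;	/* stand-in for a real compression step */
    	return 0;
    }

    static int demo_final(struct shash_desc *desc, u8 *out)
    {
    	struct demo_desc_ctx *ctx = shash_desc_ctx(desc);

    	*(__le32 *)out = cpu_to_le32(ctx->sum);
    	return 0;
    }

    static struct shash_alg demo_alg = {
    	.digestsize	= 4,
    	.init		= demo_init,
    	.update		= demo_update,
    	.final		= demo_final,
    	.descsize	= sizeof(struct demo_desc_ctx),
    	.base		= {
    		.cra_name	= "demo",
    		.cra_flags	= CRYPTO_ALG_TYPE_SHASH,
    		.cra_blocksize	= 1,
    		.cra_module	= THIS_MODULE,
    	}
    };

Registration then goes through crypto_register_shash()/crypto_unregister_shash(), exactly as in the module init/exit hunks below.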
arch/x86/crypto/crc32c-intel.c:

@@ -6,13 +6,22 @@
  * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  * Volume 2A: Instruction Set Reference, A-M
  *
- * Copyright (c) 2008 Austin Zhang <austin_zhang@linux.intel.com>
- * Copyright (c) 2008 Kent Liu <kent.liu@intel.com>
+ * Copyright (C) 2008 Intel Corporation
+ * Authors: Austin Zhang <austin_zhang@linux.intel.com>
+ *          Kent Liu <kent.liu@intel.com>
  *
  * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  *
  */
 #include <linux/init.h>
@@ -75,99 +84,92 @@ static u32 __pure crc32c_intel_le_hw(u32 crc, unsigned char const *p, size_t len)
  * If your algorithm starts with ~0, then XOR with ~0 before you set
  * the seed.
  */
-static int crc32c_intel_setkey(struct crypto_ahash *hash, const u8 *key,
+static int crc32c_intel_setkey(struct crypto_shash *hash, const u8 *key,
 			unsigned int keylen)
 {
-	u32 *mctx = crypto_ahash_ctx(hash);
+	u32 *mctx = crypto_shash_ctx(hash);
 
 	if (keylen != sizeof(u32)) {
-		crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 	*mctx = le32_to_cpup((__le32 *)key);
 	return 0;
 }
 
-static int crc32c_intel_init(struct ahash_request *req)
+static int crc32c_intel_init(struct shash_desc *desc)
 {
-	u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
-	u32 *crcp = ahash_request_ctx(req);
+	u32 *mctx = crypto_shash_ctx(desc->tfm);
+	u32 *crcp = shash_desc_ctx(desc);
 
 	*crcp = *mctx;
 
 	return 0;
 }
 
-static int crc32c_intel_update(struct ahash_request *req)
+static int crc32c_intel_update(struct shash_desc *desc, const u8 *data,
+			       unsigned int len)
 {
-	struct crypto_hash_walk walk;
-	u32 *crcp = ahash_request_ctx(req);
-	u32 crc = *crcp;
-	int nbytes;
+	u32 *crcp = shash_desc_ctx(desc);
 
-	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
-	     nbytes = crypto_hash_walk_done(&walk, 0))
-		crc = crc32c_intel_le_hw(crc, walk.data, nbytes);
-
-	*crcp = crc;
+	*crcp = crc32c_intel_le_hw(*crcp, data, len);
 	return 0;
 }
 
-static int crc32c_intel_final(struct ahash_request *req)
+static int __crc32c_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
+				u8 *out)
 {
-	u32 *crcp = ahash_request_ctx(req);
-
-	*(__le32 *)req->result = ~cpu_to_le32p(crcp);
+	*(__le32 *)out = ~cpu_to_le32(crc32c_intel_le_hw(*crcp, data, len));
 	return 0;
 }
 
-static int crc32c_intel_digest(struct ahash_request *req)
+static int crc32c_intel_finup(struct shash_desc *desc, const u8 *data,
+			      unsigned int len, u8 *out)
 {
-	struct crypto_hash_walk walk;
-	u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
-	u32 crc = *mctx;
-	int nbytes;
+	return __crc32c_intel_finup(shash_desc_ctx(desc), data, len, out);
+}
 
-	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
-	     nbytes = crypto_hash_walk_done(&walk, 0))
-		crc = crc32c_intel_le_hw(crc, walk.data, nbytes);
+static int crc32c_intel_final(struct shash_desc *desc, u8 *out)
+{
+	u32 *crcp = shash_desc_ctx(desc);
 
-	*(__le32 *)req->result = ~cpu_to_le32(crc);
+	*(__le32 *)out = ~cpu_to_le32p(crcp);
 	return 0;
 }
 
+static int crc32c_intel_digest(struct shash_desc *desc, const u8 *data,
+			       unsigned int len, u8 *out)
+{
+	return __crc32c_intel_finup(crypto_shash_ctx(desc->tfm), data, len,
+				    out);
+}
+
 static int crc32c_intel_cra_init(struct crypto_tfm *tfm)
 {
 	u32 *key = crypto_tfm_ctx(tfm);
 
 	*key = ~0;
 
-	tfm->crt_ahash.reqsize = sizeof(u32);
-
 	return 0;
 }
 
-static struct crypto_alg alg = {
-	.cra_name		= "crc32c",
-	.cra_driver_name	= "crc32c-intel",
-	.cra_priority		= 200,
-	.cra_flags		= CRYPTO_ALG_TYPE_AHASH,
-	.cra_blocksize		= CHKSUM_BLOCK_SIZE,
-	.cra_alignmask		= 3,
-	.cra_ctxsize		= sizeof(u32),
-	.cra_module		= THIS_MODULE,
-	.cra_list		= LIST_HEAD_INIT(alg.cra_list),
-	.cra_init		= crc32c_intel_cra_init,
-	.cra_type		= &crypto_ahash_type,
-	.cra_u			= {
-		.ahash = {
-			.digestsize	= CHKSUM_DIGEST_SIZE,
-			.setkey		= crc32c_intel_setkey,
-			.init		= crc32c_intel_init,
-			.update		= crc32c_intel_update,
-			.final		= crc32c_intel_final,
-			.digest		= crc32c_intel_digest,
-		}
+static struct shash_alg alg = {
+	.setkey			= crc32c_intel_setkey,
+	.init			= crc32c_intel_init,
+	.update			= crc32c_intel_update,
+	.final			= crc32c_intel_final,
+	.finup			= crc32c_intel_finup,
+	.digest			= crc32c_intel_digest,
+	.descsize		= sizeof(u32),
+	.digestsize		= CHKSUM_DIGEST_SIZE,
+	.base			= {
+		.cra_name		= "crc32c",
+		.cra_driver_name	= "crc32c-intel",
+		.cra_priority		= 200,
+		.cra_blocksize		= CHKSUM_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(u32),
+		.cra_module		= THIS_MODULE,
+		.cra_init		= crc32c_intel_cra_init,
 	}
 };
@@ -175,14 +177,14 @@ static struct crypto_alg alg = {
 static int __init crc32c_intel_mod_init(void)
 {
 	if (cpu_has_xmm4_2)
-		return crypto_register_alg(&alg);
+		return crypto_register_shash(&alg);
 	else
 		return -ENODEV;
 }
 
 static void __exit crc32c_intel_mod_fini(void)
 {
-	crypto_unregister_alg(&alg);
+	crypto_unregister_shash(&alg);
 }
 
 module_init(crc32c_intel_mod_init);
@@ -194,4 +196,3 @@ MODULE_LICENSE("GPL");
 
 MODULE_ALIAS("crc32c");
 MODULE_ALIAS("crc32c-intel");
-
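For callers, the converted drivers are reached through the shash frontend. A hedged usage sketch (the helper name and error handling are mine; the API calls are the ones this series introduces):

    #include <crypto/hash.h>
    #include <linux/slab.h>

    static int crc32c_once(const u8 *data, unsigned int len, u8 *out)
    {
    	struct crypto_shash *tfm;
    	struct shash_desc *desc;
    	int err;

    	tfm = crypto_alloc_shash("crc32c", 0, 0);
    	if (IS_ERR(tfm))
    		return PTR_ERR(tfm);

    	/* the descriptor is followed by descsize bytes of per-call state */
    	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
    	if (!desc) {
    		crypto_free_shash(tfm);
    		return -ENOMEM;
    	}
    	desc->tfm = tfm;
    	desc->flags = 0;

    	err = crypto_shash_digest(desc, data, len, out);  /* init+update+final */

    	kfree(desc);
    	crypto_free_shash(tfm);
    	return err;
    }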
crypto/Kconfig:

@@ -102,6 +102,7 @@ config CRYPTO_NULL
 	tristate "Null algorithms"
 	select CRYPTO_ALGAPI
 	select CRYPTO_BLKCIPHER
+	select CRYPTO_HASH
 	help
 	  These are 'Null' algorithms, used by IPsec, which do nothing.
@@ -256,12 +257,10 @@ comment "Digest"
 config CRYPTO_CRC32C
 	tristate "CRC32c CRC algorithm"
 	select CRYPTO_HASH
-	select LIBCRC32C
 	help
 	  Castagnoli, et al Cyclic Redundancy-Check Algorithm.  Used
 	  by iSCSI for header and data digests and by others.
-	  See Castagnoli93.  This implementation uses lib/libcrc32c.
-	  Module will be crc32c.
+	  See Castagnoli93.  Module will be crc32c.
 
 config CRYPTO_CRC32C_INTEL
 	tristate "CRC32c INTEL hardware acceleration"
@@ -277,19 +276,19 @@ config CRYPTO_CRC32C_INTEL
 
 config CRYPTO_MD4
 	tristate "MD4 digest algorithm"
-	select CRYPTO_ALGAPI
+	select CRYPTO_HASH
 	help
 	  MD4 message digest algorithm (RFC1320).
 
 config CRYPTO_MD5
 	tristate "MD5 digest algorithm"
-	select CRYPTO_ALGAPI
+	select CRYPTO_HASH
 	help
 	  MD5 message digest algorithm (RFC1321).
 
 config CRYPTO_MICHAEL_MIC
 	tristate "Michael MIC keyed digest algorithm"
-	select CRYPTO_ALGAPI
+	select CRYPTO_HASH
 	help
 	  Michael MIC is used for message integrity protection in TKIP
 	  (IEEE 802.11i). This algorithm is required for TKIP, but it
@@ -298,7 +297,7 @@ config CRYPTO_MICHAEL_MIC
 
 config CRYPTO_RMD128
 	tristate "RIPEMD-128 digest algorithm"
-	select CRYPTO_ALGAPI
+	select CRYPTO_HASH
 	help
 	  RIPEMD-128 (ISO/IEC 10118-3:2004).
 
@@ -311,7 +310,7 @@ config CRYPTO_RMD128
 
 config CRYPTO_RMD160
 	tristate "RIPEMD-160 digest algorithm"
-	select CRYPTO_ALGAPI
+	select CRYPTO_HASH
 	help
 	  RIPEMD-160 (ISO/IEC 10118-3:2004).
 
@@ -328,7 +327,7 @@ config CRYPTO_RMD160
 
 config CRYPTO_RMD256
 	tristate "RIPEMD-256 digest algorithm"
-	select CRYPTO_ALGAPI
+	select CRYPTO_HASH
 	help
 	  RIPEMD-256 is an optional extension of RIPEMD-128 with a
 	  256 bit hash. It is intended for applications that require
@@ -340,7 +339,7 @@ config CRYPTO_RMD256
 
 config CRYPTO_RMD320
 	tristate "RIPEMD-320 digest algorithm"
-	select CRYPTO_ALGAPI
+	select CRYPTO_HASH
 	help
 	  RIPEMD-320 is an optional extension of RIPEMD-160 with a
 	  320 bit hash. It is intended for applications that require
@@ -352,13 +351,13 @@ config CRYPTO_RMD320
 
 config CRYPTO_SHA1
 	tristate "SHA1 digest algorithm"
-	select CRYPTO_ALGAPI
+	select CRYPTO_HASH
 	help
 	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
 
 config CRYPTO_SHA256
 	tristate "SHA224 and SHA256 digest algorithm"
-	select CRYPTO_ALGAPI
+	select CRYPTO_HASH
 	help
 	  SHA256 secure hash standard (DFIPS 180-2).
 
@@ -370,7 +369,7 @@ config CRYPTO_SHA256
 
 config CRYPTO_SHA512
 	tristate "SHA384 and SHA512 digest algorithms"
-	select CRYPTO_ALGAPI
+	select CRYPTO_HASH
 	help
 	  SHA512 secure hash standard (DFIPS 180-2).
 
@@ -382,7 +381,7 @@ config CRYPTO_SHA512
 
 config CRYPTO_TGR192
 	tristate "Tiger digest algorithms"
-	select CRYPTO_ALGAPI
+	select CRYPTO_HASH
 	help
 	  Tiger hash algorithm 192, 160 and 128-bit hashes
 
@@ -395,7 +394,7 @@ config CRYPTO_TGR192
 
 config CRYPTO_WP512
 	tristate "Whirlpool digest algorithms"
-	select CRYPTO_ALGAPI
+	select CRYPTO_HASH
 	help
 	  Whirlpool hash algorithm 512, 384 and 256-bit hashes
 
crypto/Makefile:

@@ -22,6 +22,7 @@ obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
 
 crypto_hash-objs := hash.o
 crypto_hash-objs += ahash.o
+crypto_hash-objs += shash.o
 obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 
 cryptomgr-objs := algboss.o testmgr.o
crypto/aes_generic.c: 1147 lines changed (diff not shown because of its size)

crypto/ahash.c:
@@ -112,6 +112,22 @@ int crypto_hash_walk_first(struct ahash_request *req,
 }
 EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
 
+int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
+				  struct crypto_hash_walk *walk,
+				  struct scatterlist *sg, unsigned int len)
+{
+	walk->total = len;
+
+	if (!walk->total)
+		return 0;
+
+	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
+	walk->sg = sg;
+	walk->flags = hdesc->flags;
+
+	return hash_walk_new_entry(walk);
+}
+
 static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
 				  unsigned int keylen)
 {

@@ -146,6 +162,26 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 	return ahash->setkey(tfm, key, keylen);
 }
 
+static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
+			  unsigned int keylen)
+{
+	return -ENOSYS;
+}
+
+int crypto_ahash_import(struct ahash_request *req, const u8 *in)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ahash_alg *alg = crypto_ahash_alg(tfm);
+
+	memcpy(ahash_request_ctx(req), in, crypto_ahash_reqsize(tfm));
+
+	if (alg->reinit)
+		alg->reinit(req);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_import);
+
 static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type,
 					 u32 mask)
 {

@@ -164,7 +200,7 @@ static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
 	crt->update = alg->update;
 	crt->final = alg->final;
 	crt->digest = alg->digest;
-	crt->setkey = ahash_setkey;
+	crt->setkey = alg->setkey ? ahash_setkey : ahash_nosetkey;
 	crt->digestsize = alg->digestsize;
 
 	return 0;
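The last ahash.c hunk is "crypto: hash - Make setkey optional" from the commit list above: a driver may now leave .setkey NULL, and the core routes the operation to a stub returning -ENOSYS instead of every keyless hash carrying its own no-op. A caller probing for keyed operation might look like this (hypothetical caller, not from this merge):

    err = crypto_ahash_setkey(tfm, key, keylen);
    if (err == -ENOSYS)
    	/* the algorithm is unkeyed (e.g. plain md5); carry on without a key */
    	err = 0;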
crypto/ansi_cprng.c:

@@ -161,7 +161,7 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
 	/*
 	 * Now update our DT value
 	 */
-	for (i = 0; i < DEFAULT_BLK_SZ; i++) {
+	for (i = DEFAULT_BLK_SZ - 1; i >= 0; i--) {
 		ctx->DT[i] += 1;
 		if (ctx->DT[i] != 0)
 			break;

@@ -223,9 +223,10 @@ remainder:
 	}
 
 	/*
-	 * Copy up to the next whole block size
+	 * Copy any data less than an entire block
 	 */
 	if (byte_count < DEFAULT_BLK_SZ) {
+empty_rbuf:
 		for (; ctx->rand_data_valid < DEFAULT_BLK_SZ;
 			ctx->rand_data_valid++) {
 			*ptr = ctx->rand_data[ctx->rand_data_valid];

@@ -240,18 +241,22 @@ remainder:
 	 * Now copy whole blocks
 	 */
 	for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
-		if (_get_more_prng_bytes(ctx) < 0) {
-			memset(buf, 0, nbytes);
-			err = -EINVAL;
-			goto done;
+		if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
+			if (_get_more_prng_bytes(ctx) < 0) {
+				memset(buf, 0, nbytes);
+				err = -EINVAL;
+				goto done;
+			}
 		}
+		if (ctx->rand_data_valid > 0)
+			goto empty_rbuf;
 		memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ);
 		ctx->rand_data_valid += DEFAULT_BLK_SZ;
 		ptr += DEFAULT_BLK_SZ;
 	}
 
 	/*
-	 * Now copy any extra partial data
+	 * Now go back and get any remaining partial block
 	 */
 	if (byte_count)
 		goto remainder;

@@ -349,15 +354,25 @@ static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
 	return get_prng_bytes(rdata, dlen, prng);
 }
 
+/*
+ *  This is the cprng_registered reset method the seed value is
+ *  interpreted as the tuple { V KEY DT}
+ *  V and KEY are required during reset, and DT is optional, detected
+ *  as being present by testing the length of the seed
+ */
 static int cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
 {
 	struct prng_context *prng = crypto_rng_ctx(tfm);
-	u8 *key = seed + DEFAULT_PRNG_KSZ;
+	u8 *key = seed + DEFAULT_BLK_SZ;
+	u8 *dt = NULL;
 
 	if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
 		return -EINVAL;
 
-	reset_prng_context(prng, key, DEFAULT_PRNG_KSZ, seed, NULL);
+	if (slen >= (2 * DEFAULT_BLK_SZ + DEFAULT_PRNG_KSZ))
+		dt = key + DEFAULT_PRNG_KSZ;
+
+	reset_prng_context(prng, key, DEFAULT_PRNG_KSZ, seed, dt);
 
 	if (prng->flags & PRNG_NEED_RESET)
 		return -EINVAL;

@@ -379,7 +394,7 @@ static struct crypto_alg rng_alg = {
 		.rng = {
 			.rng_make_random	= cprng_get_random,
 			.rng_reset		= cprng_reset,
-			.seedsize = DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ,
+			.seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ,
 		}
 	}
 };
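The first ansi_cprng hunk reverses the DT update so the carry starts at the last byte of the block, i.e. DT is stepped as a counter whose least-significant byte comes last. A standalone sketch of the corrected increment (function name is mine):

    /* Increment a counter whose low-order byte is stored last. */
    static void dt_increment(u8 *dt, int blk_sz)
    {
    	int i;

    	for (i = blk_sz - 1; i >= 0; i--) {
    		dt[i] += 1;
    		if (dt[i] != 0)
    			break;		/* no wrap, so no carry to propagate */
    	}
    }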
crypto/api.c:
@@ -300,8 +300,8 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
 	const struct crypto_type *type = tfm->__crt_alg->cra_type;
 
 	if (type) {
-		if (type->exit)
-			type->exit(tfm);
+		if (tfm->exit)
+			tfm->exit(tfm);
 		return;
 	}
 

@@ -379,17 +379,16 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
 	if (err)
 		goto out_free_tfm;
 
-	if (alg->cra_init && (err = alg->cra_init(tfm))) {
-		if (err == -EAGAIN)
-			crypto_shoot_alg(alg);
+	if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
 		goto cra_init_failed;
-	}
 
 	goto out;
 
 cra_init_failed:
 	crypto_exit_ops(tfm);
 out_free_tfm:
+	if (err == -EAGAIN)
+		crypto_shoot_alg(alg);
 	kfree(tfm);
 out_err:
 	tfm = ERR_PTR(err);

@@ -404,6 +403,9 @@ EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);
  *	@type: Type of algorithm
  *	@mask: Mask for type comparison
  *
+ *	This function should not be used by new algorithm types.
+ *	Plesae use crypto_alloc_tfm instead.
+ *
  *	crypto_alloc_base() will first attempt to locate an already loaded
  *	algorithm. If that fails and the kernel supports dynamically loadable
  *	modules, it will then attempt to load a module of the same name or

@@ -450,6 +452,111 @@ err:
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_base);
 
+struct crypto_tfm *crypto_create_tfm(struct crypto_alg *alg,
+				     const struct crypto_type *frontend)
+{
+	char *mem;
+	struct crypto_tfm *tfm = NULL;
+	unsigned int tfmsize;
+	unsigned int total;
+	int err = -ENOMEM;
+
+	tfmsize = frontend->tfmsize;
+	total = tfmsize + sizeof(*tfm) + frontend->extsize(alg, frontend);
+
+	mem = kzalloc(total, GFP_KERNEL);
+	if (mem == NULL)
+		goto out_err;
+
+	tfm = (struct crypto_tfm *)(mem + tfmsize);
+	tfm->__crt_alg = alg;
+
+	err = frontend->init_tfm(tfm, frontend);
+	if (err)
+		goto out_free_tfm;
+
+	if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
+		goto cra_init_failed;
+
+	goto out;
+
+cra_init_failed:
+	crypto_exit_ops(tfm);
+out_free_tfm:
+	if (err == -EAGAIN)
+		crypto_shoot_alg(alg);
+	kfree(mem);
+out_err:
+	tfm = ERR_PTR(err);
+out:
+	return tfm;
+}
+EXPORT_SYMBOL_GPL(crypto_create_tfm);
+
+/*
+ *	crypto_alloc_tfm - Locate algorithm and allocate transform
+ *	@alg_name: Name of algorithm
+ *	@frontend: Frontend algorithm type
+ *	@type: Type of algorithm
+ *	@mask: Mask for type comparison
+ *
+ *	crypto_alloc_tfm() will first attempt to locate an already loaded
+ *	algorithm. If that fails and the kernel supports dynamically loadable
+ *	modules, it will then attempt to load a module of the same name or
+ *	alias. If that fails it will send a query to any loaded crypto manager
+ *	to construct an algorithm on the fly. A refcount is grabbed on the
+ *	algorithm which is then associated with the new transform.
+ *
+ *	The returned transform is of a non-determinate type. Most people
+ *	should use one of the more specific allocation functions such as
+ *	crypto_alloc_blkcipher.
+ *
+ *	In case of error the return value is an error pointer.
+ */
+struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
+				    const struct crypto_type *frontend,
+				    u32 type, u32 mask)
+{
+	struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
+	struct crypto_tfm *tfm;
+	int err;
+
+	type &= frontend->maskclear;
+	mask &= frontend->maskclear;
+	type |= frontend->type;
+	mask |= frontend->maskset;
+
+	lookup = frontend->lookup ?: crypto_alg_mod_lookup;
+
+	for (;;) {
+		struct crypto_alg *alg;
+
+		alg = lookup(alg_name, type, mask);
+		if (IS_ERR(alg)) {
+			err = PTR_ERR(alg);
+			goto err;
+		}
+
+		tfm = crypto_create_tfm(alg, frontend);
+		if (!IS_ERR(tfm))
+			return tfm;
+
+		crypto_mod_put(alg);
+		err = PTR_ERR(tfm);
+
+err:
+		if (err != -EAGAIN)
+			break;
+		if (signal_pending(current)) {
+			err = -EINTR;
+			break;
+		}
+	}
+
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
+
 /*
  *	crypto_free_tfm - Free crypto transform

@@ -469,7 +576,7 @@ void crypto_free_tfm(struct crypto_tfm *tfm)
 	alg = tfm->__crt_alg;
 	size = sizeof(*tfm) + alg->cra_ctxsize;
 
-	if (alg->cra_exit)
+	if (!tfm->exit && alg->cra_exit)
 		alg->cra_exit(tfm);
 	crypto_exit_ops(tfm);
 	crypto_mod_put(alg);
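Two points in the api.c changes are easy to miss. First, a frontend's init_tfm() may install tfm->exit, and both allocation paths now skip the legacy cra_init/cra_exit pair when it is set. Second, crypto_alloc_tfm() retries the lookup on -EAGAIN — as far as I can tell, the case of an algorithm instance still being built or self-tested — until it resolves or the caller is interrupted by a signal. A rough sketch of a frontend supplying the fields crypto_create_tfm() consumes (all names here are hypothetical):

    struct demo_tfm {
    	int private_part;		/* hypothetical frontend state */
    	struct crypto_tfm base;		/* crypto_create_tfm() fills this in */
    };

    static unsigned int demo_extsize(struct crypto_alg *alg,
    				 const struct crypto_type *frontend)
    {
    	return alg->cra_ctxsize;	/* extra bytes appended after the tfm */
    }

    static int demo_init_tfm(struct crypto_tfm *tfm,
    			 const struct crypto_type *frontend)
    {
    	return 0;			/* could install tfm->exit here */
    }

    static const struct crypto_type demo_type = {
    	.extsize	= demo_extsize,
    	.init_tfm	= demo_init_tfm,
    	.maskclear	= ~CRYPTO_ALG_TYPE_MASK,
    	.maskset	= CRYPTO_ALG_TYPE_MASK,
    	.tfmsize	= offsetof(struct demo_tfm, base),
    };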
crypto/authenc.c:

@@ -11,6 +11,7 @@
  */
 
 #include <crypto/aead.h>
+#include <crypto/internal/hash.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/authenc.h>
 #include <crypto/scatterwalk.h>

@@ -431,6 +432,8 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
 	inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
 	inst->alg.cra_aead.maxauthsize = auth->cra_type == &crypto_hash_type ?
 					 auth->cra_hash.digestsize :
+					 auth->cra_type ?
+					 __crypto_shash_alg(auth)->digestsize :
 					 auth->cra_digest.dia_digestsize;
 
 	inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
crypto/camellia.c:

@@ -35,6 +35,8 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/bitops.h>
+#include <asm/unaligned.h>
 
 static const u32 camellia_sp1110[256] = {
 	0x70707000,0x82828200,0x2c2c2c00,0xececec00,

@@ -335,20 +337,6 @@ static const u32 camellia_sp4404[256] = {
 /*
  * macros
  */
-#define GETU32(v, pt) \
-    do { \
-	/* latest breed of gcc is clever enough to use move */ \
-	memcpy(&(v), (pt), 4); \
-	(v) = be32_to_cpu(v); \
-    } while(0)
-
-/* rotation right shift 1byte */
-#define ROR8(x) (((x) >> 8) + ((x) << 24))
-/* rotation left shift 1bit */
-#define ROL1(x) (((x) << 1) + ((x) >> 31))
-/* rotation left shift 1byte */
-#define ROL8(x) (((x) << 8) + ((x) >> 24))
-
 #define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \
     do { \
 	w0 = ll; \

@@ -383,7 +371,7 @@
 		^ camellia_sp3033[(u8)(il >> 8)] \
 		^ camellia_sp4404[(u8)(il     )]; \
 	yl ^= yr; \
-	yr = ROR8(yr); \
+	yr = ror32(yr, 8); \
 	yr ^= yl; \
     } while(0)
 

@@ -405,7 +393,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	subL[7] ^= subL[1]; subR[7] ^= subR[1];
 	subL[1] ^= subR[1] & ~subR[9];
 	dw = subL[1] & subL[9],
-		subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */
+		subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */
 	/* round 8 */
 	subL[11] ^= subL[1]; subR[11] ^= subR[1];
 	/* round 10 */

@@ -414,7 +402,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	subL[15] ^= subL[1]; subR[15] ^= subR[1];
 	subL[1] ^= subR[1] & ~subR[17];
 	dw = subL[1] & subL[17],
-		subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */
+		subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */
 	/* round 14 */
 	subL[19] ^= subL[1]; subR[19] ^= subR[1];
 	/* round 16 */

@@ -430,7 +418,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	} else {
 		subL[1] ^= subR[1] & ~subR[25];
 		dw = subL[1] & subL[25],
-			subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */
+			subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */
 		/* round 20 */
 		subL[27] ^= subL[1]; subR[27] ^= subR[1];
 		/* round 22 */

@@ -450,7 +438,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	subL[26] ^= kw4l; subR[26] ^= kw4r;
 	kw4l ^= kw4r & ~subR[24];
 	dw = kw4l & subL[24],
-		kw4r ^= ROL1(dw); /* modified for FL(kl5) */
+		kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */
 	}
 	/* round 17 */
 	subL[22] ^= kw4l; subR[22] ^= kw4r;

@@ -460,7 +448,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	subL[18] ^= kw4l; subR[18] ^= kw4r;
 	kw4l ^= kw4r & ~subR[16];
 	dw = kw4l & subL[16],
-		kw4r ^= ROL1(dw); /* modified for FL(kl3) */
+		kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */
 	/* round 11 */
 	subL[14] ^= kw4l; subR[14] ^= kw4r;
 	/* round 9 */

@@ -469,7 +457,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	subL[10] ^= kw4l; subR[10] ^= kw4r;
 	kw4l ^= kw4r & ~subR[8];
 	dw = kw4l & subL[8],
-		kw4r ^= ROL1(dw); /* modified for FL(kl1) */
+		kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */
 	/* round 5 */
 	subL[6] ^= kw4l; subR[6] ^= kw4r;
 	/* round 3 */

@@ -494,7 +482,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	SUBKEY_R(6) = subR[5] ^ subR[7];
 	tl = subL[10] ^ (subR[10] & ~subR[8]);
 	dw = tl & subL[8],  /* FL(kl1) */
-	tr = subR[10] ^ ROL1(dw);
+	tr = subR[10] ^ rol32(dw, 1);
 	SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */
 	SUBKEY_R(7) = subR[6] ^ tr;
 	SUBKEY_L(8) = subL[8]; /* FL(kl1) */

@@ -503,7 +491,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	SUBKEY_R(9) = subR[9];
 	tl = subL[7] ^ (subR[7] & ~subR[9]);
 	dw = tl & subL[9],  /* FLinv(kl2) */
-	tr = subR[7] ^ ROL1(dw);
+	tr = subR[7] ^ rol32(dw, 1);
 	SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */
 	SUBKEY_R(10) = tr ^ subR[11];
 	SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */

@@ -516,7 +504,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	SUBKEY_R(14) = subR[13] ^ subR[15];
 	tl = subL[18] ^ (subR[18] & ~subR[16]);
 	dw = tl & subL[16], /* FL(kl3) */
-	tr = subR[18] ^ ROL1(dw);
+	tr = subR[18] ^ rol32(dw, 1);
 	SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */
 	SUBKEY_R(15) = subR[14] ^ tr;
 	SUBKEY_L(16) = subL[16]; /* FL(kl3) */

@@ -525,7 +513,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	SUBKEY_R(17) = subR[17];
 	tl = subL[15] ^ (subR[15] & ~subR[17]);
 	dw = tl & subL[17], /* FLinv(kl4) */
-	tr = subR[15] ^ ROL1(dw);
+	tr = subR[15] ^ rol32(dw, 1);
 	SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */
 	SUBKEY_R(18) = tr ^ subR[19];
 	SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */

@@ -544,7 +532,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	} else {
 		tl = subL[26] ^ (subR[26] & ~subR[24]);
 		dw = tl & subL[24], /* FL(kl5) */
-		tr = subR[26] ^ ROL1(dw);
+		tr = subR[26] ^ rol32(dw, 1);
 		SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */
 		SUBKEY_R(23) = subR[22] ^ tr;
 		SUBKEY_L(24) = subL[24]; /* FL(kl5) */

@@ -553,7 +541,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 		SUBKEY_R(25) = subR[25];
 		tl = subL[23] ^ (subR[23] & ~subR[25]);
 		dw = tl & subL[25], /* FLinv(kl6) */
-		tr = subR[23] ^ ROL1(dw);
+		tr = subR[23] ^ rol32(dw, 1);
 		SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */
 		SUBKEY_R(26) = tr ^ subR[27];
 		SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */

@@ -573,17 +561,17 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	/* apply the inverse of the last half of P-function */
 	i = 2;
 	do {
-		dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = ROL8(dw);/* round 1 */
+		dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = rol32(dw, 8);/* round 1 */
 		SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw;
-		dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = ROL8(dw);/* round 2 */
+		dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = rol32(dw, 8);/* round 2 */
 		SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw;
-		dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = ROL8(dw);/* round 3 */
+		dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = rol32(dw, 8);/* round 3 */
 		SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw;
-		dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = ROL8(dw);/* round 4 */
+		dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = rol32(dw, 8);/* round 4 */
 		SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw;
-		dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = ROL8(dw);/* round 5 */
+		dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = rol32(dw, 8);/* round 5 */
 		SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw;
-		dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = ROL8(dw);/* round 6 */
+		dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = rol32(dw, 8);/* round 6 */
 		SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw;
 		i += 8;
 	} while (i < max);

@@ -599,10 +587,10 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey)
 	/**
 	 *  k == kll || klr || krl || krr (|| is concatenation)
 	 */
-	GETU32(kll, key     );
-	GETU32(klr, key +  4);
-	GETU32(krl, key +  8);
-	GETU32(krr, key + 12);
+	kll = get_unaligned_be32(key);
+	klr = get_unaligned_be32(key + 4);
+	krl = get_unaligned_be32(key + 8);
+	krr = get_unaligned_be32(key + 12);
 
 	/* generate KL dependent subkeys */
 	/* kw1 */

@@ -707,14 +695,14 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey)
 	 * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr)
 	 *       (|| is concatenation)
 	 */
-	GETU32(kll,  key     );
-	GETU32(klr,  key +  4);
-	GETU32(krl,  key +  8);
-	GETU32(krr,  key + 12);
-	GETU32(krll, key + 16);
-	GETU32(krlr, key + 20);
-	GETU32(krrl, key + 24);
-	GETU32(krrr, key + 28);
+	kll = get_unaligned_be32(key);
+	klr = get_unaligned_be32(key + 4);
+	krl = get_unaligned_be32(key + 8);
+	krr = get_unaligned_be32(key + 12);
+	krll = get_unaligned_be32(key + 16);
+	krlr = get_unaligned_be32(key + 20);
+	krrl = get_unaligned_be32(key + 24);
+	krrr = get_unaligned_be32(key + 28);
 
 	/* generate KL dependent subkeys */
 	/* kw1 */

@@ -870,13 +858,13 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
 		t0 &= ll; \
 		t2 |= rr; \
 		rl ^= t2; \
-		lr ^= ROL1(t0); \
+		lr ^= rol32(t0, 1); \
 		t3 = krl; \
 		t1 = klr; \
 		t3 &= rl; \
 		t1 |= lr; \
 		ll ^= t1; \
-		rr ^= ROL1(t3); \
+		rr ^= rol32(t3, 1); \
 	} while(0)
 
 #define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \

@@ -892,7 +880,7 @@
 		il ^= kl; \
 		ir ^= il ^ kr; \
 		yl ^= ir; \
-		yr ^= ROR8(il) ^ ir; \
+		yr ^= ror32(il, 8) ^ ir; \
 	} while(0)
 
 /* max = 24: 128bit encrypt, max = 32: 256bit encrypt */
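The camellia changes are mechanical: the file-private GETU32/ROL1/ROL8/ROR8 macros give way to helpers the kernel already provides. For reference, the generic rotate helpers from <linux/bitops.h> are (paraphrased):

    static inline u32 rol32(u32 word, unsigned int shift)
    {
    	return (word << shift) | (word >> (32 - shift));
    }

    static inline u32 ror32(u32 word, unsigned int shift)
    {
    	return (word >> shift) | (word << (32 - shift));
    }

Likewise, get_unaligned_be32() from <asm/unaligned.h> performs the load-plus-byteswap that GETU32 open-coded, without assuming alignment.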
crypto/crc32c.c:
@@ -3,8 +3,29 @@
  *
  * CRC32C chksum
  *
- * This module file is a wrapper to invoke the lib/crc32c routines.
+ *@Article{castagnoli-crc,
+ * author =       { Guy Castagnoli and Stefan Braeuer and Martin Herrman},
+ * title =        {{Optimization of Cyclic Redundancy-Check Codes with 24
+ *                 and 32 Parity Bits}},
+ * journal =      IEEE Transactions on Communication,
+ * year =         {1993},
+ * volume =       {41},
+ * number =       {6},
+ * pages =        {},
+ * month =        {June},
+ *}
+ * Used by the iSCSI driver, possibly others, and derived from the
+ * the iscsi-crc.c module of the linux-iscsi driver at
+ * http://linux-iscsi.sourceforge.net.
+ *
+ * Following the example of lib/crc32, this function is intended to be
+ * flexible and useful for all users.  Modules that currently have their
+ * own crc32c, but hopefully may be able to use this one are:
+ *  net/sctp (please add all your doco to here if you change to
+ *            use this one!)
+ *  <endoflist>
  *
  * Copyright (c) 2004 Cisco Systems, Inc.
+ * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it

@@ -18,27 +39,121 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/string.h>
-#include <linux/crc32c.h>
 #include <linux/kernel.h>
 
 #define CHKSUM_BLOCK_SIZE	1
 #define CHKSUM_DIGEST_SIZE	4
 
 struct chksum_ctx {
-	u32 crc;
 	u32 key;
 };
 
+struct chksum_desc_ctx {
+	u32 crc;
+};
+
+/*
+ * This is the CRC-32C table
+ * Generated with:
+ * width = 32 bits
+ * poly = 0x1EDC6F41
+ * reflect input bytes = true
+ * reflect output bytes = true
+ */
+
+static const u32 crc32c_table[256] = {
+	0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L,
+	0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL,
+	0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL,
+	0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L, 0x5E133C24L,
+	0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL,
+	0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L,
+	0x9A879FA0L, 0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L,
+	0x5D1D08BFL, 0xAF768BBCL, 0xBC267848L, 0x4E4DFB4BL,
+	0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L, 0x33ED7D2AL,
+	0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L,
+	0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L,
+	0x6DFE410EL, 0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL,
+	0x30E349B1L, 0xC288CAB2L, 0xD1D83946L, 0x23B3BA45L,
+	0xF779DEAEL, 0x05125DADL, 0x1642AE59L, 0xE4292D5AL,
+	0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL,
+	0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L,
+	0x417B1DBCL, 0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L,
+	0x86E18AA3L, 0x748A09A0L, 0x67DAFA54L, 0x95B17957L,
+	0xCBA24573L, 0x39C9C670L, 0x2A993584L, 0xD8F2B687L,
+	0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L,
+	0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L,
+	0x96BF4DCCL, 0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L,
+	0xDBFC821CL, 0x2997011FL, 0x3AC7F2EBL, 0xC8AC71E8L,
+	0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L, 0x0F36E6F7L,
+	0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L,
+	0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L,
+	0xEB1FCBADL, 0x197448AEL, 0x0A24BB5AL, 0xF84F3859L,
+	0x2C855CB2L, 0xDEEEDFB1L, 0xCDBE2C45L, 0x3FD5AF46L,
+	0x7198540DL, 0x83F3D70EL, 0x90A324FAL, 0x62C8A7F9L,
+	0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L,
+	0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L,
+	0x3CDB9BDDL, 0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L,
+	0x82F63B78L, 0x709DB87BL, 0x63CD4B8FL, 0x91A6C88CL,
+	0x456CAC67L, 0xB7072F64L, 0xA457DC90L, 0x563C5F93L,
+	0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L,
+	0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL,
+	0x92A8FC17L, 0x60C37F14L, 0x73938CE0L, 0x81F80FE3L,
+	0x55326B08L, 0xA759E80BL, 0xB4091BFFL, 0x466298FCL,
+	0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL, 0x0B21572CL,
+	0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L,
+	0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L,
+	0x65D122B9L, 0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL,
+	0x2892ED69L, 0xDAF96E6AL, 0xC9A99D9EL, 0x3BC21E9DL,
+	0xEF087A76L, 0x1D63F975L, 0x0E330A81L, 0xFC588982L,
+	0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL,
+	0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L,
+	0x38CC2A06L, 0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L,
+	0xFF56BD19L, 0x0D3D3E1AL, 0x1E6DCDEEL, 0xEC064EEDL,
+	0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L, 0xD0DDD530L,
+	0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL,
+	0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL,
+	0x8ECEE914L, 0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L,
+	0xD3D3E1ABL, 0x21B862A8L, 0x32E8915CL, 0xC083125FL,
+	0x144976B4L, 0xE622F5B7L, 0xF5720643L, 0x07198540L,
+	0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L,
+	0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL,
+	0xE330A81AL, 0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL,
+	0x24AA3F05L, 0xD6C1BC06L, 0xC5914FF2L, 0x37FACCF1L,
+	0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L, 0x7AB90321L,
+	0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL,
+	0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L,
+	0x34F4F86AL, 0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL,
+	0x79B737BAL, 0x8BDCB4B9L, 0x988C474DL, 0x6AE7C44EL,
+	0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L, 0xAD7D5351L
+};
+
+/*
+ * Steps through buffer one byte at at time, calculates reflected
+ * crc using table.
+ */
+
+static u32 crc32c(u32 crc, const u8 *data, unsigned int length)
+{
+	while (length--)
+		crc = crc32c_table[(crc ^ *data++) & 0xFFL] ^ (crc >> 8);
+
+	return crc;
+}
+
 /*
  * Steps through buffer one byte at at time, calculates reflected
  * crc using table.
 */
 
-static void chksum_init(struct crypto_tfm *tfm)
+static int chksum_init(struct shash_desc *desc)
 {
-	struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
+	struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
 
-	mctx->crc = mctx->key;
+	ctx->crc = mctx->key;
+
+	return 0;
 }
 
 /*

@@ -46,35 +161,59 @@ static void chksum_init(struct crypto_tfm *tfm)
  * If your algorithm starts with ~0, then XOR with ~0 before you set
  * the seed.
  */
-static int chksum_setkey(struct crypto_tfm *tfm, const u8 *key,
+static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
 			 unsigned int keylen)
 {
-	struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
+	struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
 
-	if (keylen != sizeof(mctx->crc)) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+	if (keylen != sizeof(mctx->key)) {
+		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 	mctx->key = le32_to_cpu(*(__le32 *)key);
 	return 0;
 }
 
-static void chksum_update(struct crypto_tfm *tfm, const u8 *data,
-			  unsigned int length)
+static int chksum_update(struct shash_desc *desc, const u8 *data,
+			 unsigned int length)
 {
-	struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
 
-	mctx->crc = crc32c(mctx->crc, data, length);
+	ctx->crc = crc32c(ctx->crc, data, length);
+	return 0;
 }
 
-static void chksum_final(struct crypto_tfm *tfm, u8 *out)
+static int chksum_final(struct shash_desc *desc, u8 *out)
 {
-	struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
-
-	*(__le32 *)out = ~cpu_to_le32(mctx->crc);
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	*(__le32 *)out = ~cpu_to_le32p(&ctx->crc);
+	return 0;
 }
 
-static int crc32c_cra_init_old(struct crypto_tfm *tfm)
+static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out)
+{
+	*(__le32 *)out = ~cpu_to_le32(crc32c(*crcp, data, len));
+	return 0;
+}
+
+static int chksum_finup(struct shash_desc *desc, const u8 *data,
+			unsigned int len, u8 *out)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	return __chksum_finup(&ctx->crc, data, len, out);
+}
+
+static int chksum_digest(struct shash_desc *desc, const u8 *data,
+			 unsigned int length, u8 *out)
+{
+	struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+
+	return __chksum_finup(&mctx->key, data, length, out);
+}
+
+static int crc32c_cra_init(struct crypto_tfm *tfm)
 {
 	struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
 

@@ -82,144 +221,35 @@ static int crc32c_cra_init_old(struct crypto_tfm *tfm)
 	return 0;
 }
 
-static struct crypto_alg old_alg = {
-	.cra_name	= "crc32c",
-	.cra_flags	= CRYPTO_ALG_TYPE_DIGEST,
-	.cra_blocksize	= CHKSUM_BLOCK_SIZE,
-	.cra_ctxsize	= sizeof(struct chksum_ctx),
-	.cra_module	= THIS_MODULE,
-	.cra_list	= LIST_HEAD_INIT(old_alg.cra_list),
-	.cra_init	= crc32c_cra_init_old,
-	.cra_u		= {
-		.digest = {
-			.dia_digestsize	= CHKSUM_DIGEST_SIZE,
-			.dia_setkey	= chksum_setkey,
-			.dia_init	= chksum_init,
-			.dia_update	= chksum_update,
-			.dia_final	= chksum_final
-		}
-	}
-};
-
-/*
- * Setting the seed allows arbitrary accumulators and flexible XOR policy
- * If your algorithm starts with ~0, then XOR with ~0 before you set
- * the seed.
- */
-static int crc32c_setkey(struct crypto_ahash *hash, const u8 *key,
-			 unsigned int keylen)
-{
-	u32 *mctx = crypto_ahash_ctx(hash);
-
-	if (keylen != sizeof(u32)) {
-		crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
-	*mctx = le32_to_cpup((__le32 *)key);
-	return 0;
-}
-
-static int crc32c_init(struct ahash_request *req)
-{
-	u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
-	u32 *crcp = ahash_request_ctx(req);
-
-	*crcp = *mctx;
-	return 0;
-}
-
-static int crc32c_update(struct ahash_request *req)
-{
-	struct crypto_hash_walk walk;
-	u32 *crcp = ahash_request_ctx(req);
-	u32 crc = *crcp;
-	int nbytes;
-
-	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
-	     nbytes = crypto_hash_walk_done(&walk, 0))
-		crc = crc32c(crc, walk.data, nbytes);
-
-	*crcp = crc;
-	return 0;
-}
-
-static int crc32c_final(struct ahash_request *req)
-{
-	u32 *crcp = ahash_request_ctx(req);
-
-	*(__le32 *)req->result = ~cpu_to_le32p(crcp);
-	return 0;
-}
-
-static int crc32c_digest(struct ahash_request *req)
-{
-	struct crypto_hash_walk walk;
-	u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
-	u32 crc = *mctx;
-	int nbytes;
-
-	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
-	     nbytes = crypto_hash_walk_done(&walk, 0))
-		crc = crc32c(crc, walk.data, nbytes);
-
-	*(__le32 *)req->result = ~cpu_to_le32(crc);
-	return 0;
-}
-
-static int crc32c_cra_init(struct crypto_tfm *tfm)
-{
-	u32 *key = crypto_tfm_ctx(tfm);
-
-	*key = ~0;
-
-	tfm->crt_ahash.reqsize = sizeof(u32);
-
-	return 0;
-}
-
-static struct crypto_alg alg = {
-	.cra_name		= "crc32c",
-	.cra_driver_name	= "crc32c-generic",
-	.cra_priority		= 100,
-	.cra_flags		= CRYPTO_ALG_TYPE_AHASH,
-	.cra_blocksize		= CHKSUM_BLOCK_SIZE,
-	.cra_alignmask		= 3,
-	.cra_ctxsize		= sizeof(u32),
-	.cra_module		= THIS_MODULE,
-	.cra_list		= LIST_HEAD_INIT(alg.cra_list),
-	.cra_init		= crc32c_cra_init,
-	.cra_type		= &crypto_ahash_type,
-	.cra_u			= {
-		.ahash = {
-			.digestsize	= CHKSUM_DIGEST_SIZE,
-			.setkey		= crc32c_setkey,
-			.init		= crc32c_init,
-			.update		= crc32c_update,
-			.final		= crc32c_final,
-			.digest		= crc32c_digest,
-		}
+static struct shash_alg alg = {
+	.digestsize		= CHKSUM_DIGEST_SIZE,
+	.setkey			= chksum_setkey,
+	.init			= chksum_init,
+	.update			= chksum_update,
+	.final			= chksum_final,
+	.finup			= chksum_finup,
+	.digest			= chksum_digest,
+	.descsize		= sizeof(struct chksum_desc_ctx),
+	.base			= {
+		.cra_name		= "crc32c",
+		.cra_driver_name	= "crc32c-generic",
+		.cra_priority		= 100,
+		.cra_blocksize		= CHKSUM_BLOCK_SIZE,
+		.cra_alignmask		= 3,
+		.cra_ctxsize		= sizeof(struct chksum_ctx),
+		.cra_module		= THIS_MODULE,
+		.cra_init		= crc32c_cra_init,
 	}
 };
 
 static int __init crc32c_mod_init(void)
 {
-	int err;
-
-	err = crypto_register_alg(&old_alg);
-	if (err)
-		return err;
-
-	err = crypto_register_alg(&alg);
-	if (err)
-		crypto_unregister_alg(&old_alg);
-
-	return err;
+	return crypto_register_shash(&alg);
 }
 
 static void __exit crc32c_mod_fini(void)
 {
-	crypto_unregister_alg(&alg);
-	crypto_unregister_alg(&old_alg);
+	crypto_unregister_shash(&alg);
 }
 
 module_init(crc32c_mod_init);
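With this change the generic crc32c stops wrapping lib/crc32c and carries its own table. The table is the standard reflected construction for the Castagnoli polynomial; a sketch of how such a table can be generated (the reflected form of 0x1EDC6F41 is 0x82F63B78; the function name is mine):

    static void crc32c_build_table(u32 table[256])
    {
    	u32 crc;
    	int i, j;

    	for (i = 0; i < 256; i++) {
    		crc = i;
    		for (j = 0; j < 8; j++)		/* one shift per input bit */
    			crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78 : crc >> 1;
    		table[i] = crc;
    	}
    }

As a spot check, this yields table[1] = 0xF26B8303, matching the second entry of crc32c_table above.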
crypto/crypto_null.c:

@@ -17,6 +17,7 @@
  *
  */
 
+#include <crypto/internal/hash.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/init.h>
 #include <linux/module.h>

@@ -38,15 +39,31 @@ static int null_compress(struct crypto_tfm *tfm, const u8 *src,
 	return 0;
 }
 
-static void null_init(struct crypto_tfm *tfm)
-{ }
+static int null_init(struct shash_desc *desc)
+{
+	return 0;
+}
 
-static void null_update(struct crypto_tfm *tfm, const u8 *data,
-			unsigned int len)
-{ }
+static int null_update(struct shash_desc *desc, const u8 *data,
+		       unsigned int len)
+{
+	return 0;
+}
 
-static void null_final(struct crypto_tfm *tfm, u8 *out)
-{ }
+static int null_final(struct shash_desc *desc, u8 *out)
+{
+	return 0;
+}
+
+static int null_digest(struct shash_desc *desc, const u8 *data,
+		       unsigned int len, u8 *out)
+{
+	return 0;
+}
+
+static int null_hash_setkey(struct crypto_shash *tfm, const u8 *key,
+			    unsigned int keylen)
+{ return 0; }
 
 static int null_setkey(struct crypto_tfm *tfm, const u8 *key,
 		       unsigned int keylen)

@@ -89,19 +106,20 @@ static struct crypto_alg compress_null = {
 	.coa_decompress		= null_compress } }
 };
 
-static struct crypto_alg digest_null = {
-	.cra_name		= "digest_null",
-	.cra_flags		= CRYPTO_ALG_TYPE_DIGEST,
-	.cra_blocksize		= NULL_BLOCK_SIZE,
-	.cra_ctxsize		= 0,
-	.cra_module		= THIS_MODULE,
-	.cra_list		= LIST_HEAD_INIT(digest_null.cra_list),
-	.cra_u			= { .digest = {
-	.dia_digestsize		= NULL_DIGEST_SIZE,
-	.dia_setkey		= null_setkey,
-	.dia_init		= null_init,
-	.dia_update		= null_update,
-	.dia_final		= null_final } }
+static struct shash_alg digest_null = {
+	.digestsize		= NULL_DIGEST_SIZE,
+	.setkey			= null_hash_setkey,
+	.init			= null_init,
+	.update			= null_update,
+	.finup			= null_digest,
+	.digest			= null_digest,
+	.final			= null_final,
+	.base			= {
+		.cra_name		= "digest_null",
+		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize		= NULL_BLOCK_SIZE,
+		.cra_module		= THIS_MODULE,
+	}
 };
 
 static struct crypto_alg cipher_null = {

@@ -154,7 +172,7 @@ static int __init crypto_null_mod_init(void)
 	if (ret < 0)
 		goto out_unregister_cipher;
 
-	ret = crypto_register_alg(&digest_null);
+	ret = crypto_register_shash(&digest_null);
 	if (ret < 0)
 		goto out_unregister_skcipher;
 

@@ -166,7 +184,7 @@ out:
 	return ret;
 
 out_unregister_digest:
-	crypto_unregister_alg(&digest_null);
+	crypto_unregister_shash(&digest_null);
 out_unregister_skcipher:
 	crypto_unregister_alg(&skcipher_null);
 out_unregister_cipher:

@@ -177,7 +195,7 @@ out_unregister_cipher:
 static void __exit crypto_null_mod_fini(void)
 {
 	crypto_unregister_alg(&compress_null);
-	crypto_unregister_alg(&digest_null);
+	crypto_unregister_shash(&digest_null);
 	crypto_unregister_alg(&skcipher_null);
 	crypto_unregister_alg(&cipher_null);
 }
crypto/des_generic.c:

@@ -868,9 +868,10 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
 	u32 *flags = &tfm->crt_flags;
 
 	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
-		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
+		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+		     (*flags & CRYPTO_TFM_REQ_WEAK_KEY))
 	{
-		*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
+		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
 		return -EINVAL;
 	}
 
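The des3_ede change aligns triple-DES with single DES: a degenerate key (K1 == K2 or K2 == K3, which collapses EDE to a single DES pass) is now rejected only when the caller opted into weak-key checking, and it is reported as a weak key rather than a bad schedule. A hedged caller sketch:

    struct crypto_cipher *cipher;

    cipher = crypto_alloc_cipher("des3_ede", 0, 0);	/* IS_ERR() check omitted */
    crypto_cipher_set_flags(cipher, CRYPTO_TFM_REQ_WEAK_KEY);
    if (crypto_cipher_setkey(cipher, key, 24))
    	/* with REQ_WEAK_KEY set, a degenerate key fails with
    	 * CRYPTO_TFM_RES_WEAK_KEY; without the flag it is accepted */
    	printk(KERN_WARNING "weak 3DES key rejected\n");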
crypto/fcrypt.c:

@@ -73,7 +73,7 @@ do { \
  * /afs/transarc.com/public/afsps/afs.rel31b.export-src/rxkad/sboxes.h
  */
 #undef Z
-#define Z(x) __constant_cpu_to_be32(x << 3)
+#define Z(x) cpu_to_be32(x << 3)
 static const __be32 sbox0[256] = {
 	Z(0xea), Z(0x7f), Z(0xb2), Z(0x64), Z(0x9d), Z(0xb0), Z(0xd9), Z(0x11),
 	Z(0xcd), Z(0x86), Z(0x86), Z(0x91), Z(0x0a), Z(0xb2), Z(0x93), Z(0x06),

@@ -110,7 +110,7 @@ static const __be32 sbox0[256] = {
 };
 
 #undef Z
-#define Z(x) __constant_cpu_to_be32((x << 27) | (x >> 5))
+#define Z(x) cpu_to_be32((x << 27) | (x >> 5))
 static const __be32 sbox1[256] = {
 	Z(0x77), Z(0x14), Z(0xa6), Z(0xfe), Z(0xb2), Z(0x5e), Z(0x8c), Z(0x3e),
 	Z(0x67), Z(0x6c), Z(0xa1), Z(0x0d), Z(0xc2), Z(0xa2), Z(0xc1), Z(0x85),

@@ -147,7 +147,7 @@ static const __be32 sbox1[256] = {
 };
 
 #undef Z
-#define Z(x) __constant_cpu_to_be32(x << 11)
+#define Z(x) cpu_to_be32(x << 11)
 static const __be32 sbox2[256] = {
 	Z(0xf0), Z(0x37), Z(0x24), Z(0x53), Z(0x2a), Z(0x03), Z(0x83), Z(0x86),
 	Z(0xd1), Z(0xec), Z(0x50), Z(0xf0), Z(0x42), Z(0x78), Z(0x2f), Z(0x6d),

@@ -184,7 +184,7 @@ static const __be32 sbox2[256] = {
 };
 
 #undef Z
-#define Z(x) __constant_cpu_to_be32(x << 19)
+#define Z(x) cpu_to_be32(x << 19)
 static const __be32 sbox3[256] = {
 	Z(0xa9), Z(0x2a), Z(0x48), Z(0x51), Z(0x84), Z(0x7e), Z(0x49), Z(0xe2),
 	Z(0xb5), Z(0xb7), Z(0x42), Z(0x33), Z(0x7d), Z(0x5d), Z(0xa6), Z(0x12),
crypto/hmac.c:

@@ -16,7 +16,7 @@
  *
  */
 
-#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/init.h>

@@ -238,9 +238,11 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
 		return ERR_CAST(alg);
 
 	inst = ERR_PTR(-EINVAL);
-	ds = (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	     CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
-				    alg->cra_digest.dia_digestsize;
+	ds = alg->cra_type == &crypto_hash_type ?
+	     alg->cra_hash.digestsize :
+	     alg->cra_type ?
+	     __crypto_shash_alg(alg)->digestsize :
+	     alg->cra_digest.dia_digestsize;
 	if (ds > alg->cra_blocksize)
 		goto out_put_alg;
 

crypto/internal.h:

@@ -109,6 +109,8 @@ void crypto_alg_tested(const char *name, int err);
 void crypto_shoot_alg(struct crypto_alg *alg);
 struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
 				      u32 mask);
+struct crypto_tfm *crypto_create_tfm(struct crypto_alg *alg,
+				     const struct crypto_type *frontend);
 
 int crypto_register_instance(struct crypto_template *tmpl,
 			     struct crypto_instance *inst);
crypto/md4.c:
@ -20,8 +20,8 @@
|
|||
* (at your option) any later version.
|
||||
*
|
||||
*/
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
|
@ -58,7 +58,7 @@ static inline u32 H(u32 x, u32 y, u32 z)
|
|||
{
|
||||
return x ^ y ^ z;
|
||||
}
|
||||
|
||||
|
||||
#define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s))
|
||||
#define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s))
|
||||
#define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s))
|
||||
|
@ -148,24 +148,26 @@ static void md4_transform(u32 *hash, u32 const *in)
|
|||
|
||||
static inline void md4_transform_helper(struct md4_ctx *ctx)
|
||||
{
|
||||
le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
|
||||
le32_to_cpu_array(ctx->block, ARRAY_SIZE(ctx->block));
|
||||
md4_transform(ctx->hash, ctx->block);
|
||||
}
|
||||
|
||||
static void md4_init(struct crypto_tfm *tfm)
|
||||
static int md4_init(struct shash_desc *desc)
|
||||
{
|
||||
struct md4_ctx *mctx = crypto_tfm_ctx(tfm);
|
||||
struct md4_ctx *mctx = shash_desc_ctx(desc);
|
||||
|
||||
mctx->hash[0] = 0x67452301;
|
||||
mctx->hash[1] = 0xefcdab89;
|
||||
mctx->hash[2] = 0x98badcfe;
|
||||
mctx->hash[3] = 0x10325476;
|
||||
mctx->byte_count = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void md4_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
|
||||
static int md4_update(struct shash_desc *desc, const u8 *data, unsigned int len)
|
||||
{
|
||||
struct md4_ctx *mctx = crypto_tfm_ctx(tfm);
|
||||
struct md4_ctx *mctx = shash_desc_ctx(desc);
|
||||
const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
|
||||
|
||||
mctx->byte_count += len;
|
||||
|
@ -173,7 +175,7 @@ static void md4_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
|
|||
if (avail > len) {
|
||||
memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
|
||||
data, len);
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
|
||||
memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
|
||||
|
@ -191,11 +193,13 @@ static void md4_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
|
|||
}
|
||||
|
||||
memcpy(mctx->block, data, len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void md4_final(struct crypto_tfm *tfm, u8 *out)
|
||||
static int md4_final(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
struct md4_ctx *mctx = crypto_tfm_ctx(tfm);
|
||||
struct md4_ctx *mctx = shash_desc_ctx(desc);
|
||||
const unsigned int offset = mctx->byte_count & 0x3f;
|
||||
char *p = (char *)mctx->block + offset;
|
||||
int padding = 56 - (offset + 1);
|
||||
|
@ -214,33 +218,35 @@ static void md4_final(struct crypto_tfm *tfm, u8 *out)
|
|||
le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
|
||||
sizeof(u64)) / sizeof(u32));
|
||||
md4_transform(mctx->hash, mctx->block);
|
||||
cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32));
|
||||
cpu_to_le32_array(mctx->hash, ARRAY_SIZE(mctx->hash));
|
||||
memcpy(out, mctx->hash, sizeof(mctx->hash));
|
||||
memset(mctx, 0, sizeof(*mctx));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct crypto_alg alg = {
|
||||
.cra_name = "md4",
|
||||
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
|
||||
.cra_blocksize = MD4_HMAC_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct md4_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(alg.cra_list),
|
||||
.cra_u = { .digest = {
|
||||
.dia_digestsize = MD4_DIGEST_SIZE,
|
||||
.dia_init = md4_init,
|
||||
.dia_update = md4_update,
|
||||
.dia_final = md4_final } }
|
||||
static struct shash_alg alg = {
|
||||
.digestsize = MD4_DIGEST_SIZE,
|
||||
.init = md4_init,
|
||||
.update = md4_update,
|
||||
.final = md4_final,
|
||||
.descsize = sizeof(struct md4_ctx),
|
||||
.base = {
|
||||
.cra_name = "md4",
|
||||
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
|
||||
.cra_blocksize = MD4_HMAC_BLOCK_SIZE,
|
||||
.cra_module = THIS_MODULE,
|
||||
}
|
||||
};
|
||||
|
||||
static int __init md4_mod_init(void)
|
||||
{
|
||||
return crypto_register_alg(&alg);
|
||||
return crypto_register_shash(&alg);
|
||||
}
|
||||
|
||||
static void __exit md4_mod_fini(void)
|
||||
{
|
||||
crypto_unregister_alg(&alg);
|
||||
crypto_unregister_shash(&alg);
|
||||
}
|
||||
|
||||
module_init(md4_mod_init);
|
||||
|
|
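The pattern above repeats across every digest conversion in this series: per-message state moves from the tfm context into the shash descriptor, the callbacks return int, and registration goes through crypto_register_shash(). For orientation, a minimal caller of the new interface might look like the sketch below. This is illustrative only, not part of the commit; error handling is abbreviated, and "md4" stands in for any registered shash algorithm.

    /*
     * Sketch: one-shot hashing through the shash interface added by
     * this series. Assumes a kernel with these patches applied.
     */
    #include <crypto/hash.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    static int demo_shash_digest(const u8 *data, unsigned int len, u8 *out)
    {
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        int err;

        tfm = crypto_alloc_shash("md4", 0, 0);
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        /* The descriptor is followed by descsize bytes of per-message state. */
        desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
        if (!desc) {
            crypto_free_shash(tfm);
            return -ENOMEM;
        }
        desc->tfm = tfm;
        desc->flags = 0;

        err = crypto_shash_digest(desc, data, len, out);

        kfree(desc);
        crypto_free_shash(tfm);
        return err;
    }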
crypto/md5.c
@@ -15,10 +15,10 @@
 * any later version.
 *
 */
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <asm/byteorder.h>

@@ -147,20 +147,22 @@ static inline void md5_transform_helper(struct md5_ctx *ctx)
    md5_transform(ctx->hash, ctx->block);
}

static void md5_init(struct crypto_tfm *tfm)
static int md5_init(struct shash_desc *desc)
{
    struct md5_ctx *mctx = crypto_tfm_ctx(tfm);
    struct md5_ctx *mctx = shash_desc_ctx(desc);

    mctx->hash[0] = 0x67452301;
    mctx->hash[1] = 0xefcdab89;
    mctx->hash[2] = 0x98badcfe;
    mctx->hash[3] = 0x10325476;
    mctx->byte_count = 0;

    return 0;
}

static void md5_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
    struct md5_ctx *mctx = crypto_tfm_ctx(tfm);
    struct md5_ctx *mctx = shash_desc_ctx(desc);
    const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);

    mctx->byte_count += len;
@@ -168,7 +170,7 @@ static void md5_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
    if (avail > len) {
        memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
               data, len);
        return;
        return 0;
    }

    memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
@@ -186,11 +188,13 @@ static void md5_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
    }

    memcpy(mctx->block, data, len);

    return 0;
}

static void md5_final(struct crypto_tfm *tfm, u8 *out)
static int md5_final(struct shash_desc *desc, u8 *out)
{
    struct md5_ctx *mctx = crypto_tfm_ctx(tfm);
    struct md5_ctx *mctx = shash_desc_ctx(desc);
    const unsigned int offset = mctx->byte_count & 0x3f;
    char *p = (char *)mctx->block + offset;
    int padding = 56 - (offset + 1);
@@ -212,30 +216,32 @@ static void md5_final(struct crypto_tfm *tfm, u8 *out)
    cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32));
    memcpy(out, mctx->hash, sizeof(mctx->hash));
    memset(mctx, 0, sizeof(*mctx));

    return 0;
}

static struct crypto_alg alg = {
    .cra_name = "md5",
    .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
    .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
    .cra_ctxsize = sizeof(struct md5_ctx),
    .cra_module = THIS_MODULE,
    .cra_list = LIST_HEAD_INIT(alg.cra_list),
    .cra_u = { .digest = {
    .dia_digestsize = MD5_DIGEST_SIZE,
    .dia_init = md5_init,
    .dia_update = md5_update,
    .dia_final = md5_final } }
static struct shash_alg alg = {
    .digestsize = MD5_DIGEST_SIZE,
    .init = md5_init,
    .update = md5_update,
    .final = md5_final,
    .descsize = sizeof(struct md5_ctx),
    .base = {
        .cra_name = "md5",
        .cra_flags = CRYPTO_ALG_TYPE_SHASH,
        .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
        .cra_module = THIS_MODULE,
    }
};

static int __init md5_mod_init(void)
{
    return crypto_register_alg(&alg);
    return crypto_register_shash(&alg);
}

static void __exit md5_mod_fini(void)
{
    crypto_unregister_alg(&alg);
    crypto_unregister_shash(&alg);
}

module_init(md5_mod_init);
crypto/michael_mic.c
@@ -9,23 +9,25 @@
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <crypto/internal/hash.h>
#include <asm/byteorder.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/types.h>


struct michael_mic_ctx {
    u32 l, r;
};

struct michael_mic_desc_ctx {
    u8 pending[4];
    size_t pending_len;

    u32 l, r;
};


static inline u32 xswap(u32 val)
{
    return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
@@ -45,17 +47,22 @@ do { \
} while (0)


static void michael_init(struct crypto_tfm *tfm)
static int michael_init(struct shash_desc *desc)
{
    struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm);
    struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
    struct michael_mic_ctx *ctx = crypto_shash_ctx(desc->tfm);
    mctx->pending_len = 0;
    mctx->l = ctx->l;
    mctx->r = ctx->r;

    return 0;
}


static void michael_update(struct crypto_tfm *tfm, const u8 *data,
static int michael_update(struct shash_desc *desc, const u8 *data,
                           unsigned int len)
{
    struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm);
    struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
    const __le32 *src;

    if (mctx->pending_len) {
@@ -68,7 +75,7 @@ static void michael_update(struct crypto_tfm *tfm, const u8 *data,
        len -= flen;

        if (mctx->pending_len < 4)
            return;
            return 0;

        src = (const __le32 *)mctx->pending;
        mctx->l ^= le32_to_cpup(src);
@@ -88,12 +95,14 @@ static void michael_update(struct crypto_tfm *tfm, const u8 *data,
        mctx->pending_len = len;
        memcpy(mctx->pending, src, len);
    }

    return 0;
}


static void michael_final(struct crypto_tfm *tfm, u8 *out)
static int michael_final(struct shash_desc *desc, u8 *out)
{
    struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm);
    struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
    u8 *data = mctx->pending;
    __le32 *dst = (__le32 *)out;

@@ -119,17 +128,20 @@ static void michael_final(struct crypto_tfm *tfm, u8 *out)
    dst[0] = cpu_to_le32(mctx->l);
    dst[1] = cpu_to_le32(mctx->r);

    return 0;
}


static int michael_setkey(struct crypto_tfm *tfm, const u8 *key,
static int michael_setkey(struct crypto_shash *tfm, const u8 *key,
                          unsigned int keylen)
{
    struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm);
    struct michael_mic_ctx *mctx = crypto_shash_ctx(tfm);

    const __le32 *data = (const __le32 *)key;

    if (keylen != 8) {
        tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
        crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
    }

@@ -138,33 +150,31 @@ static int michael_setkey(struct crypto_tfm *tfm, const u8 *key,
    return 0;
}


static struct crypto_alg michael_mic_alg = {
    .cra_name = "michael_mic",
    .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
    .cra_blocksize = 8,
    .cra_ctxsize = sizeof(struct michael_mic_ctx),
    .cra_module = THIS_MODULE,
    .cra_alignmask = 3,
    .cra_list = LIST_HEAD_INIT(michael_mic_alg.cra_list),
    .cra_u = { .digest = {
    .dia_digestsize = 8,
    .dia_init = michael_init,
    .dia_update = michael_update,
    .dia_final = michael_final,
    .dia_setkey = michael_setkey } }
static struct shash_alg alg = {
    .digestsize = 8,
    .setkey = michael_setkey,
    .init = michael_init,
    .update = michael_update,
    .final = michael_final,
    .descsize = sizeof(struct michael_mic_desc_ctx),
    .base = {
        .cra_name = "michael_mic",
        .cra_blocksize = 8,
        .cra_alignmask = 3,
        .cra_ctxsize = sizeof(struct michael_mic_ctx),
        .cra_module = THIS_MODULE,
    }
};


static int __init michael_mic_init(void)
{
    return crypto_register_alg(&michael_mic_alg);
    return crypto_register_shash(&alg);
}


static void __exit michael_mic_exit(void)
{
    crypto_unregister_alg(&michael_mic_alg);
    crypto_unregister_shash(&alg);
}
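Michael MIC is the one conversion here that splits its state in two: the key-derived l/r pair stays in the long-lived tfm context (sized by cra_ctxsize), while the running l/r values and the pending bytes move into the new michael_mic_desc_ctx (sized by descsize), so concurrent hashes on one keyed tfm no longer clobber each other. A reduced sketch of that split, with hypothetical names, follows.

    /*
     * Sketch of the tfm-context / descriptor-context split used above.
     * All names here are illustrative, not taken from the patch.
     */
    #include <crypto/hash.h>
    #include <linux/types.h>

    struct demo_key_ctx {       /* long-lived, sized by .base.cra_ctxsize */
        u32 l, r;               /* written once by ->setkey() */
    };

    struct demo_desc_ctx {      /* per-message, sized by .descsize */
        u32 l, r;               /* running state, seeded from the key ctx */
    };

    /* ->init() copies the keyed state into the descriptor: */
    static int demo_init(struct shash_desc *desc)
    {
        struct demo_desc_ctx *dctx = shash_desc_ctx(desc);
        struct demo_key_ctx *kctx = crypto_shash_ctx(desc->tfm);

        dctx->l = kctx->l;
        dctx->r = kctx->r;
        return 0;
    }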
crypto/proc.c
@@ -94,6 +94,17 @@ static int c_show(struct seq_file *m, void *p)
    seq_printf(m, "selftest : %s\n",
               (alg->cra_flags & CRYPTO_ALG_TESTED) ?
               "passed" : "unknown");

    if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
        seq_printf(m, "type : larval\n");
        seq_printf(m, "flags : 0x%x\n", alg->cra_flags);
        goto out;
    }

    if (alg->cra_type && alg->cra_type->show) {
        alg->cra_type->show(m, alg);
        goto out;
    }

    switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
    case CRYPTO_ALG_TYPE_CIPHER:
@@ -115,16 +126,11 @@ static int c_show(struct seq_file *m, void *p)
        seq_printf(m, "type : compression\n");
        break;
    default:
        if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
            seq_printf(m, "type : larval\n");
            seq_printf(m, "flags : 0x%x\n", alg->cra_flags);
        } else if (alg->cra_type && alg->cra_type->show)
            alg->cra_type->show(m, alg);
        else
            seq_printf(m, "type : unknown\n");
        seq_printf(m, "type : unknown\n");
        break;
    }

out:
    seq_putc(m, '\n');
    return 0;
}
crypto/rmd128.c
@@ -13,11 +13,10 @@
 * any later version.
 *
 */
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <asm/byteorder.h>

@@ -218,9 +217,9 @@ static void rmd128_transform(u32 *state, const __le32 *in)
    return;
}

static void rmd128_init(struct crypto_tfm *tfm)
static int rmd128_init(struct shash_desc *desc)
{
    struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
    struct rmd128_ctx *rctx = shash_desc_ctx(desc);

    rctx->byte_count = 0;

@@ -230,12 +229,14 @@ static void rmd128_init(struct crypto_tfm *tfm)
    rctx->state[3] = RMD_H3;

    memset(rctx->buffer, 0, sizeof(rctx->buffer));

    return 0;
}

static void rmd128_update(struct crypto_tfm *tfm, const u8 *data,
                          unsigned int len)
static int rmd128_update(struct shash_desc *desc, const u8 *data,
                         unsigned int len)
{
    struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
    struct rmd128_ctx *rctx = shash_desc_ctx(desc);
    const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);

    rctx->byte_count += len;
@@ -244,7 +245,7 @@ static void rmd128_update(struct crypto_tfm *tfm, const u8 *data,
    if (avail > len) {
        memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
               data, len);
        return;
        goto out;
    }

    memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
@@ -262,12 +263,15 @@ static void rmd128_update(struct crypto_tfm *tfm, const u8 *data,
    }

    memcpy(rctx->buffer, data, len);

out:
    return 0;
}

/* Add padding and return the message digest. */
static void rmd128_final(struct crypto_tfm *tfm, u8 *out)
static int rmd128_final(struct shash_desc *desc, u8 *out)
{
    struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
    struct rmd128_ctx *rctx = shash_desc_ctx(desc);
    u32 i, index, padlen;
    __le64 bits;
    __le32 *dst = (__le32 *)out;
@@ -278,10 +282,10 @@ static void rmd128_final(struct crypto_tfm *tfm, u8 *out)
    /* Pad out to 56 mod 64 */
    index = rctx->byte_count & 0x3f;
    padlen = (index < 56) ? (56 - index) : ((64+56) - index);
    rmd128_update(tfm, padding, padlen);
    rmd128_update(desc, padding, padlen);

    /* Append length */
    rmd128_update(tfm, (const u8 *)&bits, sizeof(bits));
    rmd128_update(desc, (const u8 *)&bits, sizeof(bits));

    /* Store state in digest */
    for (i = 0; i < 4; i++)
@@ -289,31 +293,32 @@ static void rmd128_final(struct crypto_tfm *tfm, u8 *out)

    /* Wipe context */
    memset(rctx, 0, sizeof(*rctx));

    return 0;
}

static struct crypto_alg alg = {
    .cra_name = "rmd128",
    .cra_driver_name = "rmd128",
    .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
    .cra_blocksize = RMD128_BLOCK_SIZE,
    .cra_ctxsize = sizeof(struct rmd128_ctx),
    .cra_module = THIS_MODULE,
    .cra_list = LIST_HEAD_INIT(alg.cra_list),
    .cra_u = { .digest = {
    .dia_digestsize = RMD128_DIGEST_SIZE,
    .dia_init = rmd128_init,
    .dia_update = rmd128_update,
    .dia_final = rmd128_final } }
static struct shash_alg alg = {
    .digestsize = RMD128_DIGEST_SIZE,
    .init = rmd128_init,
    .update = rmd128_update,
    .final = rmd128_final,
    .descsize = sizeof(struct rmd128_ctx),
    .base = {
        .cra_name = "rmd128",
        .cra_flags = CRYPTO_ALG_TYPE_SHASH,
        .cra_blocksize = RMD128_BLOCK_SIZE,
        .cra_module = THIS_MODULE,
    }
};

static int __init rmd128_mod_init(void)
{
    return crypto_register_alg(&alg);
    return crypto_register_shash(&alg);
}

static void __exit rmd128_mod_fini(void)
{
    crypto_unregister_alg(&alg);
    crypto_unregister_shash(&alg);
}

module_init(rmd128_mod_init);
@@ -321,5 +326,3 @@ module_exit(rmd128_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RIPEMD-128 Message Digest");

MODULE_ALIAS("rmd128");
crypto/rmd160.c
@@ -13,11 +13,10 @@
 * any later version.
 *
 */
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <asm/byteorder.h>

@@ -261,9 +260,9 @@ static void rmd160_transform(u32 *state, const __le32 *in)
    return;
}

static void rmd160_init(struct crypto_tfm *tfm)
static int rmd160_init(struct shash_desc *desc)
{
    struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
    struct rmd160_ctx *rctx = shash_desc_ctx(desc);

    rctx->byte_count = 0;

@@ -274,12 +273,14 @@ static void rmd160_init(struct crypto_tfm *tfm)
    rctx->state[4] = RMD_H4;

    memset(rctx->buffer, 0, sizeof(rctx->buffer));

    return 0;
}

static void rmd160_update(struct crypto_tfm *tfm, const u8 *data,
                          unsigned int len)
static int rmd160_update(struct shash_desc *desc, const u8 *data,
                         unsigned int len)
{
    struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
    struct rmd160_ctx *rctx = shash_desc_ctx(desc);
    const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);

    rctx->byte_count += len;
@@ -288,7 +289,7 @@ static void rmd160_update(struct crypto_tfm *tfm, const u8 *data,
    if (avail > len) {
        memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
               data, len);
        return;
        goto out;
    }

    memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
@@ -306,12 +307,15 @@ static void rmd160_update(struct crypto_tfm *tfm, const u8 *data,
    }

    memcpy(rctx->buffer, data, len);

out:
    return 0;
}

/* Add padding and return the message digest. */
static void rmd160_final(struct crypto_tfm *tfm, u8 *out)
static int rmd160_final(struct shash_desc *desc, u8 *out)
{
    struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
    struct rmd160_ctx *rctx = shash_desc_ctx(desc);
    u32 i, index, padlen;
    __le64 bits;
    __le32 *dst = (__le32 *)out;
@@ -322,10 +326,10 @@ static void rmd160_final(struct crypto_tfm *tfm, u8 *out)
    /* Pad out to 56 mod 64 */
    index = rctx->byte_count & 0x3f;
    padlen = (index < 56) ? (56 - index) : ((64+56) - index);
    rmd160_update(tfm, padding, padlen);
    rmd160_update(desc, padding, padlen);

    /* Append length */
    rmd160_update(tfm, (const u8 *)&bits, sizeof(bits));
    rmd160_update(desc, (const u8 *)&bits, sizeof(bits));

    /* Store state in digest */
    for (i = 0; i < 5; i++)
@@ -333,31 +337,32 @@ static void rmd160_final(struct crypto_tfm *tfm, u8 *out)

    /* Wipe context */
    memset(rctx, 0, sizeof(*rctx));

    return 0;
}

static struct crypto_alg alg = {
    .cra_name = "rmd160",
    .cra_driver_name = "rmd160",
    .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
    .cra_blocksize = RMD160_BLOCK_SIZE,
    .cra_ctxsize = sizeof(struct rmd160_ctx),
    .cra_module = THIS_MODULE,
    .cra_list = LIST_HEAD_INIT(alg.cra_list),
    .cra_u = { .digest = {
    .dia_digestsize = RMD160_DIGEST_SIZE,
    .dia_init = rmd160_init,
    .dia_update = rmd160_update,
    .dia_final = rmd160_final } }
static struct shash_alg alg = {
    .digestsize = RMD160_DIGEST_SIZE,
    .init = rmd160_init,
    .update = rmd160_update,
    .final = rmd160_final,
    .descsize = sizeof(struct rmd160_ctx),
    .base = {
        .cra_name = "rmd160",
        .cra_flags = CRYPTO_ALG_TYPE_SHASH,
        .cra_blocksize = RMD160_BLOCK_SIZE,
        .cra_module = THIS_MODULE,
    }
};

static int __init rmd160_mod_init(void)
{
    return crypto_register_alg(&alg);
    return crypto_register_shash(&alg);
}

static void __exit rmd160_mod_fini(void)
{
    crypto_unregister_alg(&alg);
    crypto_unregister_shash(&alg);
}

module_init(rmd160_mod_init);
@@ -365,5 +370,3 @@ module_exit(rmd160_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RIPEMD-160 Message Digest");

MODULE_ALIAS("rmd160");
crypto/rmd256.c
@@ -13,11 +13,10 @@
 * any later version.
 *
 */
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <asm/byteorder.h>

@@ -233,9 +232,9 @@ static void rmd256_transform(u32 *state, const __le32 *in)
    return;
}

static void rmd256_init(struct crypto_tfm *tfm)
static int rmd256_init(struct shash_desc *desc)
{
    struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
    struct rmd256_ctx *rctx = shash_desc_ctx(desc);

    rctx->byte_count = 0;

@@ -249,12 +248,14 @@ static void rmd256_init(struct crypto_tfm *tfm)
    rctx->state[7] = RMD_H8;

    memset(rctx->buffer, 0, sizeof(rctx->buffer));

    return 0;
}

static void rmd256_update(struct crypto_tfm *tfm, const u8 *data,
                          unsigned int len)
static int rmd256_update(struct shash_desc *desc, const u8 *data,
                         unsigned int len)
{
    struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
    struct rmd256_ctx *rctx = shash_desc_ctx(desc);
    const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);

    rctx->byte_count += len;
@@ -263,7 +264,7 @@ static void rmd256_update(struct crypto_tfm *tfm, const u8 *data,
    if (avail > len) {
        memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
               data, len);
        return;
        goto out;
    }

    memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
@@ -281,12 +282,15 @@ static void rmd256_update(struct crypto_tfm *tfm, const u8 *data,
    }

    memcpy(rctx->buffer, data, len);

out:
    return 0;
}

/* Add padding and return the message digest. */
static void rmd256_final(struct crypto_tfm *tfm, u8 *out)
static int rmd256_final(struct shash_desc *desc, u8 *out)
{
    struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
    struct rmd256_ctx *rctx = shash_desc_ctx(desc);
    u32 i, index, padlen;
    __le64 bits;
    __le32 *dst = (__le32 *)out;
@@ -297,10 +301,10 @@ static void rmd256_final(struct crypto_tfm *tfm, u8 *out)
    /* Pad out to 56 mod 64 */
    index = rctx->byte_count & 0x3f;
    padlen = (index < 56) ? (56 - index) : ((64+56) - index);
    rmd256_update(tfm, padding, padlen);
    rmd256_update(desc, padding, padlen);

    /* Append length */
    rmd256_update(tfm, (const u8 *)&bits, sizeof(bits));
    rmd256_update(desc, (const u8 *)&bits, sizeof(bits));

    /* Store state in digest */
    for (i = 0; i < 8; i++)
@@ -308,31 +312,32 @@ static void rmd256_final(struct crypto_tfm *tfm, u8 *out)

    /* Wipe context */
    memset(rctx, 0, sizeof(*rctx));

    return 0;
}

static struct crypto_alg alg = {
    .cra_name = "rmd256",
    .cra_driver_name = "rmd256",
    .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
    .cra_blocksize = RMD256_BLOCK_SIZE,
    .cra_ctxsize = sizeof(struct rmd256_ctx),
    .cra_module = THIS_MODULE,
    .cra_list = LIST_HEAD_INIT(alg.cra_list),
    .cra_u = { .digest = {
    .dia_digestsize = RMD256_DIGEST_SIZE,
    .dia_init = rmd256_init,
    .dia_update = rmd256_update,
    .dia_final = rmd256_final } }
static struct shash_alg alg = {
    .digestsize = RMD256_DIGEST_SIZE,
    .init = rmd256_init,
    .update = rmd256_update,
    .final = rmd256_final,
    .descsize = sizeof(struct rmd256_ctx),
    .base = {
        .cra_name = "rmd256",
        .cra_flags = CRYPTO_ALG_TYPE_SHASH,
        .cra_blocksize = RMD256_BLOCK_SIZE,
        .cra_module = THIS_MODULE,
    }
};

static int __init rmd256_mod_init(void)
{
    return crypto_register_alg(&alg);
    return crypto_register_shash(&alg);
}

static void __exit rmd256_mod_fini(void)
{
    crypto_unregister_alg(&alg);
    crypto_unregister_shash(&alg);
}

module_init(rmd256_mod_init);
@@ -340,5 +345,3 @@ module_exit(rmd256_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RIPEMD-256 Message Digest");

MODULE_ALIAS("rmd256");
crypto/rmd320.c
@@ -13,11 +13,10 @@
 * any later version.
 *
 */
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <asm/byteorder.h>

@@ -280,9 +279,9 @@ static void rmd320_transform(u32 *state, const __le32 *in)
    return;
}

static void rmd320_init(struct crypto_tfm *tfm)
static int rmd320_init(struct shash_desc *desc)
{
    struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
    struct rmd320_ctx *rctx = shash_desc_ctx(desc);

    rctx->byte_count = 0;

@@ -298,12 +297,14 @@ static void rmd320_init(struct crypto_tfm *tfm)
    rctx->state[9] = RMD_H9;

    memset(rctx->buffer, 0, sizeof(rctx->buffer));

    return 0;
}

static void rmd320_update(struct crypto_tfm *tfm, const u8 *data,
                          unsigned int len)
static int rmd320_update(struct shash_desc *desc, const u8 *data,
                         unsigned int len)
{
    struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
    struct rmd320_ctx *rctx = shash_desc_ctx(desc);
    const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);

    rctx->byte_count += len;
@@ -312,7 +313,7 @@ static void rmd320_update(struct crypto_tfm *tfm, const u8 *data,
    if (avail > len) {
        memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
               data, len);
        return;
        goto out;
    }

    memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
@@ -330,12 +331,15 @@ static void rmd320_update(struct crypto_tfm *tfm, const u8 *data,
    }

    memcpy(rctx->buffer, data, len);

out:
    return 0;
}

/* Add padding and return the message digest. */
static void rmd320_final(struct crypto_tfm *tfm, u8 *out)
static int rmd320_final(struct shash_desc *desc, u8 *out)
{
    struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
    struct rmd320_ctx *rctx = shash_desc_ctx(desc);
    u32 i, index, padlen;
    __le64 bits;
    __le32 *dst = (__le32 *)out;
@@ -346,10 +350,10 @@ static void rmd320_final(struct crypto_tfm *tfm, u8 *out)
    /* Pad out to 56 mod 64 */
    index = rctx->byte_count & 0x3f;
    padlen = (index < 56) ? (56 - index) : ((64+56) - index);
    rmd320_update(tfm, padding, padlen);
    rmd320_update(desc, padding, padlen);

    /* Append length */
    rmd320_update(tfm, (const u8 *)&bits, sizeof(bits));
    rmd320_update(desc, (const u8 *)&bits, sizeof(bits));

    /* Store state in digest */
    for (i = 0; i < 10; i++)
@@ -357,31 +361,32 @@ static void rmd320_final(struct crypto_tfm *tfm, u8 *out)

    /* Wipe context */
    memset(rctx, 0, sizeof(*rctx));

    return 0;
}

static struct crypto_alg alg = {
    .cra_name = "rmd320",
    .cra_driver_name = "rmd320",
    .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
    .cra_blocksize = RMD320_BLOCK_SIZE,
    .cra_ctxsize = sizeof(struct rmd320_ctx),
    .cra_module = THIS_MODULE,
    .cra_list = LIST_HEAD_INIT(alg.cra_list),
    .cra_u = { .digest = {
    .dia_digestsize = RMD320_DIGEST_SIZE,
    .dia_init = rmd320_init,
    .dia_update = rmd320_update,
    .dia_final = rmd320_final } }
static struct shash_alg alg = {
    .digestsize = RMD320_DIGEST_SIZE,
    .init = rmd320_init,
    .update = rmd320_update,
    .final = rmd320_final,
    .descsize = sizeof(struct rmd320_ctx),
    .base = {
        .cra_name = "rmd320",
        .cra_flags = CRYPTO_ALG_TYPE_SHASH,
        .cra_blocksize = RMD320_BLOCK_SIZE,
        .cra_module = THIS_MODULE,
    }
};

static int __init rmd320_mod_init(void)
{
    return crypto_register_alg(&alg);
    return crypto_register_shash(&alg);
}

static void __exit rmd320_mod_fini(void)
{
    crypto_unregister_alg(&alg);
    crypto_unregister_shash(&alg);
}

module_init(rmd320_mod_init);
@@ -389,5 +394,3 @@ module_exit(rmd320_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RIPEMD-320 Message Digest");

MODULE_ALIAS("rmd320");
crypto/salsa20_generic.c
@@ -24,6 +24,7 @@
#include <linux/errno.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <crypto/algapi.h>
#include <asm/byteorder.h>

@@ -42,10 +43,6 @@ D. J. Bernstein
Public domain.
*/

#define ROTATE(v,n) (((v) << (n)) | ((v) >> (32 - (n))))
#define XOR(v,w) ((v) ^ (w))
#define PLUS(v,w) (((v) + (w)))
#define PLUSONE(v) (PLUS((v),1))
#define U32TO8_LITTLE(p, v) \
    { (p)[0] = (v >> 0) & 0xff; (p)[1] = (v >> 8) & 0xff; \
      (p)[2] = (v >> 16) & 0xff; (p)[3] = (v >> 24) & 0xff; }
@@ -65,41 +62,41 @@ static void salsa20_wordtobyte(u8 output[64], const u32 input[16])

    memcpy(x, input, sizeof(x));
    for (i = 20; i > 0; i -= 2) {
        x[ 4] = XOR(x[ 4],ROTATE(PLUS(x[ 0],x[12]), 7));
        x[ 8] = XOR(x[ 8],ROTATE(PLUS(x[ 4],x[ 0]), 9));
        x[12] = XOR(x[12],ROTATE(PLUS(x[ 8],x[ 4]),13));
        x[ 0] = XOR(x[ 0],ROTATE(PLUS(x[12],x[ 8]),18));
        x[ 9] = XOR(x[ 9],ROTATE(PLUS(x[ 5],x[ 1]), 7));
        x[13] = XOR(x[13],ROTATE(PLUS(x[ 9],x[ 5]), 9));
        x[ 1] = XOR(x[ 1],ROTATE(PLUS(x[13],x[ 9]),13));
        x[ 5] = XOR(x[ 5],ROTATE(PLUS(x[ 1],x[13]),18));
        x[14] = XOR(x[14],ROTATE(PLUS(x[10],x[ 6]), 7));
        x[ 2] = XOR(x[ 2],ROTATE(PLUS(x[14],x[10]), 9));
        x[ 6] = XOR(x[ 6],ROTATE(PLUS(x[ 2],x[14]),13));
        x[10] = XOR(x[10],ROTATE(PLUS(x[ 6],x[ 2]),18));
        x[ 3] = XOR(x[ 3],ROTATE(PLUS(x[15],x[11]), 7));
        x[ 7] = XOR(x[ 7],ROTATE(PLUS(x[ 3],x[15]), 9));
        x[11] = XOR(x[11],ROTATE(PLUS(x[ 7],x[ 3]),13));
        x[15] = XOR(x[15],ROTATE(PLUS(x[11],x[ 7]),18));
        x[ 1] = XOR(x[ 1],ROTATE(PLUS(x[ 0],x[ 3]), 7));
        x[ 2] = XOR(x[ 2],ROTATE(PLUS(x[ 1],x[ 0]), 9));
        x[ 3] = XOR(x[ 3],ROTATE(PLUS(x[ 2],x[ 1]),13));
        x[ 0] = XOR(x[ 0],ROTATE(PLUS(x[ 3],x[ 2]),18));
        x[ 6] = XOR(x[ 6],ROTATE(PLUS(x[ 5],x[ 4]), 7));
        x[ 7] = XOR(x[ 7],ROTATE(PLUS(x[ 6],x[ 5]), 9));
        x[ 4] = XOR(x[ 4],ROTATE(PLUS(x[ 7],x[ 6]),13));
        x[ 5] = XOR(x[ 5],ROTATE(PLUS(x[ 4],x[ 7]),18));
        x[11] = XOR(x[11],ROTATE(PLUS(x[10],x[ 9]), 7));
        x[ 8] = XOR(x[ 8],ROTATE(PLUS(x[11],x[10]), 9));
        x[ 9] = XOR(x[ 9],ROTATE(PLUS(x[ 8],x[11]),13));
        x[10] = XOR(x[10],ROTATE(PLUS(x[ 9],x[ 8]),18));
        x[12] = XOR(x[12],ROTATE(PLUS(x[15],x[14]), 7));
        x[13] = XOR(x[13],ROTATE(PLUS(x[12],x[15]), 9));
        x[14] = XOR(x[14],ROTATE(PLUS(x[13],x[12]),13));
        x[15] = XOR(x[15],ROTATE(PLUS(x[14],x[13]),18));
        x[ 4] ^= rol32((x[ 0] + x[12]), 7);
        x[ 8] ^= rol32((x[ 4] + x[ 0]), 9);
        x[12] ^= rol32((x[ 8] + x[ 4]), 13);
        x[ 0] ^= rol32((x[12] + x[ 8]), 18);
        x[ 9] ^= rol32((x[ 5] + x[ 1]), 7);
        x[13] ^= rol32((x[ 9] + x[ 5]), 9);
        x[ 1] ^= rol32((x[13] + x[ 9]), 13);
        x[ 5] ^= rol32((x[ 1] + x[13]), 18);
        x[14] ^= rol32((x[10] + x[ 6]), 7);
        x[ 2] ^= rol32((x[14] + x[10]), 9);
        x[ 6] ^= rol32((x[ 2] + x[14]), 13);
        x[10] ^= rol32((x[ 6] + x[ 2]), 18);
        x[ 3] ^= rol32((x[15] + x[11]), 7);
        x[ 7] ^= rol32((x[ 3] + x[15]), 9);
        x[11] ^= rol32((x[ 7] + x[ 3]), 13);
        x[15] ^= rol32((x[11] + x[ 7]), 18);
        x[ 1] ^= rol32((x[ 0] + x[ 3]), 7);
        x[ 2] ^= rol32((x[ 1] + x[ 0]), 9);
        x[ 3] ^= rol32((x[ 2] + x[ 1]), 13);
        x[ 0] ^= rol32((x[ 3] + x[ 2]), 18);
        x[ 6] ^= rol32((x[ 5] + x[ 4]), 7);
        x[ 7] ^= rol32((x[ 6] + x[ 5]), 9);
        x[ 4] ^= rol32((x[ 7] + x[ 6]), 13);
        x[ 5] ^= rol32((x[ 4] + x[ 7]), 18);
        x[11] ^= rol32((x[10] + x[ 9]), 7);
        x[ 8] ^= rol32((x[11] + x[10]), 9);
        x[ 9] ^= rol32((x[ 8] + x[11]), 13);
        x[10] ^= rol32((x[ 9] + x[ 8]), 18);
        x[12] ^= rol32((x[15] + x[14]), 7);
        x[13] ^= rol32((x[12] + x[15]), 9);
        x[14] ^= rol32((x[13] + x[12]), 13);
        x[15] ^= rol32((x[14] + x[13]), 18);
    }
    for (i = 0; i < 16; ++i)
        x[i] = PLUS(x[i],input[i]);
        x[i] += input[i];
    for (i = 0; i < 16; ++i)
        U32TO8_LITTLE(output + 4 * i,x[i]);
}
@@ -150,9 +147,9 @@ static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst,
    while (bytes) {
        salsa20_wordtobyte(buf, ctx->input);

        ctx->input[8] = PLUSONE(ctx->input[8]);
        ctx->input[8]++;
        if (!ctx->input[8])
            ctx->input[9] = PLUSONE(ctx->input[9]);
            ctx->input[9]++;

        if (bytes <= 64) {
            crypto_xor(dst, buf, bytes);
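The salsa20 hunk is a pure clean-up: the private ROTATE/XOR/PLUS/PLUSONE wrappers are dropped in favour of plain C operators and the kernel's rol32() helper from <linux/bitops.h>. Both spellings compile to the same rotate-and-xor; an illustrative one-step comparison:

    /* Illustrative only: one Salsa20 quarter-round step, old vs. new. */
    #include <linux/bitops.h>
    #include <linux/types.h>

    static inline void demo_qr_step(u32 x[16])
    {
        /* Before: x[4] = XOR(x[4], ROTATE(PLUS(x[0], x[12]), 7)); */
        x[4] ^= rol32(x[0] + x[12], 7);
    }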
crypto/sha1_generic.c
@@ -16,10 +16,10 @@
 * any later version.
 *
 */
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>

@@ -31,9 +31,10 @@ struct sha1_ctx {
    u8 buffer[64];
};

static void sha1_init(struct crypto_tfm *tfm)
static int sha1_init(struct shash_desc *desc)
{
    struct sha1_ctx *sctx = crypto_tfm_ctx(tfm);
    struct sha1_ctx *sctx = shash_desc_ctx(desc);

    static const struct sha1_ctx initstate = {
        0,
        { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
@@ -41,12 +42,14 @@ static void sha1_init(struct crypto_tfm *tfm)
    };

    *sctx = initstate;

    return 0;
}

static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
static int sha1_update(struct shash_desc *desc, const u8 *data,
                        unsigned int len)
{
    struct sha1_ctx *sctx = crypto_tfm_ctx(tfm);
    struct sha1_ctx *sctx = shash_desc_ctx(desc);
    unsigned int partial, done;
    const u8 *src;

@@ -74,13 +77,15 @@ static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
        partial = 0;
    }
    memcpy(sctx->buffer + partial, src, len - done);

    return 0;
}


/* Add padding and return the message digest. */
static void sha1_final(struct crypto_tfm *tfm, u8 *out)
static int sha1_final(struct shash_desc *desc, u8 *out)
{
    struct sha1_ctx *sctx = crypto_tfm_ctx(tfm);
    struct sha1_ctx *sctx = shash_desc_ctx(desc);
    __be32 *dst = (__be32 *)out;
    u32 i, index, padlen;
    __be64 bits;
@@ -91,10 +96,10 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out)
    /* Pad out to 56 mod 64 */
    index = sctx->count & 0x3f;
    padlen = (index < 56) ? (56 - index) : ((64+56) - index);
    sha1_update(tfm, padding, padlen);
    sha1_update(desc, padding, padlen);

    /* Append length */
    sha1_update(tfm, (const u8 *)&bits, sizeof(bits));
    sha1_update(desc, (const u8 *)&bits, sizeof(bits));

    /* Store state in digest */
    for (i = 0; i < 5; i++)
@@ -102,32 +107,33 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out)

    /* Wipe context */
    memset(sctx, 0, sizeof *sctx);

    return 0;
}

static struct crypto_alg alg = {
    .cra_name = "sha1",
    .cra_driver_name= "sha1-generic",
    .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
    .cra_blocksize = SHA1_BLOCK_SIZE,
    .cra_ctxsize = sizeof(struct sha1_ctx),
    .cra_module = THIS_MODULE,
    .cra_alignmask = 3,
    .cra_list = LIST_HEAD_INIT(alg.cra_list),
    .cra_u = { .digest = {
    .dia_digestsize = SHA1_DIGEST_SIZE,
    .dia_init = sha1_init,
    .dia_update = sha1_update,
    .dia_final = sha1_final } }
static struct shash_alg alg = {
    .digestsize = SHA1_DIGEST_SIZE,
    .init = sha1_init,
    .update = sha1_update,
    .final = sha1_final,
    .descsize = sizeof(struct sha1_ctx),
    .base = {
        .cra_name = "sha1",
        .cra_driver_name= "sha1-generic",
        .cra_flags = CRYPTO_ALG_TYPE_SHASH,
        .cra_blocksize = SHA1_BLOCK_SIZE,
        .cra_module = THIS_MODULE,
    }
};

static int __init sha1_generic_mod_init(void)
{
    return crypto_register_alg(&alg);
    return crypto_register_shash(&alg);
}

static void __exit sha1_generic_mod_fini(void)
{
    crypto_unregister_alg(&alg);
    crypto_unregister_shash(&alg);
}

module_init(sha1_generic_mod_init);
crypto/sha256_generic.c
@@ -17,10 +17,10 @@
 * any later version.
 *
 */
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
@@ -69,7 +69,7 @@ static void sha256_transform(u32 *state, const u8 *input)
    /* now blend */
    for (i = 16; i < 64; i++)
        BLEND_OP(i, W);

    /* load the state into our registers */
    a=state[0];  b=state[1];  c=state[2];  d=state[3];
    e=state[4];  f=state[5];  g=state[6];  h=state[7];
@@ -220,9 +220,9 @@ static void sha256_transform(u32 *state, const u8 *input)
}


static void sha224_init(struct crypto_tfm *tfm)
static int sha224_init(struct shash_desc *desc)
{
    struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
    struct sha256_ctx *sctx = shash_desc_ctx(desc);
    sctx->state[0] = SHA224_H0;
    sctx->state[1] = SHA224_H1;
    sctx->state[2] = SHA224_H2;
@@ -233,11 +233,13 @@ static void sha224_init(struct crypto_tfm *tfm)
    sctx->state[7] = SHA224_H7;
    sctx->count[0] = 0;
    sctx->count[1] = 0;

    return 0;
}

static void sha256_init(struct crypto_tfm *tfm)
static int sha256_init(struct shash_desc *desc)
{
    struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
    struct sha256_ctx *sctx = shash_desc_ctx(desc);
    sctx->state[0] = SHA256_H0;
    sctx->state[1] = SHA256_H1;
    sctx->state[2] = SHA256_H2;
@@ -247,12 +249,14 @@ static void sha256_init(struct crypto_tfm *tfm)
    sctx->state[6] = SHA256_H6;
    sctx->state[7] = SHA256_H7;
    sctx->count[0] = sctx->count[1] = 0;

    return 0;
}

static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
static int sha256_update(struct shash_desc *desc, const u8 *data,
                          unsigned int len)
{
    struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
    struct sha256_ctx *sctx = shash_desc_ctx(desc);
    unsigned int i, index, part_len;

    /* Compute number of bytes mod 128 */
@@ -277,14 +281,16 @@ static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
    } else {
        i = 0;
    }

    /* Buffer remaining input */
    memcpy(&sctx->buf[index], &data[i], len-i);

    return 0;
}

static void sha256_final(struct crypto_tfm *tfm, u8 *out)
static int sha256_final(struct shash_desc *desc, u8 *out)
{
    struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
    struct sha256_ctx *sctx = shash_desc_ctx(desc);
    __be32 *dst = (__be32 *)out;
    __be32 bits[2];
    unsigned int index, pad_len;
@@ -298,10 +304,10 @@ static void sha256_final(struct crypto_tfm *tfm, u8 *out)
    /* Pad out to 56 mod 64. */
    index = (sctx->count[0] >> 3) & 0x3f;
    pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
    sha256_update(tfm, padding, pad_len);
    sha256_update(desc, padding, pad_len);

    /* Append length (before padding) */
    sha256_update(tfm, (const u8 *)bits, sizeof(bits));
    sha256_update(desc, (const u8 *)bits, sizeof(bits));

    /* Store state in digest */
    for (i = 0; i < 8; i++)
@@ -309,71 +315,73 @@ static void sha256_final(struct crypto_tfm *tfm, u8 *out)

    /* Zeroize sensitive information. */
    memset(sctx, 0, sizeof(*sctx));

    return 0;
}

static void sha224_final(struct crypto_tfm *tfm, u8 *hash)
static int sha224_final(struct shash_desc *desc, u8 *hash)
{
    u8 D[SHA256_DIGEST_SIZE];

    sha256_final(tfm, D);
    sha256_final(desc, D);

    memcpy(hash, D, SHA224_DIGEST_SIZE);
    memset(D, 0, SHA256_DIGEST_SIZE);

    return 0;
}

static struct crypto_alg sha256 = {
    .cra_name = "sha256",
    .cra_driver_name= "sha256-generic",
    .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
    .cra_blocksize = SHA256_BLOCK_SIZE,
    .cra_ctxsize = sizeof(struct sha256_ctx),
    .cra_module = THIS_MODULE,
    .cra_alignmask = 3,
    .cra_list = LIST_HEAD_INIT(sha256.cra_list),
    .cra_u = { .digest = {
    .dia_digestsize = SHA256_DIGEST_SIZE,
    .dia_init = sha256_init,
    .dia_update = sha256_update,
    .dia_final = sha256_final } }
static struct shash_alg sha256 = {
    .digestsize = SHA256_DIGEST_SIZE,
    .init = sha256_init,
    .update = sha256_update,
    .final = sha256_final,
    .descsize = sizeof(struct sha256_ctx),
    .base = {
        .cra_name = "sha256",
        .cra_driver_name= "sha256-generic",
        .cra_flags = CRYPTO_ALG_TYPE_SHASH,
        .cra_blocksize = SHA256_BLOCK_SIZE,
        .cra_module = THIS_MODULE,
    }
};

static struct crypto_alg sha224 = {
    .cra_name = "sha224",
    .cra_driver_name = "sha224-generic",
    .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
    .cra_blocksize = SHA224_BLOCK_SIZE,
    .cra_ctxsize = sizeof(struct sha256_ctx),
    .cra_module = THIS_MODULE,
    .cra_alignmask = 3,
    .cra_list = LIST_HEAD_INIT(sha224.cra_list),
    .cra_u = { .digest = {
    .dia_digestsize = SHA224_DIGEST_SIZE,
    .dia_init = sha224_init,
    .dia_update = sha256_update,
    .dia_final = sha224_final } }
static struct shash_alg sha224 = {
    .digestsize = SHA224_DIGEST_SIZE,
    .init = sha224_init,
    .update = sha256_update,
    .final = sha224_final,
    .descsize = sizeof(struct sha256_ctx),
    .base = {
        .cra_name = "sha224",
        .cra_driver_name= "sha224-generic",
        .cra_flags = CRYPTO_ALG_TYPE_SHASH,
        .cra_blocksize = SHA224_BLOCK_SIZE,
        .cra_module = THIS_MODULE,
    }
};

static int __init sha256_generic_mod_init(void)
{
    int ret = 0;

    ret = crypto_register_alg(&sha224);
    ret = crypto_register_shash(&sha224);

    if (ret < 0)
        return ret;

    ret = crypto_register_alg(&sha256);
    ret = crypto_register_shash(&sha256);

    if (ret < 0)
        crypto_unregister_alg(&sha224);
        crypto_unregister_shash(&sha224);

    return ret;
}

static void __exit sha256_generic_mod_fini(void)
{
    crypto_unregister_alg(&sha224);
    crypto_unregister_alg(&sha256);
    crypto_unregister_shash(&sha224);
    crypto_unregister_shash(&sha256);
}

module_init(sha256_generic_mod_init);
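sha256_generic registers two shash_alg instances from one module, and its init path unwinds on failure: if sha256 cannot be registered, the already-registered sha224 is unregistered before the error propagates. The same pattern reduced to its skeleton, with hypothetical names, is shown below.

    /* Skeleton of the two-algorithm register/unwind pattern above;
     * demo_a and demo_b are assumed to be filled-in shash_alg structs. */
    #include <crypto/internal/hash.h>
    #include <linux/module.h>

    static struct shash_alg demo_a, demo_b;

    static int __init demo_mod_init(void)
    {
        int ret;

        ret = crypto_register_shash(&demo_a);
        if (ret < 0)
            return ret;

        ret = crypto_register_shash(&demo_b);
        if (ret < 0)
            crypto_unregister_shash(&demo_a);   /* unwind the first one */

        return ret;
    }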
crypto/sha512_generic.c
@@ -10,7 +10,7 @@
 * later version.
 *
 */

#include <crypto/internal/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
@@ -18,16 +18,17 @@
#include <linux/crypto.h>
#include <linux/types.h>
#include <crypto/sha.h>

#include <linux/percpu.h>
#include <asm/byteorder.h>

struct sha512_ctx {
    u64 state[8];
    u32 count[4];
    u8 buf[128];
    u64 W[80];
};

static DEFINE_PER_CPU(u64[80], msg_schedule);

static inline u64 Ch(u64 x, u64 y, u64 z)
{
    return z ^ (x & (y ^ z));
@@ -89,11 +90,12 @@ static inline void BLEND_OP(int I, u64 *W)
}

static void
sha512_transform(u64 *state, u64 *W, const u8 *input)
sha512_transform(u64 *state, const u8 *input)
{
    u64 a, b, c, d, e, f, g, h, t1, t2;

    int i;
    u64 *W = get_cpu_var(msg_schedule);

    /* load the input */
    for (i = 0; i < 16; i++)
@@ -132,12 +134,14 @@ sha512_transform(u64 *state, u64 *W, const u8 *input)

    /* erase our data */
    a = b = c = d = e = f = g = h = t1 = t2 = 0;
    memset(W, 0, sizeof(__get_cpu_var(msg_schedule)));
    put_cpu_var(msg_schedule);
}

static void
sha512_init(struct crypto_tfm *tfm)
static int
sha512_init(struct shash_desc *desc)
{
    struct sha512_ctx *sctx = crypto_tfm_ctx(tfm);
    struct sha512_ctx *sctx = shash_desc_ctx(desc);
    sctx->state[0] = SHA512_H0;
    sctx->state[1] = SHA512_H1;
    sctx->state[2] = SHA512_H2;
@@ -147,12 +151,14 @@ sha512_init(struct crypto_tfm *tfm)
    sctx->state[6] = SHA512_H6;
    sctx->state[7] = SHA512_H7;
    sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0;

    return 0;
}

static void
sha384_init(struct crypto_tfm *tfm)
static int
sha384_init(struct shash_desc *desc)
{
    struct sha512_ctx *sctx = crypto_tfm_ctx(tfm);
    struct sha512_ctx *sctx = shash_desc_ctx(desc);
    sctx->state[0] = SHA384_H0;
    sctx->state[1] = SHA384_H1;
    sctx->state[2] = SHA384_H2;
@@ -162,12 +168,14 @@ sha384_init(struct crypto_tfm *tfm)
    sctx->state[6] = SHA384_H6;
    sctx->state[7] = SHA384_H7;
    sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0;

    return 0;
}

static void
sha512_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
static int
sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
    struct sha512_ctx *sctx = crypto_tfm_ctx(tfm);
    struct sha512_ctx *sctx = shash_desc_ctx(desc);

    unsigned int i, index, part_len;

@@ -187,10 +195,10 @@ sha512_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
    /* Transform as many times as possible. */
    if (len >= part_len) {
        memcpy(&sctx->buf[index], data, part_len);
        sha512_transform(sctx->state, sctx->W, sctx->buf);
        sha512_transform(sctx->state, sctx->buf);

        for (i = part_len; i + 127 < len; i+=128)
            sha512_transform(sctx->state, sctx->W, &data[i]);
            sha512_transform(sctx->state, &data[i]);

        index = 0;
    } else {
@@ -200,14 +208,13 @@ sha512_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
    /* Buffer remaining input */
    memcpy(&sctx->buf[index], &data[i], len - i);

    /* erase our data */
    memset(sctx->W, 0, sizeof(sctx->W));
    return 0;
}

static void
sha512_final(struct crypto_tfm *tfm, u8 *hash)
static int
sha512_final(struct shash_desc *desc, u8 *hash)
{
    struct sha512_ctx *sctx = crypto_tfm_ctx(tfm);
    struct sha512_ctx *sctx = shash_desc_ctx(desc);
    static u8 padding[128] = { 0x80, };
    __be64 *dst = (__be64 *)hash;
    __be32 bits[4];
@@ -223,10 +230,10 @@ sha512_final(struct crypto_tfm *tfm, u8 *hash)
    /* Pad out to 112 mod 128. */
    index = (sctx->count[0] >> 3) & 0x7f;
    pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
    sha512_update(tfm, padding, pad_len);
    sha512_update(desc, padding, pad_len);

    /* Append length (before padding) */
    sha512_update(tfm, (const u8 *)bits, sizeof(bits));
    sha512_update(desc, (const u8 *)bits, sizeof(bits));

    /* Store state in digest */
    for (i = 0; i < 8; i++)
@@ -234,66 +241,66 @@ sha512_final(struct crypto_tfm *tfm, u8 *hash)

    /* Zeroize sensitive information. */
    memset(sctx, 0, sizeof(struct sha512_ctx));

    return 0;
}

static void sha384_final(struct crypto_tfm *tfm, u8 *hash)
static int sha384_final(struct shash_desc *desc, u8 *hash)
{
    u8 D[64];
    u8 D[64];

    sha512_final(tfm, D);
    sha512_final(desc, D);

    memcpy(hash, D, 48);
    memset(D, 0, 64);
    memcpy(hash, D, 48);
    memset(D, 0, 64);

    return 0;
}

static struct crypto_alg sha512 = {
    .cra_name = "sha512",
    .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
    .cra_blocksize = SHA512_BLOCK_SIZE,
    .cra_ctxsize = sizeof(struct sha512_ctx),
    .cra_module = THIS_MODULE,
    .cra_alignmask = 3,
    .cra_list = LIST_HEAD_INIT(sha512.cra_list),
    .cra_u = { .digest = {
    .dia_digestsize = SHA512_DIGEST_SIZE,
    .dia_init = sha512_init,
    .dia_update = sha512_update,
    .dia_final = sha512_final }
    }
static struct shash_alg sha512 = {
    .digestsize = SHA512_DIGEST_SIZE,
    .init = sha512_init,
    .update = sha512_update,
    .final = sha512_final,
    .descsize = sizeof(struct sha512_ctx),
    .base = {
        .cra_name = "sha512",
        .cra_flags = CRYPTO_ALG_TYPE_SHASH,
        .cra_blocksize = SHA512_BLOCK_SIZE,
        .cra_module = THIS_MODULE,
    }
};

static struct crypto_alg sha384 = {
    .cra_name = "sha384",
    .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
    .cra_blocksize = SHA384_BLOCK_SIZE,
    .cra_ctxsize = sizeof(struct sha512_ctx),
    .cra_alignmask = 3,
    .cra_module = THIS_MODULE,
    .cra_list = LIST_HEAD_INIT(sha384.cra_list),
    .cra_u = { .digest = {
    .dia_digestsize = SHA384_DIGEST_SIZE,
    .dia_init = sha384_init,
    .dia_update = sha512_update,
    .dia_final = sha384_final }
    }
static struct shash_alg sha384 = {
    .digestsize = SHA384_DIGEST_SIZE,
    .init = sha384_init,
    .update = sha512_update,
    .final = sha384_final,
    .descsize = sizeof(struct sha512_ctx),
    .base = {
        .cra_name = "sha384",
        .cra_flags = CRYPTO_ALG_TYPE_SHASH,
        .cra_blocksize = SHA384_BLOCK_SIZE,
        .cra_module = THIS_MODULE,
    }
};

static int __init sha512_generic_mod_init(void)
{
    int ret = 0;

    if ((ret = crypto_register_alg(&sha384)) < 0)
    if ((ret = crypto_register_shash(&sha384)) < 0)
        goto out;
    if ((ret = crypto_register_alg(&sha512)) < 0)
        crypto_unregister_alg(&sha384);
    if ((ret = crypto_register_shash(&sha512)) < 0)
        crypto_unregister_shash(&sha384);
out:
    return ret;
}

static void __exit sha512_generic_mod_fini(void)
{
    crypto_unregister_alg(&sha384);
    crypto_unregister_alg(&sha512);
    crypto_unregister_shash(&sha384);
    crypto_unregister_shash(&sha512);
}

module_init(sha512_generic_mod_init);
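Besides the shash conversion, sha512_generic stops carrying the 640-byte W[80] message schedule inside every context and instead borrows a percpu buffer for the duration of each transform; get_cpu_var() keeps preemption disabled while the buffer is live, and the buffer is wiped before it is released. The pattern in isolation, with a hypothetical name:

    /* Illustrative sketch of the percpu scratch-buffer pattern above. */
    #include <linux/percpu.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static DEFINE_PER_CPU(u64[80], demo_schedule);

    static void demo_transform(void)
    {
        u64 *W = get_cpu_var(demo_schedule);    /* disables preemption */

        /* ... expand the message into W[0..79] and consume it ... */

        memset(W, 0, 80 * sizeof(u64));         /* wipe sensitive data */
        put_cpu_var(demo_schedule);             /* re-enables preemption */
    }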
crypto/shash.c
@@ -0,0 +1,508 @@
/*
 * Synchronous Cryptographic Hash operations.
 *
 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>

static const struct crypto_type crypto_shash_type;

static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
{
    return container_of(tfm, struct crypto_shash, base);
}

#include "internal.h"

static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
                                  unsigned int keylen)
{
    struct shash_alg *shash = crypto_shash_alg(tfm);
    unsigned long alignmask = crypto_shash_alignmask(tfm);
    unsigned long absize;
    u8 *buffer, *alignbuffer;
    int err;

    absize = keylen + (alignmask & ~(CRYPTO_MINALIGN - 1));
    buffer = kmalloc(absize, GFP_KERNEL);
    if (!buffer)
        return -ENOMEM;

    alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
    memcpy(alignbuffer, key, keylen);
    err = shash->setkey(tfm, alignbuffer, keylen);
    memset(alignbuffer, 0, keylen);
    kfree(buffer);
    return err;
}

int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
                        unsigned int keylen)
{
    struct shash_alg *shash = crypto_shash_alg(tfm);
    unsigned long alignmask = crypto_shash_alignmask(tfm);

    if (!shash->setkey)
        return -ENOSYS;

    if ((unsigned long)key & alignmask)
        return shash_setkey_unaligned(tfm, key, keylen);

    return shash->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);

static inline unsigned int shash_align_buffer_size(unsigned len,
                                                   unsigned long mask)
{
    return len + (mask & ~(__alignof__(u8 __attribute__ ((aligned))) - 1));
}

static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
                                  unsigned int len)
{
    struct crypto_shash *tfm = desc->tfm;
    struct shash_alg *shash = crypto_shash_alg(tfm);
    unsigned long alignmask = crypto_shash_alignmask(tfm);
    unsigned int unaligned_len = alignmask + 1 -
                                 ((unsigned long)data & alignmask);
    u8 buf[shash_align_buffer_size(unaligned_len, alignmask)]
        __attribute__ ((aligned));

    memcpy(buf, data, unaligned_len);

    return shash->update(desc, buf, unaligned_len) ?:
           shash->update(desc, data + unaligned_len, len - unaligned_len);
}

int crypto_shash_update(struct shash_desc *desc, const u8 *data,
                        unsigned int len)
{
    struct crypto_shash *tfm = desc->tfm;
    struct shash_alg *shash = crypto_shash_alg(tfm);
    unsigned long alignmask = crypto_shash_alignmask(tfm);

    if ((unsigned long)data & alignmask)
        return shash_update_unaligned(desc, data, len);

    return shash->update(desc, data, len);
}
EXPORT_SYMBOL_GPL(crypto_shash_update);

static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
{
    struct crypto_shash *tfm = desc->tfm;
    unsigned long alignmask = crypto_shash_alignmask(tfm);
    struct shash_alg *shash = crypto_shash_alg(tfm);
    unsigned int ds = crypto_shash_digestsize(tfm);
    u8 buf[shash_align_buffer_size(ds, alignmask)]
        __attribute__ ((aligned));
    int err;

    err = shash->final(desc, buf);
    memcpy(out, buf, ds);
    return err;
}

int crypto_shash_final(struct shash_desc *desc, u8 *out)
{
    struct crypto_shash *tfm = desc->tfm;
    struct shash_alg *shash = crypto_shash_alg(tfm);
    unsigned long alignmask = crypto_shash_alignmask(tfm);

    if ((unsigned long)out & alignmask)
        return shash_final_unaligned(desc, out);

    return shash->final(desc, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_final);

static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data,
                                 unsigned int len, u8 *out)
{
    return crypto_shash_update(desc, data, len) ?:
           crypto_shash_final(desc, out);
}

int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
                       unsigned int len, u8 *out)
{
    struct crypto_shash *tfm = desc->tfm;
    struct shash_alg *shash = crypto_shash_alg(tfm);
    unsigned long alignmask = crypto_shash_alignmask(tfm);

    if (((unsigned long)data | (unsigned long)out) & alignmask ||
        !shash->finup)
        return shash_finup_unaligned(desc, data, len, out);

    return shash->finup(desc, data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_finup);

static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data,
                                  unsigned int len, u8 *out)
{
    return crypto_shash_init(desc) ?:
           crypto_shash_update(desc, data, len) ?:
           crypto_shash_final(desc, out);
}

int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
                        unsigned int len, u8 *out)
{
    struct crypto_shash *tfm = desc->tfm;
    struct shash_alg *shash = crypto_shash_alg(tfm);
    unsigned long alignmask = crypto_shash_alignmask(tfm);

    if (((unsigned long)data | (unsigned long)out) & alignmask ||
        !shash->digest)
        return shash_digest_unaligned(desc, data, len, out);

    return shash->digest(desc, data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);

int crypto_shash_import(struct shash_desc *desc, const u8 *in)
{
    struct crypto_shash *tfm = desc->tfm;
    struct shash_alg *alg = crypto_shash_alg(tfm);

    memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm));

    if (alg->reinit)
        alg->reinit(desc);

    return 0;
}
EXPORT_SYMBOL_GPL(crypto_shash_import);

static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
                              unsigned int keylen)
{
    struct crypto_shash **ctx = crypto_ahash_ctx(tfm);

    return crypto_shash_setkey(*ctx, key, keylen);
}

static int shash_async_init(struct ahash_request *req)
{
    struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
    struct shash_desc *desc = ahash_request_ctx(req);

    desc->tfm = *ctx;
    desc->flags = req->base.flags;

    return crypto_shash_init(desc);
}

static int shash_async_update(struct ahash_request *req)
{
    struct shash_desc *desc = ahash_request_ctx(req);
    struct crypto_hash_walk walk;
    int nbytes;

    for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
         nbytes = crypto_hash_walk_done(&walk, nbytes))
        nbytes = crypto_shash_update(desc, walk.data, nbytes);

    return nbytes;
}

static int shash_async_final(struct ahash_request *req)
{
    return crypto_shash_final(ahash_request_ctx(req), req->result);
}

static int shash_async_digest(struct ahash_request *req)
{
    struct scatterlist *sg = req->src;
    unsigned int offset = sg->offset;
    unsigned int nbytes = req->nbytes;
    int err;

    if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
        struct crypto_shash **ctx =
            crypto_ahash_ctx(crypto_ahash_reqtfm(req));
        struct shash_desc *desc = ahash_request_ctx(req);
        void *data;

        desc->tfm = *ctx;
        desc->flags = req->base.flags;

        data = crypto_kmap(sg_page(sg), 0);
        err = crypto_shash_digest(desc, data + offset, nbytes,
                                  req->result);
        crypto_kunmap(data, 0);
        crypto_yield(desc->flags);
        goto out;
    }

    err = shash_async_init(req);
    if (err)
        goto out;

    err = shash_async_update(req);
    if (err)
        goto out;

    err = shash_async_final(req);

out:
    return err;
}

static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
{
    struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_shash(*ctx);
|
||||
}
|
||||
|
||||
static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_alg *calg = tfm->__crt_alg;
|
||||
struct shash_alg *alg = __crypto_shash_alg(calg);
|
||||
struct ahash_tfm *crt = &tfm->crt_ahash;
|
||||
struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_shash *shash;
|
||||
|
||||
if (!crypto_mod_get(calg))
|
||||
return -EAGAIN;
|
||||
|
||||
shash = __crypto_shash_cast(crypto_create_tfm(
|
||||
calg, &crypto_shash_type));
|
||||
if (IS_ERR(shash)) {
|
||||
crypto_mod_put(calg);
|
||||
return PTR_ERR(shash);
|
||||
}
|
||||
|
||||
*ctx = shash;
|
||||
tfm->exit = crypto_exit_shash_ops_async;
|
||||
|
||||
crt->init = shash_async_init;
|
||||
crt->update = shash_async_update;
|
||||
crt->final = shash_async_final;
|
||||
crt->digest = shash_async_digest;
|
||||
crt->setkey = shash_async_setkey;
|
||||
|
||||
crt->digestsize = alg->digestsize;
|
||||
crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct shash_desc *desc = crypto_hash_ctx(tfm);
|
||||
|
||||
return crypto_shash_setkey(desc->tfm, key, keylen);
|
||||
}
|
||||
|
||||
static int shash_compat_init(struct hash_desc *hdesc)
|
||||
{
|
||||
struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm);
|
||||
|
||||
desc->flags = hdesc->flags;
|
||||
|
||||
return crypto_shash_init(desc);
|
||||
}
|
||||
|
||||
static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg,
|
||||
unsigned int len)
|
||||
{
|
||||
struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm);
|
||||
struct crypto_hash_walk walk;
|
||||
int nbytes;
|
||||
|
||||
for (nbytes = crypto_hash_walk_first_compat(hdesc, &walk, sg, len);
|
||||
nbytes > 0; nbytes = crypto_hash_walk_done(&walk, nbytes))
|
||||
nbytes = crypto_shash_update(desc, walk.data, nbytes);
|
||||
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int shash_compat_final(struct hash_desc *hdesc, u8 *out)
|
||||
{
|
||||
return crypto_shash_final(crypto_hash_ctx(hdesc->tfm), out);
|
||||
}
|
||||
|
||||
static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
|
||||
unsigned int nbytes, u8 *out)
|
||||
{
|
||||
unsigned int offset = sg->offset;
|
||||
int err;
|
||||
|
||||
if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
|
||||
struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm);
|
||||
void *data;
|
||||
|
||||
desc->flags = hdesc->flags;
|
||||
|
||||
data = crypto_kmap(sg_page(sg), 0);
|
||||
err = crypto_shash_digest(desc, data + offset, nbytes, out);
|
||||
crypto_kunmap(data, 0);
|
||||
crypto_yield(desc->flags);
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = shash_compat_init(hdesc);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = shash_compat_update(hdesc, sg, nbytes);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = shash_compat_final(hdesc, out);
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct shash_desc *desc= crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_shash(desc->tfm);
|
||||
}
|
||||
|
||||
static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct hash_tfm *crt = &tfm->crt_hash;
|
||||
struct crypto_alg *calg = tfm->__crt_alg;
|
||||
struct shash_alg *alg = __crypto_shash_alg(calg);
|
||||
struct shash_desc *desc = crypto_tfm_ctx(tfm);
|
||||
struct crypto_shash *shash;
|
||||
|
||||
shash = __crypto_shash_cast(crypto_create_tfm(
|
||||
calg, &crypto_shash_type));
|
||||
if (IS_ERR(shash))
|
||||
return PTR_ERR(shash);
|
||||
|
||||
desc->tfm = shash;
|
||||
tfm->exit = crypto_exit_shash_ops_compat;
|
||||
|
||||
crt->init = shash_compat_init;
|
||||
crt->update = shash_compat_update;
|
||||
crt->final = shash_compat_final;
|
||||
crt->digest = shash_compat_digest;
|
||||
crt->setkey = shash_compat_setkey;
|
||||
|
||||
crt->digestsize = alg->digestsize;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
|
||||
{
|
||||
switch (mask & CRYPTO_ALG_TYPE_MASK) {
|
||||
case CRYPTO_ALG_TYPE_HASH_MASK:
|
||||
return crypto_init_shash_ops_compat(tfm);
|
||||
case CRYPTO_ALG_TYPE_AHASH_MASK:
|
||||
return crypto_init_shash_ops_async(tfm);
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type,
|
||||
u32 mask)
|
||||
{
|
||||
struct shash_alg *salg = __crypto_shash_alg(alg);
|
||||
|
||||
switch (mask & CRYPTO_ALG_TYPE_MASK) {
|
||||
case CRYPTO_ALG_TYPE_HASH_MASK:
|
||||
return sizeof(struct shash_desc) + salg->descsize;
|
||||
case CRYPTO_ALG_TYPE_AHASH_MASK:
|
||||
return sizeof(struct crypto_shash *);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int crypto_shash_init_tfm(struct crypto_tfm *tfm,
|
||||
const struct crypto_type *frontend)
|
||||
{
|
||||
if (frontend->type != CRYPTO_ALG_TYPE_SHASH)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int crypto_shash_extsize(struct crypto_alg *alg,
|
||||
const struct crypto_type *frontend)
|
||||
{
|
||||
return alg->cra_ctxsize;
|
||||
}
|
||||
|
||||
static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
__attribute__ ((unused));
|
||||
static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
{
|
||||
struct shash_alg *salg = __crypto_shash_alg(alg);
|
||||
|
||||
seq_printf(m, "type : shash\n");
|
||||
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
|
||||
seq_printf(m, "digestsize : %u\n", salg->digestsize);
|
||||
seq_printf(m, "descsize : %u\n", salg->descsize);
|
||||
}
|
||||
|
||||
static const struct crypto_type crypto_shash_type = {
|
||||
.ctxsize = crypto_shash_ctxsize,
|
||||
.extsize = crypto_shash_extsize,
|
||||
.init = crypto_init_shash_ops,
|
||||
.init_tfm = crypto_shash_init_tfm,
|
||||
#ifdef CONFIG_PROC_FS
|
||||
.show = crypto_shash_show,
|
||||
#endif
|
||||
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
|
||||
.maskset = CRYPTO_ALG_TYPE_MASK,
|
||||
.type = CRYPTO_ALG_TYPE_SHASH,
|
||||
.tfmsize = offsetof(struct crypto_shash, base),
|
||||
};
|
||||
|
||||
struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
|
||||
u32 mask)
|
||||
{
|
||||
return __crypto_shash_cast(
|
||||
crypto_alloc_tfm(alg_name, &crypto_shash_type, type, mask));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_alloc_shash);
|
||||
|
||||
int crypto_register_shash(struct shash_alg *alg)
|
||||
{
|
||||
struct crypto_alg *base = &alg->base;
|
||||
|
||||
if (alg->digestsize > PAGE_SIZE / 8 ||
|
||||
alg->descsize > PAGE_SIZE / 8)
|
||||
return -EINVAL;
|
||||
|
||||
base->cra_type = &crypto_shash_type;
|
||||
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
|
||||
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
|
||||
|
||||
return crypto_register_alg(base);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_register_shash);
|
||||
|
||||
int crypto_unregister_shash(struct shash_alg *alg)
|
||||
{
|
||||
return crypto_unregister_alg(&alg->base);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_unregister_shash);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Synchronous cryptographic hash type");
|
|
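For orientation, the exported entry points above are what drivers and the self-tests call; here is a minimal, hedged sketch of a caller, not part of this patch. The "sha1" name and the helper function are illustrative, the declarations are assumed to live in <crypto/hash.h>, and the on-stack descriptor trick is the same one the crc32c self-test below uses:

/* Hypothetical caller of the shash interface sketched above. */
#include <crypto/hash.h>
#include <linux/err.h>

static int example_shash_digest(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("sha1", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                /* Per-call state lives in a caller-provided descriptor
                 * sized by crypto_shash_descsize(). */
                struct {
                        struct shash_desc shash;
                        char ctx[crypto_shash_descsize(tfm)];
                } sdesc;

                sdesc.shash.tfm = tfm;
                sdesc.shash.flags = 0;

                err = crypto_shash_digest(&sdesc.shash, data, len, out);
        }

        crypto_free_shash(tfm);
        return err;
}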
@ -843,6 +843,14 @@ static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
                        goto out;
                }

                if (dlen != ctemplate[i].outlen) {
                        printk(KERN_ERR "alg: comp: Compression test %d "
                               "failed for %s: output len = %d\n", i + 1, algo,
                               dlen);
                        ret = -EINVAL;
                        goto out;
                }

                if (memcmp(result, ctemplate[i].output, dlen)) {
                        printk(KERN_ERR "alg: comp: Compression test %d "
                               "failed for %s\n", i + 1, algo);

@ -853,7 +861,7 @@ static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
        }

        for (i = 0; i < dtcount; i++) {
                int ilen, ret, dlen = COMP_BUF_SIZE;
                int ilen, dlen = COMP_BUF_SIZE;

                memset(result, 0, sizeof (result));

@ -867,6 +875,14 @@ static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
                        goto out;
                }

                if (dlen != dtemplate[i].outlen) {
                        printk(KERN_ERR "alg: comp: Decompression test %d "
                               "failed for %s: output len = %d\n", i + 1, algo,
                               dlen);
                        ret = -EINVAL;
                        goto out;
                }

                if (memcmp(result, dtemplate[i].output, dlen)) {
                        printk(KERN_ERR "alg: comp: Decompression test %d "
                               "failed for %s\n", i + 1, algo);

@ -1010,6 +1026,55 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
        return err;
}

static int alg_test_crc32c(const struct alg_test_desc *desc,
                           const char *driver, u32 type, u32 mask)
{
        struct crypto_shash *tfm;
        u32 val;
        int err;

        err = alg_test_hash(desc, driver, type, mask);
        if (err)
                goto out;

        tfm = crypto_alloc_shash(driver, type, mask);
        if (IS_ERR(tfm)) {
                printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
                       "%ld\n", driver, PTR_ERR(tfm));
                err = PTR_ERR(tfm);
                goto out;
        }

        do {
                struct {
                        struct shash_desc shash;
                        char ctx[crypto_shash_descsize(tfm)];
                } sdesc;

                sdesc.shash.tfm = tfm;
                sdesc.shash.flags = 0;

                *(u32 *)sdesc.ctx = le32_to_cpu(420553207);
                err = crypto_shash_final(&sdesc.shash, (u8 *)&val);
                if (err) {
                        printk(KERN_ERR "alg: crc32c: Operation failed for "
                               "%s: %d\n", driver, err);
                        break;
                }

                if (val != ~420553207) {
                        printk(KERN_ERR "alg: crc32c: Test failed for %s: "
                               "%d\n", driver, val);
                        err = -EINVAL;
                }
        } while (0);

        crypto_free_shash(tfm);

out:
        return err;
}

/* Please keep this list sorted by algorithm name. */
static const struct alg_test_desc alg_test_descs[] = {
        {

@ -1134,7 +1199,7 @@ static const struct alg_test_desc alg_test_descs[] = {
                }
        }, {
                .alg = "crc32c",
                .test = alg_test_hash,
                .test = alg_test_crc32c,
                .suite = {
                        .hash = {
                                .vecs = crc32c_tv_template,

@ -1801,6 +1866,7 @@ static int alg_find_test(const char *alg)
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
{
        int i;
        int rc;

        if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) {
                char nalg[CRYPTO_MAX_ALG_NAME];

@ -1820,8 +1886,12 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
        if (i < 0)
                goto notest;

        return alg_test_descs[i].test(alg_test_descs + i, driver,
        rc = alg_test_descs[i].test(alg_test_descs + i, driver,
                                     type, mask);
        if (fips_enabled && rc)
                panic("%s: %s alg self test failed in fips mode!\n", driver, alg);

        return rc;

notest:
        printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver);

@ -8349,7 +8349,7 @@ struct comp_testvec {

/*
 * Deflate test vectors (null-terminated strings).
 * Params: winbits=11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
 * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
 */
#define DEFLATE_COMP_TEST_VECTORS 2
#define DEFLATE_DECOMP_TEST_VECTORS 2
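The parameter fix above is worth a note: zlib treats a negative windowBits as a request for a raw deflate stream with no zlib header or checksum, which is the format these vectors use. A hedged user-space sketch (plain zlib, not kernel code, helper name illustrative) of compressing with the same parameters:

/* Illustrative only: reproduce the test-vector parameters with zlib. */
#include <zlib.h>
#include <string.h>

static int raw_deflate(const unsigned char *in, unsigned int in_len,
                       unsigned char *out, unsigned long *out_len)
{
        z_stream zs;
        int err;

        memset(&zs, 0, sizeof(zs));
        /* windowBits = -11 selects a raw deflate stream. */
        err = deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                           -11, MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY);
        if (err != Z_OK)
                return err;

        zs.next_in = (unsigned char *)in;
        zs.avail_in = in_len;
        zs.next_out = out;
        zs.avail_out = *out_len;

        err = deflate(&zs, Z_FINISH);
        *out_len = zs.total_out;
        deflateEnd(&zs);

        return err == Z_STREAM_END ? Z_OK : err;
}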
crypto/tgr192.c
@ -21,11 +21,11 @@
 * (at your option) any later version.
 *
 */
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/types.h>

#define TGR192_DIGEST_SIZE 24

@ -495,24 +495,26 @@ static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data)
        tctx->c = c;
}

static void tgr192_init(struct crypto_tfm *tfm)
static int tgr192_init(struct shash_desc *desc)
{
        struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm);
        struct tgr192_ctx *tctx = shash_desc_ctx(desc);

        tctx->a = 0x0123456789abcdefULL;
        tctx->b = 0xfedcba9876543210ULL;
        tctx->c = 0xf096a5b4c3b2e187ULL;
        tctx->nblocks = 0;
        tctx->count = 0;

        return 0;
}


/* Update the message digest with the contents
 * of INBUF with length INLEN. */
static void tgr192_update(struct crypto_tfm *tfm, const u8 *inbuf,
static int tgr192_update(struct shash_desc *desc, const u8 *inbuf,
                          unsigned int len)
{
        struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm);
        struct tgr192_ctx *tctx = shash_desc_ctx(desc);

        if (tctx->count == 64) {        /* flush the buffer */
                tgr192_transform(tctx, tctx->hash);

@ -520,15 +522,15 @@ static void tgr192_update(struct crypto_tfm *tfm, const u8 *inbuf,
                tctx->nblocks++;
        }
        if (!inbuf) {
                return;
                return 0;
        }
        if (tctx->count) {
                for (; len && tctx->count < 64; len--) {
                        tctx->hash[tctx->count++] = *inbuf++;
                }
                tgr192_update(tfm, NULL, 0);
                tgr192_update(desc, NULL, 0);
                if (!len) {
                        return;
                        return 0;
                }

        }

@ -543,20 +545,22 @@ static void tgr192_update(struct crypto_tfm *tfm, const u8 *inbuf,
        for (; len && tctx->count < 64; len--) {
                tctx->hash[tctx->count++] = *inbuf++;
        }

        return 0;
}



/* The routine terminates the computation */
static void tgr192_final(struct crypto_tfm *tfm, u8 * out)
static int tgr192_final(struct shash_desc *desc, u8 * out)
{
        struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm);
        struct tgr192_ctx *tctx = shash_desc_ctx(desc);
        __be64 *dst = (__be64 *)out;
        __be64 *be64p;
        __le32 *le32p;
        u32 t, msb, lsb;

        tgr192_update(tfm, NULL, 0); /* flush */ ;
        tgr192_update(desc, NULL, 0); /* flush */ ;

        msb = 0;
        t = tctx->nblocks;

@ -584,7 +588,7 @@ static void tgr192_final(struct crypto_tfm *tfm, u8 * out)
                while (tctx->count < 64) {
                        tctx->hash[tctx->count++] = 0;
                }
                tgr192_update(tfm, NULL, 0); /* flush */ ;
                tgr192_update(desc, NULL, 0); /* flush */ ;
                memset(tctx->hash, 0, 56);    /* fill next block with zeroes */
        }
        /* append the 64 bit count */

@ -598,91 +602,94 @@ static void tgr192_final(struct crypto_tfm *tfm, u8 * out)
        dst[0] = be64p[0] = cpu_to_be64(tctx->a);
        dst[1] = be64p[1] = cpu_to_be64(tctx->b);
        dst[2] = be64p[2] = cpu_to_be64(tctx->c);

        return 0;
}

static void tgr160_final(struct crypto_tfm *tfm, u8 * out)
static int tgr160_final(struct shash_desc *desc, u8 * out)
{
        u8 D[64];

        tgr192_final(tfm, D);
        tgr192_final(desc, D);
        memcpy(out, D, TGR160_DIGEST_SIZE);
        memset(D, 0, TGR192_DIGEST_SIZE);

        return 0;
}

static void tgr128_final(struct crypto_tfm *tfm, u8 * out)
static int tgr128_final(struct shash_desc *desc, u8 * out)
{
        u8 D[64];

        tgr192_final(tfm, D);
        tgr192_final(desc, D);
        memcpy(out, D, TGR128_DIGEST_SIZE);
        memset(D, 0, TGR192_DIGEST_SIZE);

        return 0;
}

static struct crypto_alg tgr192 = {
        .cra_name = "tgr192",
        .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
        .cra_blocksize = TGR192_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct tgr192_ctx),
        .cra_module = THIS_MODULE,
        .cra_alignmask = 7,
        .cra_list = LIST_HEAD_INIT(tgr192.cra_list),
        .cra_u = {.digest = {
                  .dia_digestsize = TGR192_DIGEST_SIZE,
                  .dia_init = tgr192_init,
                  .dia_update = tgr192_update,
                  .dia_final = tgr192_final}}
static struct shash_alg tgr192 = {
        .digestsize = TGR192_DIGEST_SIZE,
        .init = tgr192_init,
        .update = tgr192_update,
        .final = tgr192_final,
        .descsize = sizeof(struct tgr192_ctx),
        .base = {
                .cra_name = "tgr192",
                .cra_flags = CRYPTO_ALG_TYPE_SHASH,
                .cra_blocksize = TGR192_BLOCK_SIZE,
                .cra_module = THIS_MODULE,
        }
};

static struct crypto_alg tgr160 = {
        .cra_name = "tgr160",
        .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
        .cra_blocksize = TGR192_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct tgr192_ctx),
        .cra_module = THIS_MODULE,
        .cra_alignmask = 7,
        .cra_list = LIST_HEAD_INIT(tgr160.cra_list),
        .cra_u = {.digest = {
                  .dia_digestsize = TGR160_DIGEST_SIZE,
                  .dia_init = tgr192_init,
                  .dia_update = tgr192_update,
                  .dia_final = tgr160_final}}
static struct shash_alg tgr160 = {
        .digestsize = TGR160_DIGEST_SIZE,
        .init = tgr192_init,
        .update = tgr192_update,
        .final = tgr160_final,
        .descsize = sizeof(struct tgr192_ctx),
        .base = {
                .cra_name = "tgr160",
                .cra_flags = CRYPTO_ALG_TYPE_SHASH,
                .cra_blocksize = TGR192_BLOCK_SIZE,
                .cra_module = THIS_MODULE,
        }
};

static struct crypto_alg tgr128 = {
        .cra_name = "tgr128",
        .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
        .cra_blocksize = TGR192_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct tgr192_ctx),
        .cra_module = THIS_MODULE,
        .cra_alignmask = 7,
        .cra_list = LIST_HEAD_INIT(tgr128.cra_list),
        .cra_u = {.digest = {
                  .dia_digestsize = TGR128_DIGEST_SIZE,
                  .dia_init = tgr192_init,
                  .dia_update = tgr192_update,
                  .dia_final = tgr128_final}}
static struct shash_alg tgr128 = {
        .digestsize = TGR128_DIGEST_SIZE,
        .init = tgr192_init,
        .update = tgr192_update,
        .final = tgr128_final,
        .descsize = sizeof(struct tgr192_ctx),
        .base = {
                .cra_name = "tgr128",
                .cra_flags = CRYPTO_ALG_TYPE_SHASH,
                .cra_blocksize = TGR192_BLOCK_SIZE,
                .cra_module = THIS_MODULE,
        }
};

static int __init tgr192_mod_init(void)
{
        int ret = 0;

        ret = crypto_register_alg(&tgr192);
        ret = crypto_register_shash(&tgr192);

        if (ret < 0) {
                goto out;
        }

        ret = crypto_register_alg(&tgr160);
        ret = crypto_register_shash(&tgr160);
        if (ret < 0) {
                crypto_unregister_alg(&tgr192);
                crypto_unregister_shash(&tgr192);
                goto out;
        }

        ret = crypto_register_alg(&tgr128);
        ret = crypto_register_shash(&tgr128);
        if (ret < 0) {
                crypto_unregister_alg(&tgr192);
                crypto_unregister_alg(&tgr160);
                crypto_unregister_shash(&tgr192);
                crypto_unregister_shash(&tgr160);
        }
out:
        return ret;

@ -690,9 +697,9 @@ static int __init tgr192_mod_init(void)

static void __exit tgr192_mod_fini(void)
{
        crypto_unregister_alg(&tgr192);
        crypto_unregister_alg(&tgr160);
        crypto_unregister_alg(&tgr128);
        crypto_unregister_shash(&tgr192);
        crypto_unregister_shash(&tgr160);
        crypto_unregister_shash(&tgr128);
}

MODULE_ALIAS("tgr160");
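The wp512 file below gets the same mechanical treatment just applied to tgr192. Distilled into one hedged sketch with a hypothetical "foo" digest (every name and size here is illustrative), the conversion turns the old void dia_* hooks on crypto_tfm into int-returning shash callbacks whose state lives in the descriptor:

/* Illustrative digest skeleton in the new shash style. */
#include <crypto/internal/hash.h>
#include <linux/module.h>
#include <linux/string.h>

#define FOO_DIGEST_SIZE 16
#define FOO_BLOCK_SIZE 64

struct foo_ctx {
        u8 state[FOO_DIGEST_SIZE];
};

static int foo_init(struct shash_desc *desc)
{
        struct foo_ctx *fctx = shash_desc_ctx(desc);

        memset(fctx->state, 0, sizeof(fctx->state));
        return 0;                       /* was: void dia_init(tfm) */
}

static int foo_update(struct shash_desc *desc, const u8 *data,
                      unsigned int len)
{
        /* mix data into shash_desc_ctx(desc) here */
        return 0;
}

static int foo_final(struct shash_desc *desc, u8 *out)
{
        struct foo_ctx *fctx = shash_desc_ctx(desc);

        memcpy(out, fctx->state, FOO_DIGEST_SIZE);
        return 0;
}

static struct shash_alg foo_alg = {
        .digestsize = FOO_DIGEST_SIZE,
        .init = foo_init,
        .update = foo_update,
        .final = foo_final,
        .descsize = sizeof(struct foo_ctx),   /* replaces cra_ctxsize */
        .base = {
                .cra_name = "foo",
                .cra_flags = CRYPTO_ALG_TYPE_SHASH,
                .cra_blocksize = FOO_BLOCK_SIZE,
                .cra_module = THIS_MODULE,
        }
};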
crypto/wp512.c
@ -19,11 +19,11 @@
 * (at your option) any later version.
 *
 */
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/types.h>

#define WP512_DIGEST_SIZE 64

@ -980,8 +980,8 @@ static void wp512_process_buffer(struct wp512_ctx *wctx) {

}

static void wp512_init(struct crypto_tfm *tfm) {
        struct wp512_ctx *wctx = crypto_tfm_ctx(tfm);
static int wp512_init(struct shash_desc *desc) {
        struct wp512_ctx *wctx = shash_desc_ctx(desc);
        int i;

        memset(wctx->bitLength, 0, 32);

@ -990,12 +990,14 @@ static void wp512_init(struct crypto_tfm *tfm) {
        for (i = 0; i < 8; i++) {
                wctx->hash[i] = 0L;
        }

        return 0;
}

static void wp512_update(struct crypto_tfm *tfm, const u8 *source,
static int wp512_update(struct shash_desc *desc, const u8 *source,
                         unsigned int len)
{
        struct wp512_ctx *wctx = crypto_tfm_ctx(tfm);
        struct wp512_ctx *wctx = shash_desc_ctx(desc);
        int sourcePos = 0;
        unsigned int bits_len = len * 8; // convert to number of bits
        int sourceGap = (8 - ((int)bits_len & 7)) & 7;

@ -1051,11 +1053,12 @@ static void wp512_update(struct crypto_tfm *tfm, const u8 *source,
        wctx->bufferBits = bufferBits;
        wctx->bufferPos = bufferPos;

        return 0;
}

static void wp512_final(struct crypto_tfm *tfm, u8 *out)
static int wp512_final(struct shash_desc *desc, u8 *out)
{
        struct wp512_ctx *wctx = crypto_tfm_ctx(tfm);
        struct wp512_ctx *wctx = shash_desc_ctx(desc);
        int i;
        u8 *buffer = wctx->buffer;
        u8 *bitLength = wctx->bitLength;

@ -1084,89 +1087,95 @@ static void wp512_final(struct crypto_tfm *tfm, u8 *out)
                digest[i] = cpu_to_be64(wctx->hash[i]);
        wctx->bufferBits = bufferBits;
        wctx->bufferPos = bufferPos;

        return 0;
}

static void wp384_final(struct crypto_tfm *tfm, u8 *out)
static int wp384_final(struct shash_desc *desc, u8 *out)
{
        u8 D[64];

        wp512_final(tfm, D);
        wp512_final(desc, D);
        memcpy (out, D, WP384_DIGEST_SIZE);
        memset (D, 0, WP512_DIGEST_SIZE);

        return 0;
}

static void wp256_final(struct crypto_tfm *tfm, u8 *out)
static int wp256_final(struct shash_desc *desc, u8 *out)
{
        u8 D[64];

        wp512_final(tfm, D);
        wp512_final(desc, D);
        memcpy (out, D, WP256_DIGEST_SIZE);
        memset (D, 0, WP512_DIGEST_SIZE);

        return 0;
}

static struct crypto_alg wp512 = {
        .cra_name = "wp512",
        .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
        .cra_blocksize = WP512_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct wp512_ctx),
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(wp512.cra_list),
        .cra_u = { .digest = {
                .dia_digestsize = WP512_DIGEST_SIZE,
                .dia_init = wp512_init,
                .dia_update = wp512_update,
                .dia_final = wp512_final } }
static struct shash_alg wp512 = {
        .digestsize = WP512_DIGEST_SIZE,
        .init = wp512_init,
        .update = wp512_update,
        .final = wp512_final,
        .descsize = sizeof(struct wp512_ctx),
        .base = {
                .cra_name = "wp512",
                .cra_flags = CRYPTO_ALG_TYPE_SHASH,
                .cra_blocksize = WP512_BLOCK_SIZE,
                .cra_module = THIS_MODULE,
        }
};

static struct crypto_alg wp384 = {
        .cra_name = "wp384",
        .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
        .cra_blocksize = WP512_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct wp512_ctx),
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(wp384.cra_list),
        .cra_u = { .digest = {
                .dia_digestsize = WP384_DIGEST_SIZE,
                .dia_init = wp512_init,
                .dia_update = wp512_update,
                .dia_final = wp384_final } }
static struct shash_alg wp384 = {
        .digestsize = WP384_DIGEST_SIZE,
        .init = wp512_init,
        .update = wp512_update,
        .final = wp384_final,
        .descsize = sizeof(struct wp512_ctx),
        .base = {
                .cra_name = "wp384",
                .cra_flags = CRYPTO_ALG_TYPE_SHASH,
                .cra_blocksize = WP512_BLOCK_SIZE,
                .cra_module = THIS_MODULE,
        }
};

static struct crypto_alg wp256 = {
        .cra_name = "wp256",
        .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
        .cra_blocksize = WP512_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct wp512_ctx),
        .cra_module = THIS_MODULE,
        .cra_list = LIST_HEAD_INIT(wp256.cra_list),
        .cra_u = { .digest = {
                .dia_digestsize = WP256_DIGEST_SIZE,
                .dia_init = wp512_init,
                .dia_update = wp512_update,
                .dia_final = wp256_final } }
static struct shash_alg wp256 = {
        .digestsize = WP256_DIGEST_SIZE,
        .init = wp512_init,
        .update = wp512_update,
        .final = wp256_final,
        .descsize = sizeof(struct wp512_ctx),
        .base = {
                .cra_name = "wp256",
                .cra_flags = CRYPTO_ALG_TYPE_SHASH,
                .cra_blocksize = WP512_BLOCK_SIZE,
                .cra_module = THIS_MODULE,
        }
};

static int __init wp512_mod_init(void)
{
        int ret = 0;

        ret = crypto_register_alg(&wp512);
        ret = crypto_register_shash(&wp512);

        if (ret < 0)
                goto out;

        ret = crypto_register_alg(&wp384);
        ret = crypto_register_shash(&wp384);
        if (ret < 0)
        {
                crypto_unregister_alg(&wp512);
                crypto_unregister_shash(&wp512);
                goto out;
        }

        ret = crypto_register_alg(&wp256);
        ret = crypto_register_shash(&wp256);
        if (ret < 0)
        {
                crypto_unregister_alg(&wp512);
                crypto_unregister_alg(&wp384);
                crypto_unregister_shash(&wp512);
                crypto_unregister_shash(&wp384);
        }
out:
        return ret;

@ -1174,9 +1183,9 @@ out:

static void __exit wp512_mod_fini(void)
{
        crypto_unregister_alg(&wp512);
        crypto_unregister_alg(&wp384);
        crypto_unregister_alg(&wp256);
        crypto_unregister_shash(&wp512);
        crypto_unregister_shash(&wp384);
        crypto_unregister_shash(&wp256);
}

MODULE_ALIAS("wp384");
@ -38,9 +38,6 @@
|
|||
|
||||
#include <asm/kmap_types.h>
|
||||
|
||||
#undef dprintk
|
||||
|
||||
#define HIFN_TEST
|
||||
//#define HIFN_DEBUG
|
||||
|
||||
#ifdef HIFN_DEBUG
|
||||
|
@ -363,14 +360,14 @@ static atomic_t hifn_dev_number;
|
|||
#define HIFN_NAMESIZE 32
|
||||
#define HIFN_MAX_RESULT_ORDER 5
|
||||
|
||||
#define HIFN_D_CMD_RSIZE 24*4
|
||||
#define HIFN_D_SRC_RSIZE 80*4
|
||||
#define HIFN_D_DST_RSIZE 80*4
|
||||
#define HIFN_D_RES_RSIZE 24*4
|
||||
#define HIFN_D_CMD_RSIZE 24*1
|
||||
#define HIFN_D_SRC_RSIZE 80*1
|
||||
#define HIFN_D_DST_RSIZE 80*1
|
||||
#define HIFN_D_RES_RSIZE 24*1
|
||||
|
||||
#define HIFN_D_DST_DALIGN 4
|
||||
|
||||
#define HIFN_QUEUE_LENGTH HIFN_D_CMD_RSIZE-1
|
||||
#define HIFN_QUEUE_LENGTH (HIFN_D_CMD_RSIZE - 1)
|
||||
|
||||
#define AES_MIN_KEY_SIZE 16
|
||||
#define AES_MAX_KEY_SIZE 32
|
||||
|
@ -406,8 +403,6 @@ struct hifn_dma {
|
|||
u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
|
||||
u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
|
||||
|
||||
u64 test_src, test_dst;
|
||||
|
||||
/*
|
||||
* Our current positions for insertion and removal from the descriptor
|
||||
* rings.
|
||||
|
@ -434,9 +429,6 @@ struct hifn_device
|
|||
struct pci_dev *pdev;
|
||||
void __iomem *bar[3];
|
||||
|
||||
unsigned long result_mem;
|
||||
dma_addr_t dst;
|
||||
|
||||
void *desc_virt;
|
||||
dma_addr_t desc_dma;
|
||||
|
||||
|
@ -446,8 +438,6 @@ struct hifn_device
|
|||
|
||||
spinlock_t lock;
|
||||
|
||||
void *priv;
|
||||
|
||||
u32 flags;
|
||||
int active, started;
|
||||
struct delayed_work work;
|
||||
|
@ -657,12 +647,17 @@ struct ablkcipher_walk
|
|||
|
||||
struct hifn_context
|
||||
{
|
||||
u8 key[HIFN_MAX_CRYPT_KEY_LENGTH], *iv;
|
||||
u8 key[HIFN_MAX_CRYPT_KEY_LENGTH];
|
||||
struct hifn_device *dev;
|
||||
unsigned int keysize, ivsize;
|
||||
unsigned int keysize;
|
||||
};
|
||||
|
||||
struct hifn_request_context
|
||||
{
|
||||
u8 *iv;
|
||||
unsigned int ivsize;
|
||||
u8 op, type, mode, unused;
|
||||
struct ablkcipher_walk walk;
|
||||
atomic_t sg_num;
|
||||
};
|
||||
|
||||
#define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg)
|
||||
|
@ -1168,7 +1163,8 @@ static int hifn_setup_crypto_command(struct hifn_device *dev,
|
|||
}
|
||||
|
||||
static int hifn_setup_cmd_desc(struct hifn_device *dev,
|
||||
struct hifn_context *ctx, void *priv, unsigned int nbytes)
|
||||
struct hifn_context *ctx, struct hifn_request_context *rctx,
|
||||
void *priv, unsigned int nbytes)
|
||||
{
|
||||
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
|
||||
int cmd_len, sa_idx;
|
||||
|
@ -1179,7 +1175,7 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev,
|
|||
buf_pos = buf = dma->command_bufs[dma->cmdi];
|
||||
|
||||
mask = 0;
|
||||
switch (ctx->op) {
|
||||
switch (rctx->op) {
|
||||
case ACRYPTO_OP_DECRYPT:
|
||||
mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE;
|
||||
break;
|
||||
|
@ -1196,15 +1192,15 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev,
|
|||
buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes,
|
||||
nbytes, mask, dev->snum);
|
||||
|
||||
if (ctx->op == ACRYPTO_OP_ENCRYPT || ctx->op == ACRYPTO_OP_DECRYPT) {
|
||||
if (rctx->op == ACRYPTO_OP_ENCRYPT || rctx->op == ACRYPTO_OP_DECRYPT) {
|
||||
u16 md = 0;
|
||||
|
||||
if (ctx->keysize)
|
||||
md |= HIFN_CRYPT_CMD_NEW_KEY;
|
||||
if (ctx->iv && ctx->mode != ACRYPTO_MODE_ECB)
|
||||
if (rctx->iv && rctx->mode != ACRYPTO_MODE_ECB)
|
||||
md |= HIFN_CRYPT_CMD_NEW_IV;
|
||||
|
||||
switch (ctx->mode) {
|
||||
switch (rctx->mode) {
|
||||
case ACRYPTO_MODE_ECB:
|
||||
md |= HIFN_CRYPT_CMD_MODE_ECB;
|
||||
break;
|
||||
|
@ -1221,7 +1217,7 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev,
|
|||
goto err_out;
|
||||
}
|
||||
|
||||
switch (ctx->type) {
|
||||
switch (rctx->type) {
|
||||
case ACRYPTO_TYPE_AES_128:
|
||||
if (ctx->keysize != 16)
|
||||
goto err_out;
|
||||
|
@ -1256,17 +1252,18 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev,
|
|||
|
||||
buf_pos += hifn_setup_crypto_command(dev, buf_pos,
|
||||
nbytes, nbytes, ctx->key, ctx->keysize,
|
||||
ctx->iv, ctx->ivsize, md);
|
||||
rctx->iv, rctx->ivsize, md);
|
||||
}
|
||||
|
||||
dev->sa[sa_idx] = priv;
|
||||
dev->started++;
|
||||
|
||||
cmd_len = buf_pos - buf;
|
||||
dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID |
|
||||
HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
|
||||
|
||||
if (++dma->cmdi == HIFN_D_CMD_RSIZE) {
|
||||
dma->cmdr[dma->cmdi].l = __cpu_to_le32(HIFN_MAX_COMMAND |
|
||||
dma->cmdr[dma->cmdi].l = __cpu_to_le32(
|
||||
HIFN_D_VALID | HIFN_D_LAST |
|
||||
HIFN_D_MASKDONEIRQ | HIFN_D_JUMP);
|
||||
dma->cmdi = 0;
|
||||
|
@ -1284,7 +1281,7 @@ err_out:
|
|||
}
|
||||
|
||||
static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
|
||||
unsigned int offset, unsigned int size)
|
||||
unsigned int offset, unsigned int size, int last)
|
||||
{
|
||||
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
|
||||
int idx;
|
||||
|
@ -1296,12 +1293,12 @@ static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
|
|||
|
||||
dma->srcr[idx].p = __cpu_to_le32(addr);
|
||||
dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
|
||||
HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
|
||||
HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0));
|
||||
|
||||
if (++idx == HIFN_D_SRC_RSIZE) {
|
||||
dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
|
||||
HIFN_D_JUMP |
|
||||
HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
|
||||
HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
|
||||
(last ? HIFN_D_LAST : 0));
|
||||
idx = 0;
|
||||
}
|
||||
|
||||
|
@ -1342,7 +1339,7 @@ static void hifn_setup_res_desc(struct hifn_device *dev)
|
|||
}
|
||||
|
||||
static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
|
||||
unsigned offset, unsigned size)
|
||||
unsigned offset, unsigned size, int last)
|
||||
{
|
||||
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
|
||||
int idx;
|
||||
|
@ -1353,12 +1350,12 @@ static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
|
|||
idx = dma->dsti;
|
||||
dma->dstr[idx].p = __cpu_to_le32(addr);
|
||||
dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
|
||||
HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
|
||||
HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0));
|
||||
|
||||
if (++idx == HIFN_D_DST_RSIZE) {
|
||||
dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
|
||||
HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
|
||||
HIFN_D_LAST);
|
||||
(last ? HIFN_D_LAST : 0));
|
||||
idx = 0;
|
||||
}
|
||||
dma->dsti = idx;
|
||||
|
@ -1370,16 +1367,52 @@ static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
|
|||
}
|
||||
}
|
||||
|
||||
static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
|
||||
struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
|
||||
struct hifn_context *ctx)
|
||||
static int hifn_setup_dma(struct hifn_device *dev,
|
||||
struct hifn_context *ctx, struct hifn_request_context *rctx,
|
||||
struct scatterlist *src, struct scatterlist *dst,
|
||||
unsigned int nbytes, void *priv)
|
||||
{
|
||||
dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n",
|
||||
dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
|
||||
struct scatterlist *t;
|
||||
struct page *spage, *dpage;
|
||||
unsigned int soff, doff;
|
||||
unsigned int n, len;
|
||||
|
||||
hifn_setup_src_desc(dev, spage, soff, nbytes);
|
||||
hifn_setup_cmd_desc(dev, ctx, priv, nbytes);
|
||||
hifn_setup_dst_desc(dev, dpage, doff, nbytes);
|
||||
n = nbytes;
|
||||
while (n) {
|
||||
spage = sg_page(src);
|
||||
soff = src->offset;
|
||||
len = min(src->length, n);
|
||||
|
||||
hifn_setup_src_desc(dev, spage, soff, len, n - len == 0);
|
||||
|
||||
src++;
|
||||
n -= len;
|
||||
}
|
||||
|
||||
t = &rctx->walk.cache[0];
|
||||
n = nbytes;
|
||||
while (n) {
|
||||
if (t->length && rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
|
||||
BUG_ON(!sg_page(t));
|
||||
dpage = sg_page(t);
|
||||
doff = 0;
|
||||
len = t->length;
|
||||
} else {
|
||||
BUG_ON(!sg_page(dst));
|
||||
dpage = sg_page(dst);
|
||||
doff = dst->offset;
|
||||
len = dst->length;
|
||||
}
|
||||
len = min(len, n);
|
||||
|
||||
hifn_setup_dst_desc(dev, dpage, doff, len, n - len == 0);
|
||||
|
||||
dst++;
|
||||
t++;
|
||||
n -= len;
|
||||
}
|
||||
|
||||
hifn_setup_cmd_desc(dev, ctx, rctx, priv, nbytes);
|
||||
hifn_setup_res_desc(dev);
|
||||
return 0;
|
||||
}
|
||||
|
@ -1424,32 +1457,26 @@ static void ablkcipher_walk_exit(struct ablkcipher_walk *w)
|
|||
w->num = 0;
|
||||
}
|
||||
|
||||
static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist *src,
|
||||
static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
|
||||
unsigned int size, unsigned int *nbytesp)
|
||||
{
|
||||
unsigned int copy, drest = *drestp, nbytes = *nbytesp;
|
||||
int idx = 0;
|
||||
void *saddr;
|
||||
|
||||
if (drest < size || size > nbytes)
|
||||
return -EINVAL;
|
||||
|
||||
while (size) {
|
||||
copy = min(drest, min(size, src->length));
|
||||
|
||||
saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1);
|
||||
memcpy(daddr, saddr + src->offset, copy);
|
||||
kunmap_atomic(saddr, KM_SOFTIRQ1);
|
||||
copy = min(drest, min(size, dst->length));
|
||||
|
||||
size -= copy;
|
||||
drest -= copy;
|
||||
nbytes -= copy;
|
||||
daddr += copy;
|
||||
|
||||
dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n",
|
||||
__func__, copy, size, drest, nbytes);
|
||||
|
||||
src++;
|
||||
dst++;
|
||||
idx++;
|
||||
}
|
||||
|
||||
|
@ -1462,8 +1489,7 @@ static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist
|
|||
static int ablkcipher_walk(struct ablkcipher_request *req,
|
||||
struct ablkcipher_walk *w)
|
||||
{
|
||||
struct scatterlist *src, *dst, *t;
|
||||
void *daddr;
|
||||
struct scatterlist *dst, *t;
|
||||
unsigned int nbytes = req->nbytes, offset, copy, diff;
|
||||
int idx, tidx, err;
|
||||
|
||||
|
@ -1473,26 +1499,22 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
|
|||
if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED))
|
||||
return -EINVAL;
|
||||
|
||||
src = &req->src[idx];
|
||||
dst = &req->dst[idx];
|
||||
|
||||
dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, "
|
||||
"nbytes: %u.\n",
|
||||
__func__, src->length, dst->length, src->offset,
|
||||
dst->offset, offset, nbytes);
|
||||
dprintk("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n",
|
||||
__func__, dst->length, dst->offset, offset, nbytes);
|
||||
|
||||
if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
|
||||
!IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) ||
|
||||
offset) {
|
||||
unsigned slen = min(src->length - offset, nbytes);
|
||||
unsigned slen = min(dst->length - offset, nbytes);
|
||||
unsigned dlen = PAGE_SIZE;
|
||||
|
||||
t = &w->cache[idx];
|
||||
|
||||
daddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0);
|
||||
err = ablkcipher_add(daddr, &dlen, src, slen, &nbytes);
|
||||
err = ablkcipher_add(&dlen, dst, slen, &nbytes);
|
||||
if (err < 0)
|
||||
goto err_out_unmap;
|
||||
return err;
|
||||
|
||||
idx += err;
|
||||
|
||||
|
@ -1528,21 +1550,19 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
|
|||
} else {
|
||||
copy += diff + nbytes;
|
||||
|
||||
src = &req->src[idx];
|
||||
dst = &req->dst[idx];
|
||||
|
||||
err = ablkcipher_add(daddr + slen, &dlen, src, nbytes, &nbytes);
|
||||
err = ablkcipher_add(&dlen, dst, nbytes, &nbytes);
|
||||
if (err < 0)
|
||||
goto err_out_unmap;
|
||||
return err;
|
||||
|
||||
idx += err;
|
||||
}
|
||||
|
||||
t->length = copy;
|
||||
t->offset = offset;
|
||||
|
||||
kunmap_atomic(daddr, KM_SOFTIRQ0);
|
||||
} else {
|
||||
nbytes -= min(src->length, nbytes);
|
||||
nbytes -= min(dst->length, nbytes);
|
||||
idx++;
|
||||
}
|
||||
|
||||
|
@ -1550,26 +1570,22 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
|
|||
}
|
||||
|
||||
return tidx;
|
||||
|
||||
err_out_unmap:
|
||||
kunmap_atomic(daddr, KM_SOFTIRQ0);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int hifn_setup_session(struct ablkcipher_request *req)
|
||||
{
|
||||
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
|
||||
struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
|
||||
struct hifn_device *dev = ctx->dev;
|
||||
struct page *spage, *dpage;
|
||||
unsigned long soff, doff, dlen, flags;
|
||||
unsigned int nbytes = req->nbytes, idx = 0, len;
|
||||
unsigned long dlen, flags;
|
||||
unsigned int nbytes = req->nbytes, idx = 0;
|
||||
int err = -EINVAL, sg_num;
|
||||
struct scatterlist *src, *dst, *t;
|
||||
struct scatterlist *dst;
|
||||
|
||||
if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB)
|
||||
if (rctx->iv && !rctx->ivsize && rctx->mode != ACRYPTO_MODE_ECB)
|
||||
goto err_out_exit;
|
||||
|
||||
ctx->walk.flags = 0;
|
||||
rctx->walk.flags = 0;
|
||||
|
||||
while (nbytes) {
|
||||
dst = &req->dst[idx];
|
||||
|
@ -1577,27 +1593,23 @@ static int hifn_setup_session(struct ablkcipher_request *req)
|
|||
|
||||
if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
|
||||
!IS_ALIGNED(dlen, HIFN_D_DST_DALIGN))
|
||||
ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
|
||||
rctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
|
||||
|
||||
nbytes -= dlen;
|
||||
idx++;
|
||||
}
|
||||
|
||||
if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
|
||||
err = ablkcipher_walk_init(&ctx->walk, idx, GFP_ATOMIC);
|
||||
if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
|
||||
err = ablkcipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
|
||||
if (err < 0)
|
||||
return err;
|
||||
}
|
||||
|
||||
nbytes = req->nbytes;
|
||||
idx = 0;
|
||||
|
||||
sg_num = ablkcipher_walk(req, &ctx->walk);
|
||||
sg_num = ablkcipher_walk(req, &rctx->walk);
|
||||
if (sg_num < 0) {
|
||||
err = sg_num;
|
||||
goto err_out_exit;
|
||||
}
|
||||
atomic_set(&ctx->sg_num, sg_num);
|
||||
|
||||
spin_lock_irqsave(&dev->lock, flags);
|
||||
if (dev->started + sg_num > HIFN_QUEUE_LENGTH) {
|
||||
|
@ -1605,37 +1617,11 @@ static int hifn_setup_session(struct ablkcipher_request *req)
|
|||
goto err_out;
|
||||
}
|
||||
|
||||
err = hifn_setup_dma(dev, ctx, rctx, req->src, req->dst, req->nbytes, req);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
dev->snum++;
|
||||
dev->started += sg_num;
|
||||
|
||||
while (nbytes) {
|
||||
src = &req->src[idx];
|
||||
dst = &req->dst[idx];
|
||||
t = &ctx->walk.cache[idx];
|
||||
|
||||
if (t->length) {
|
||||
spage = dpage = sg_page(t);
|
||||
soff = doff = 0;
|
||||
len = t->length;
|
||||
} else {
|
||||
spage = sg_page(src);
|
||||
soff = src->offset;
|
||||
|
||||
dpage = sg_page(dst);
|
||||
doff = dst->offset;
|
||||
|
||||
len = dst->length;
|
||||
}
|
||||
|
||||
idx++;
|
||||
|
||||
err = hifn_setup_dma(dev, spage, soff, dpage, doff, nbytes,
|
||||
req, ctx);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
nbytes -= min(len, nbytes);
|
||||
}
|
||||
|
||||
dev->active = HIFN_DEFAULT_ACTIVE_NUM;
|
||||
spin_unlock_irqrestore(&dev->lock, flags);
|
||||
|
@ -1645,12 +1631,13 @@ static int hifn_setup_session(struct ablkcipher_request *req)
|
|||
err_out:
|
||||
spin_unlock_irqrestore(&dev->lock, flags);
|
||||
err_out_exit:
|
||||
if (err)
|
||||
dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
|
||||
if (err) {
|
||||
printk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
|
||||
"type: %u, err: %d.\n",
|
||||
dev->name, ctx->iv, ctx->ivsize,
|
||||
dev->name, rctx->iv, rctx->ivsize,
|
||||
ctx->key, ctx->keysize,
|
||||
ctx->mode, ctx->op, ctx->type, err);
|
||||
rctx->mode, rctx->op, rctx->type, err);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -1660,31 +1647,33 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
|
|||
int n, err;
|
||||
u8 src[16];
|
||||
struct hifn_context ctx;
|
||||
struct hifn_request_context rctx;
|
||||
u8 fips_aes_ecb_from_zero[16] = {
|
||||
0x66, 0xE9, 0x4B, 0xD4,
|
||||
0xEF, 0x8A, 0x2C, 0x3B,
|
||||
0x88, 0x4C, 0xFA, 0x59,
|
||||
0xCA, 0x34, 0x2B, 0x2E};
|
||||
struct scatterlist sg;
|
||||
|
||||
memset(src, 0, sizeof(src));
|
||||
memset(ctx.key, 0, sizeof(ctx.key));
|
||||
|
||||
ctx.dev = dev;
|
||||
ctx.keysize = 16;
|
||||
ctx.ivsize = 0;
|
||||
ctx.iv = NULL;
|
||||
ctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT;
|
||||
ctx.mode = ACRYPTO_MODE_ECB;
|
||||
ctx.type = ACRYPTO_TYPE_AES_128;
|
||||
atomic_set(&ctx.sg_num, 1);
|
||||
rctx.ivsize = 0;
|
||||
rctx.iv = NULL;
|
||||
rctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT;
|
||||
rctx.mode = ACRYPTO_MODE_ECB;
|
||||
rctx.type = ACRYPTO_TYPE_AES_128;
|
||||
rctx.walk.cache[0].length = 0;
|
||||
|
||||
err = hifn_setup_dma(dev,
|
||||
virt_to_page(src), offset_in_page(src),
|
||||
virt_to_page(src), offset_in_page(src),
|
||||
sizeof(src), NULL, &ctx);
|
||||
sg_init_one(&sg, &src, sizeof(src));
|
||||
|
||||
err = hifn_setup_dma(dev, &ctx, &rctx, &sg, &sg, sizeof(src), NULL);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
dev->started = 0;
|
||||
msleep(200);
|
||||
|
||||
dprintk("%s: decoded: ", dev->name);
|
||||
|
@ -1711,6 +1700,7 @@ static int hifn_start_device(struct hifn_device *dev)
|
|||
{
|
||||
int err;
|
||||
|
||||
dev->started = dev->active = 0;
|
||||
hifn_reset_dma(dev, 1);
|
||||
|
||||
err = hifn_enable_crypto(dev);
|
||||
|
@ -1764,90 +1754,65 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
|
|||
return idx;
|
||||
}
|
||||
|
||||
static inline void hifn_complete_sa(struct hifn_device *dev, int i)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->lock, flags);
|
||||
dev->sa[i] = NULL;
|
||||
dev->started--;
|
||||
if (dev->started < 0)
|
||||
printk("%s: started: %d.\n", __func__, dev->started);
|
||||
spin_unlock_irqrestore(&dev->lock, flags);
|
||||
BUG_ON(dev->started < 0);
|
||||
}
|
||||
|
||||
static void hifn_process_ready(struct ablkcipher_request *req, int error)
|
||||
{
|
||||
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
|
||||
struct hifn_device *dev;
|
||||
struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
|
||||
|
||||
dprintk("%s: req: %p, ctx: %p.\n", __func__, req, ctx);
|
||||
|
||||
dev = ctx->dev;
|
||||
dprintk("%s: req: %p, started: %d, sg_num: %d.\n",
|
||||
__func__, req, dev->started, atomic_read(&ctx->sg_num));
|
||||
|
||||
if (--dev->started < 0)
|
||||
BUG();
|
||||
|
||||
if (atomic_dec_and_test(&ctx->sg_num)) {
|
||||
if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
|
||||
unsigned int nbytes = req->nbytes;
|
||||
int idx = 0, err;
|
||||
struct scatterlist *dst, *t;
|
||||
void *saddr;
|
||||
|
||||
if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
|
||||
while (nbytes) {
|
||||
t = &ctx->walk.cache[idx];
|
||||
dst = &req->dst[idx];
|
||||
while (nbytes) {
|
||||
t = &rctx->walk.cache[idx];
|
||||
dst = &req->dst[idx];
|
||||
|
||||
dprintk("\n%s: sg_page(t): %p, t->length: %u, "
|
||||
"sg_page(dst): %p, dst->length: %u, "
|
||||
"nbytes: %u.\n",
|
||||
__func__, sg_page(t), t->length,
|
||||
sg_page(dst), dst->length, nbytes);
|
||||
dprintk("\n%s: sg_page(t): %p, t->length: %u, "
|
||||
"sg_page(dst): %p, dst->length: %u, "
|
||||
"nbytes: %u.\n",
|
||||
__func__, sg_page(t), t->length,
|
||||
sg_page(dst), dst->length, nbytes);
|
||||
|
||||
if (!t->length) {
|
||||
nbytes -= min(dst->length, nbytes);
|
||||
idx++;
|
||||
continue;
|
||||
}
|
||||
|
||||
saddr = kmap_atomic(sg_page(t), KM_IRQ1);
|
||||
|
||||
err = ablkcipher_get(saddr, &t->length, t->offset,
|
||||
dst, nbytes, &nbytes);
|
||||
if (err < 0) {
|
||||
kunmap_atomic(saddr, KM_IRQ1);
|
||||
break;
|
||||
}
|
||||
|
||||
idx += err;
|
||||
kunmap_atomic(saddr, KM_IRQ1);
|
||||
if (!t->length) {
|
||||
nbytes -= min(dst->length, nbytes);
|
||||
idx++;
|
||||
continue;
|
||||
}
|
||||
|
||||
ablkcipher_walk_exit(&ctx->walk);
|
||||
saddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0);
|
||||
|
||||
err = ablkcipher_get(saddr, &t->length, t->offset,
|
||||
dst, nbytes, &nbytes);
|
||||
if (err < 0) {
|
||||
kunmap_atomic(saddr, KM_SOFTIRQ0);
|
||||
break;
|
||||
}
|
||||
|
||||
idx += err;
|
||||
kunmap_atomic(saddr, KM_SOFTIRQ0);
|
||||
}
|
||||
|
||||
req->base.complete(&req->base, error);
|
||||
ablkcipher_walk_exit(&rctx->walk);
|
||||
}
|
||||
|
||||
req->base.complete(&req->base, error);
|
||||
}
|
||||
|
||||
static void hifn_check_for_completion(struct hifn_device *dev, int error)
|
||||
{
|
||||
int i;
|
||||
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
|
||||
|
||||
for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
|
||||
struct hifn_desc *d = &dma->resr[i];
|
||||
|
||||
if (!(d->l & __cpu_to_le32(HIFN_D_VALID)) && dev->sa[i]) {
|
||||
dev->success++;
|
||||
dev->reset = 0;
|
||||
hifn_process_ready(dev->sa[i], error);
|
||||
dev->sa[i] = NULL;
|
||||
}
|
||||
|
||||
if (d->l & __cpu_to_le32(HIFN_D_DESTOVER | HIFN_D_OVER))
|
||||
if (printk_ratelimit())
|
||||
printk("%s: overflow detected [d: %u, o: %u] "
|
||||
"at %d resr: l: %08x, p: %08x.\n",
|
||||
dev->name,
|
||||
!!(d->l & __cpu_to_le32(HIFN_D_DESTOVER)),
|
||||
!!(d->l & __cpu_to_le32(HIFN_D_OVER)),
|
||||
i, d->l, d->p);
|
||||
}
|
||||
}
|
||||
|
||||
static void hifn_clear_rings(struct hifn_device *dev)
|
||||
static void hifn_clear_rings(struct hifn_device *dev, int error)
|
||||
{
|
||||
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
|
||||
int i, u;
|
||||
|
@ -1864,21 +1829,26 @@ static void hifn_clear_rings(struct hifn_device *dev)
|
|||
if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID))
|
||||
break;
|
||||
|
||||
if (i != HIFN_D_RES_RSIZE)
|
||||
u--;
|
||||
if (dev->sa[i]) {
|
||||
dev->success++;
|
||||
dev->reset = 0;
|
||||
hifn_process_ready(dev->sa[i], error);
|
||||
hifn_complete_sa(dev, i);
|
||||
}
|
||||
|
||||
if (++i == (HIFN_D_RES_RSIZE + 1))
|
||||
if (++i == HIFN_D_RES_RSIZE)
|
||||
i = 0;
|
||||
u--;
|
||||
}
|
||||
dma->resk = i; dma->resu = u;
|
||||
|
||||
i = dma->srck; u = dma->srcu;
|
||||
while (u != 0) {
|
||||
if (i == HIFN_D_SRC_RSIZE)
|
||||
i = 0;
|
||||
if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID))
|
||||
break;
|
||||
i++, u--;
|
||||
if (++i == HIFN_D_SRC_RSIZE)
|
||||
i = 0;
|
||||
u--;
|
||||
}
|
||||
dma->srck = i; dma->srcu = u;
|
||||
|
||||
|
@ -1886,20 +1856,19 @@ static void hifn_clear_rings(struct hifn_device *dev)
|
|||
while (u != 0) {
|
||||
if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID))
|
||||
break;
|
||||
if (i != HIFN_D_CMD_RSIZE)
|
||||
u--;
|
||||
if (++i == (HIFN_D_CMD_RSIZE + 1))
|
||||
if (++i == HIFN_D_CMD_RSIZE)
|
||||
i = 0;
|
||||
u--;
|
||||
}
|
||||
dma->cmdk = i; dma->cmdu = u;
|
||||
|
||||
i = dma->dstk; u = dma->dstu;
|
||||
while (u != 0) {
|
||||
if (i == HIFN_D_DST_RSIZE)
|
||||
i = 0;
|
||||
if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID))
|
||||
break;
|
||||
i++, u--;
|
||||
if (++i == HIFN_D_DST_RSIZE)
|
||||
i = 0;
|
||||
u--;
|
||||
}
|
||||
dma->dstk = i; dma->dstu = u;
|
||||
|
||||
|
@ -1944,30 +1913,39 @@ static void hifn_work(struct work_struct *work)
|
|||
} else
|
||||
dev->active--;
|
||||
|
||||
if (dev->prev_success == dev->success && dev->started)
|
||||
if ((dev->prev_success == dev->success) && dev->started)
|
||||
reset = 1;
|
||||
dev->prev_success = dev->success;
|
||||
spin_unlock_irqrestore(&dev->lock, flags);
|
||||
|
||||
if (reset) {
|
||||
dprintk("%s: r: %08x, active: %d, started: %d, "
|
||||
"success: %lu: reset: %d.\n",
|
||||
dev->name, r, dev->active, dev->started,
|
||||
dev->success, reset);
|
||||
|
||||
if (++dev->reset >= 5) {
|
||||
dprintk("%s: really hard reset.\n", dev->name);
|
||||
int i;
|
||||
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
|
||||
|
||||
printk("%s: r: %08x, active: %d, started: %d, "
|
||||
"success: %lu: qlen: %u/%u, reset: %d.\n",
|
||||
dev->name, r, dev->active, dev->started,
|
||||
dev->success, dev->queue.qlen, dev->queue.max_qlen,
|
||||
reset);
|
||||
|
||||
printk("%s: res: ", __func__);
|
||||
for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
|
||||
printk("%x.%p ", dma->resr[i].l, dev->sa[i]);
|
||||
if (dev->sa[i]) {
|
||||
hifn_process_ready(dev->sa[i], -ENODEV);
|
||||
hifn_complete_sa(dev, i);
|
||||
}
|
||||
}
|
||||
printk("\n");
|
||||
|
||||
hifn_reset_dma(dev, 1);
|
||||
hifn_stop_device(dev);
|
||||
hifn_start_device(dev);
|
||||
dev->reset = 0;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&dev->lock, flags);
|
||||
hifn_check_for_completion(dev, -EBUSY);
|
||||
hifn_clear_rings(dev);
|
||||
dev->started = 0;
|
||||
spin_unlock_irqrestore(&dev->lock, flags);
|
||||
tasklet_schedule(&dev->tasklet);
|
||||
}
|
||||
|
||||
schedule_delayed_work(&dev->work, HZ);
|
||||
|
@@ -1984,8 +1962,8 @@ static irqreturn_t hifn_interrupt(int irq, void *data)
 	dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], "
 			"i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n",
 		dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi,
-		dma->cmdu, dma->srcu, dma->dstu, dma->resu,
-		dma->cmdi, dma->srci, dma->dsti, dma->resi);
+		dma->cmdi, dma->srci, dma->dsti, dma->resi,
+		dma->cmdu, dma->srcu, dma->dstu, dma->resu);

 	if ((dmacsr & dev->dmareg) == 0)
 		return IRQ_NONE;
@@ -2002,11 +1980,10 @@ static irqreturn_t hifn_interrupt(int irq, void *data)
 	if (restart) {
 		u32 puisr = hifn_read_0(dev, HIFN_0_PUISR);

-		if (printk_ratelimit())
-			printk("%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n",
-				dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER),
-				!!(dmacsr & HIFN_DMACSR_D_OVER),
-				puisr, !!(puisr & HIFN_PUISR_DSTOVER));
+		printk(KERN_WARNING "%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n",
+			dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER),
+			!!(dmacsr & HIFN_DMACSR_D_OVER),
+			puisr, !!(puisr & HIFN_PUISR_DSTOVER));
 		if (!!(puisr & HIFN_PUISR_DSTOVER))
 			hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
 		hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER |
@@ -2016,12 +1993,11 @@ static irqreturn_t hifn_interrupt(int irq, void *data)
 	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
 			HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
 	if (restart) {
-		if (printk_ratelimit())
-			printk("%s: abort: c: %d, s: %d, d: %d, r: %d.\n",
-				dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT),
-				!!(dmacsr & HIFN_DMACSR_S_ABORT),
-				!!(dmacsr & HIFN_DMACSR_D_ABORT),
-				!!(dmacsr & HIFN_DMACSR_R_ABORT));
+		printk(KERN_WARNING "%s: abort: c: %d, s: %d, d: %d, r: %d.\n",
+			dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT),
+			!!(dmacsr & HIFN_DMACSR_S_ABORT),
+			!!(dmacsr & HIFN_DMACSR_D_ABORT),
+			!!(dmacsr & HIFN_DMACSR_R_ABORT));
 		hifn_reset_dma(dev, 1);
 		hifn_init_dma(dev);
 		hifn_init_registers(dev);
@@ -2034,7 +2010,6 @@ static irqreturn_t hifn_interrupt(int irq, void *data)
 	}

 	tasklet_schedule(&dev->tasklet);
-	hifn_clear_rings(dev);

 	return IRQ_HANDLED;
 }
@@ -2048,21 +2023,25 @@ static void hifn_flush(struct hifn_device *dev)
 	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
 	int i;

-	spin_lock_irqsave(&dev->lock, flags);
 	for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
 		struct hifn_desc *d = &dma->resr[i];

 		if (dev->sa[i]) {
 			hifn_process_ready(dev->sa[i],
 				(d->l & __cpu_to_le32(HIFN_D_VALID))?-ENODEV:0);
+			hifn_complete_sa(dev, i);
 		}
 	}

+	spin_lock_irqsave(&dev->lock, flags);
 	while ((async_req = crypto_dequeue_request(&dev->queue))) {
 		ctx = crypto_tfm_ctx(async_req->tfm);
 		req = container_of(async_req, struct ablkcipher_request, base);
+		spin_unlock_irqrestore(&dev->lock, flags);

 		hifn_process_ready(req, -ENODEV);
+
+		spin_lock_irqsave(&dev->lock, flags);
 	}
 	spin_unlock_irqrestore(&dev->lock, flags);
 }
@@ -2121,6 +2100,7 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
 		u8 type, u8 mode)
 {
 	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
 	unsigned ivsize;

 	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
@@ -2141,11 +2121,11 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
 		type = ACRYPTO_TYPE_AES_256;
 	}

-	ctx->op = op;
-	ctx->mode = mode;
-	ctx->type = type;
-	ctx->iv = req->info;
-	ctx->ivsize = ivsize;
+	rctx->op = op;
+	rctx->mode = mode;
+	rctx->type = type;
+	rctx->iv = req->info;
+	rctx->ivsize = ivsize;

 	/*
 	 * HEAVY TODO: needs to kick Herbert XU to write documentation.
@@ -2158,7 +2138,7 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,

 static int hifn_process_queue(struct hifn_device *dev)
 {
-	struct crypto_async_request *async_req;
+	struct crypto_async_request *async_req, *backlog;
 	struct hifn_context *ctx;
 	struct ablkcipher_request *req;
 	unsigned long flags;
@@ -2166,12 +2146,16 @@ static int hifn_process_queue(struct hifn_device *dev)

 	while (dev->started < HIFN_QUEUE_LENGTH) {
 		spin_lock_irqsave(&dev->lock, flags);
+		backlog = crypto_get_backlog(&dev->queue);
 		async_req = crypto_dequeue_request(&dev->queue);
 		spin_unlock_irqrestore(&dev->lock, flags);

 		if (!async_req)
 			break;

+		if (backlog)
+			backlog->complete(backlog, -EINPROGRESS);
+
 		ctx = crypto_tfm_ctx(async_req->tfm);
 		req = container_of(async_req, struct ablkcipher_request, base);

@@ -2496,7 +2480,7 @@ static int hifn_cra_init(struct crypto_tfm *tfm)
 	struct hifn_context *ctx = crypto_tfm_ctx(tfm);

 	ctx->dev = ha->dev;
-
+	tfm->crt_ablkcipher.reqsize = sizeof(struct hifn_request_context);
 	return 0;
 }

@@ -2574,7 +2558,10 @@ static void hifn_tasklet_callback(unsigned long data)
 	 * (like dev->success), but they are used in process
 	 * context or update is atomic (like setting dev->sa[i] to NULL).
 	 */
-	hifn_check_for_completion(dev, 0);
+	hifn_clear_rings(dev, 0);
+
+	if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
+		hifn_process_queue(dev);
 }

 static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -2631,22 +2618,11 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_out_unmap_bars;
 	}

-	dev->result_mem = __get_free_pages(GFP_KERNEL, HIFN_MAX_RESULT_ORDER);
-	if (!dev->result_mem) {
-		dprintk("Failed to allocate %d pages for result_mem.\n",
-				HIFN_MAX_RESULT_ORDER);
-		goto err_out_unmap_bars;
-	}
-	memset((void *)dev->result_mem, 0, PAGE_SIZE*(1<<HIFN_MAX_RESULT_ORDER));
-
-	dev->dst = pci_map_single(pdev, (void *)dev->result_mem,
-			PAGE_SIZE << HIFN_MAX_RESULT_ORDER, PCI_DMA_FROMDEVICE);
-
 	dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma),
 			&dev->desc_dma);
 	if (!dev->desc_virt) {
 		dprintk("Failed to allocate descriptor rings.\n");
-		goto err_out_free_result_pages;
+		goto err_out_unmap_bars;
 	}
 	memset(dev->desc_virt, 0, sizeof(struct hifn_dma));

@@ -2706,11 +2682,6 @@ err_out_free_desc:
 	pci_free_consistent(pdev, sizeof(struct hifn_dma),
 			dev->desc_virt, dev->desc_dma);

-err_out_free_result_pages:
-	pci_unmap_single(pdev, dev->dst, PAGE_SIZE << HIFN_MAX_RESULT_ORDER,
-			PCI_DMA_FROMDEVICE);
-	free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER);
-
 err_out_unmap_bars:
 	for (i=0; i<3; ++i)
 		if (dev->bar[i])
@@ -2748,10 +2719,6 @@ static void hifn_remove(struct pci_dev *pdev)

 		pci_free_consistent(pdev, sizeof(struct hifn_dma),
 				dev->desc_virt, dev->desc_dma);
-		pci_unmap_single(pdev, dev->dst,
-				PAGE_SIZE << HIFN_MAX_RESULT_ORDER,
-				PCI_DMA_FROMDEVICE);
-		free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER);
 		for (i=0; i<3; ++i)
 			if (dev->bar[i])
 				iounmap(dev->bar[i]);
@@ -2782,6 +2749,11 @@ static int __devinit hifn_init(void)
 	unsigned int freq;
 	int err;

+	if (sizeof(dma_addr_t) > 4) {
+		printk(KERN_INFO "HIFN supports only 32-bit addresses.\n");
+		return -EINVAL;
+	}
+
 	if (strncmp(hifn_pll_ref, "ext", 3) &&
 	    strncmp(hifn_pll_ref, "pci", 3)) {
 		printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, "
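The hifn_process_queue() change above adopts the standard crypto_queue backlog idiom: a request that was queued beyond the queue's soft limit must be told it is really in flight once the driver finally picks it up. A minimal sketch of that idiom follows; the crypto_get_backlog()/complete(-EINPROGRESS) calls are the real API, while my_dev and its fields are placeholders invented for illustration:

	static void my_dequeue_one(struct my_dev *dev)
	{
		struct crypto_async_request *async_req, *backlog;
		unsigned long flags;

		spin_lock_irqsave(&dev->lock, flags);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		spin_unlock_irqrestore(&dev->lock, flags);

		if (!async_req)
			return;

		/* the submitter was sleeping on the backlog: wake it up */
		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* ... hand async_req to the hardware ... */
	}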
drivers/crypto/padlock-aes.c
@@ -15,6 +15,8 @@
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
 #include <asm/byteorder.h>
 #include <asm/i387.h>
 #include "padlock.h"
@@ -49,6 +51,8 @@ struct aes_ctx {
 	u32 *D;
 };

+static DEFINE_PER_CPU(struct cword *, last_cword);
+
 /* Tells whether the ACE is capable to generate
    the extended key for a given key_len. */
 static inline int
@@ -89,6 +93,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	const __le32 *key = (const __le32 *)in_key;
 	u32 *flags = &tfm->crt_flags;
 	struct crypto_aes_ctx gen_aes;
+	int cpu;

 	if (key_len % 8) {
 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -118,7 +123,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,

 	/* Don't generate extended keys if the hardware can do it. */
 	if (aes_hw_extkey_available(key_len))
-		return 0;
+		goto ok;

 	ctx->D = ctx->d_data;
 	ctx->cword.encrypt.keygen = 1;
@@ -131,15 +136,30 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,

 	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
 	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
+
+ok:
+	for_each_online_cpu(cpu)
+		if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
+		    &ctx->cword.decrypt == per_cpu(last_cword, cpu))
+			per_cpu(last_cword, cpu) = NULL;
+
 	return 0;
 }

 /* ====== Encryption/decryption routines ====== */

 /* These are the real call to PadLock. */
-static inline void padlock_reset_key(void)
+static inline void padlock_reset_key(struct cword *cword)
 {
-	asm volatile ("pushfl; popfl");
+	int cpu = raw_smp_processor_id();
+
+	if (cword != per_cpu(last_cword, cpu))
+		asm volatile ("pushfl; popfl");
+}
+
+static inline void padlock_store_cword(struct cword *cword)
+{
+	per_cpu(last_cword, raw_smp_processor_id()) = cword;
 }

 /*
@@ -149,7 +169,7 @@ static inline void padlock_reset_key(void)
 */

 static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
-				  void *control_word)
+				  struct cword *control_word)
 {
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
@@ -213,22 +233,24 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
 	int ts_state;
-	padlock_reset_key();

+	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
 	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
 	irq_ts_restore(ts_state);
+	padlock_store_cword(&ctx->cword.encrypt);
 }

 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
 	int ts_state;
-	padlock_reset_key();

+	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
 	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
 	irq_ts_restore(ts_state);
+	padlock_store_cword(&ctx->cword.encrypt);
 }

 static struct crypto_alg aes_alg = {
@@ -261,7 +283,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
 	int err;
 	int ts_state;

-	padlock_reset_key();
+	padlock_reset_key(&ctx->cword.encrypt);

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
@@ -276,6 +298,8 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
 	}
 	irq_ts_restore(ts_state);

+	padlock_store_cword(&ctx->cword.encrypt);
+
 	return err;
 }

@@ -288,7 +312,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
 	int err;
 	int ts_state;

-	padlock_reset_key();
+	padlock_reset_key(&ctx->cword.decrypt);

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
@@ -302,6 +326,9 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
 	irq_ts_restore(ts_state);

+	padlock_store_cword(&ctx->cword.encrypt);
+
 	return err;
 }

@@ -336,7 +363,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 	int err;
 	int ts_state;

-	padlock_reset_key();
+	padlock_reset_key(&ctx->cword.encrypt);

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
@@ -353,6 +380,8 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 	}
 	irq_ts_restore(ts_state);

+	padlock_store_cword(&ctx->cword.decrypt);
+
 	return err;
 }

@@ -365,7 +394,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 	int err;
 	int ts_state;

-	padlock_reset_key();
+	padlock_reset_key(&ctx->cword.encrypt);

 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
@@ -380,6 +409,9 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 	}

 	irq_ts_restore(ts_state);

+	padlock_store_cword(&ctx->cword.encrypt);
+
 	return err;
 }

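The padlock-aes change above caches, per CPU, the control word most recently loaded into the PadLock unit, so padlock_reset_key() can skip the costly pushfl/popfl key reload when the same transform keeps running on one CPU; aes_set_key() invalidates any cache slots that point at the context being rekeyed. Stripped to its core, the memoization pattern looks like this (the per-cpu API calls are real; every other name is illustrative):

	static DEFINE_PER_CPU(const void *, last_ctx);	/* hypothetical cache */

	static void maybe_reload(const void *ctx)
	{
		int cpu = raw_smp_processor_id();

		/* only pay the expensive reload on an actual change */
		if (ctx != per_cpu(last_ctx, cpu))
			do_expensive_reload(ctx);	/* hypothetical cost */
	}

	static void remember(const void *ctx)
	{
		per_cpu(last_ctx, raw_smp_processor_id()) = ctx;
	}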
drivers/crypto/talitos.c
@@ -127,7 +127,6 @@ struct talitos_private {

 	/* request callback tasklet */
 	struct tasklet_struct done_task;
-	struct tasklet_struct error_task;

 	/* list of registered algorithms */
 	struct list_head alg_list;
@@ -138,6 +137,7 @@ struct talitos_private {

 /* .features flag */
 #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
+#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002

 /*
  * map virtual single (contiguous) pointer to h/w descriptor pointer
@@ -184,6 +184,11 @@ static int reset_channel(struct device *dev, int ch)
 	setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
 		  TALITOS_CCCR_LO_CDIE);

+	/* and ICCR writeback, if available */
+	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
+		setbits32(priv->reg + TALITOS_CCCR_LO(ch),
+			  TALITOS_CCCR_LO_IWSE);
+
 	return 0;
 }

@@ -239,6 +244,11 @@ static int init_device(struct device *dev)
 	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
 	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);

+	/* disable integrity check error interrupts (use writeback instead) */
+	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
+		setbits32(priv->reg + TALITOS_MDEUICR_LO,
+			  TALITOS_MDEUICR_LO_ICE);
+
 	return 0;
 }

@@ -370,6 +380,12 @@ static void talitos_done(unsigned long data)

 	for (ch = 0; ch < priv->num_channels; ch++)
 		flush_channel(dev, ch, 0, 0);
+
+	/* At this point, all completed channels have been processed.
+	 * Unmask done interrupts for channels completed later on.
+	 */
+	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
+	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
 }

 /*
@@ -469,16 +485,13 @@ static void report_eu_error(struct device *dev, int ch, struct talitos_desc *des
 /*
  * recover from error interrupts
  */
-static void talitos_error(unsigned long data)
+static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
 {
 	struct device *dev = (struct device *)data;
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	unsigned int timeout = TALITOS_TIMEOUT;
 	int ch, error, reset_dev = 0, reset_ch = 0;
-	u32 isr, isr_lo, v, v_lo;
-
-	isr = in_be32(priv->reg + TALITOS_ISR);
-	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
+	u32 v, v_lo;

 	for (ch = 0; ch < priv->num_channels; ch++) {
 		/* skip channels without errors */
@@ -560,16 +573,19 @@ static irqreturn_t talitos_interrupt(int irq, void *data)

 	isr = in_be32(priv->reg + TALITOS_ISR);
 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);

-	/* ack */
+	/* Acknowledge interrupt */
 	out_be32(priv->reg + TALITOS_ICR, isr);
 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);

 	if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
-		talitos_error((unsigned long)data);
+		talitos_error((unsigned long)data, isr, isr_lo);
 	else
-	if (likely(isr & TALITOS_ISR_CHDONE))
+	if (likely(isr & TALITOS_ISR_CHDONE)) {
+		/* mask further done interrupts. */
+		clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE);
+		/* done_task will unmask done interrupts at exit */
 		tasklet_schedule(&priv->done_task);
+	}

 	return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
 }
@@ -802,7 +818,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
 	aead_request_complete(areq, err);
 }

-static void ipsec_esp_decrypt_done(struct device *dev,
+static void ipsec_esp_decrypt_swauth_done(struct device *dev,
 				   struct talitos_desc *desc, void *context,
 				   int err)
 {
@@ -834,6 +850,27 @@ static void ipsec_esp_decrypt_done(struct device *dev,
 	aead_request_complete(req, err);
 }

+static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
+				   struct talitos_desc *desc, void *context,
+				   int err)
+{
+	struct aead_request *req = context;
+	struct ipsec_esp_edesc *edesc =
+		container_of(desc, struct ipsec_esp_edesc, desc);
+
+	ipsec_esp_unmap(dev, edesc, req);
+
+	/* check ICV auth status */
+	if (!err)
+		if ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
+		    DESC_HDR_LO_ICCR1_PASS)
+			err = -EBADMSG;
+
+	kfree(edesc);
+
+	aead_request_complete(req, err);
+}
+
 /*
  * convert scatterlist to SEC h/w link table format
  * stop at cryptlen bytes
@@ -887,6 +924,7 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
 	unsigned int authsize = ctx->authsize;
 	unsigned int ivsize;
 	int sg_count, ret;
+	int sg_link_tbl_len;

 	/* hmac key */
 	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
@@ -924,33 +962,19 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
 	if (sg_count == 1) {
 		desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
 	} else {
-		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
+		sg_link_tbl_len = cryptlen;
+
+		if ((edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) &&
+		    (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
+			sg_link_tbl_len = cryptlen + authsize;
+		}
+		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
 					  &edesc->link_tbl[0]);
 		if (sg_count > 1) {
-			struct talitos_ptr *link_tbl_ptr =
-				&edesc->link_tbl[sg_count-1];
-			struct scatterlist *sg;
-			struct talitos_private *priv = dev_get_drvdata(dev);
-
 			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
 			desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
 			dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
 						   edesc->dma_len, DMA_BIDIRECTIONAL);
-			/* If necessary for this SEC revision,
-			 * add a link table entry for ICV.
-			 */
-			if ((priv->features &
-			     TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT) &&
-			    (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
-				link_tbl_ptr->j_extent = 0;
-				link_tbl_ptr++;
-				link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
-				link_tbl_ptr->len = cpu_to_be16(authsize);
-				sg = sg_last(areq->src, edesc->src_nents ? : 1);
-				link_tbl_ptr->ptr = cpu_to_be32(
-						(char *)sg_dma_address(sg)
-						+ sg->length - authsize);
-			}
 		} else {
 			/* Only one segment now, so no link tbl needed */
 			desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
@@ -975,13 +999,9 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
 	desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
 					edesc->dma_link_tbl +
 					edesc->src_nents + 1);
-	if (areq->src == areq->dst) {
-		memcpy(link_tbl_ptr, &edesc->link_tbl[0],
-		       edesc->src_nents * sizeof(struct talitos_ptr));
-	} else {
-		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
-					  link_tbl_ptr);
-	}
+	sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
+				  link_tbl_ptr);
+
 	/* Add an entry to the link table for ICV data */
 	link_tbl_ptr += sg_count - 1;
 	link_tbl_ptr->j_extent = 0;
@@ -1106,11 +1126,14 @@ static int aead_authenc_encrypt(struct aead_request *req)
 	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
 }

+
+
 static int aead_authenc_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 	unsigned int authsize = ctx->authsize;
+	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
 	struct ipsec_esp_edesc *edesc;
 	struct scatterlist *sg;
 	void *icvdata;
@@ -1122,22 +1145,39 @@ static int aead_authenc_decrypt(struct aead_request *req)
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);

-	/* stash incoming ICV for later cmp with ICV generated by the h/w */
-	if (edesc->dma_len)
-		icvdata = &edesc->link_tbl[edesc->src_nents +
-					   edesc->dst_nents + 2];
-	else
-		icvdata = &edesc->link_tbl[0];
+	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
+	    (((!edesc->src_nents && !edesc->dst_nents) ||
+	      priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT))) {

-	sg = sg_last(req->src, edesc->src_nents ? : 1);
+		/* decrypt and check the ICV */
+		edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND |
+				  DESC_HDR_MODE1_MDEU_CICV;

-	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
-	       ctx->authsize);
+		/* reset integrity check result bits */
+		edesc->desc.hdr_lo = 0;

-	/* decrypt */
-	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+		return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_hwauth_done);

-	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_done);
+	} else {
+
+		/* Have to check the ICV with software */
+
+		edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+
+		/* stash incoming ICV for later cmp with ICV generated by the h/w */
+		if (edesc->dma_len)
+			icvdata = &edesc->link_tbl[edesc->src_nents +
+						   edesc->dst_nents + 2];
+		else
+			icvdata = &edesc->link_tbl[0];
+
+		sg = sg_last(req->src, edesc->src_nents ? : 1);
+
+		memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
+		       ctx->authsize);
+
+		return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
+	}
 }

 static int aead_authenc_givencrypt(
@@ -1391,7 +1431,6 @@ static int talitos_remove(struct of_device *ofdev)
 	}

 	tasklet_kill(&priv->done_task);
-	tasklet_kill(&priv->error_task);

 	iounmap(priv->reg);

@@ -1451,10 +1490,9 @@ static int talitos_probe(struct of_device *ofdev,

 	priv->ofdev = ofdev;

-	INIT_LIST_HEAD(&priv->alg_list);
-
 	tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
-	tasklet_init(&priv->error_task, talitos_error, (unsigned long)dev);
+
+	INIT_LIST_HEAD(&priv->alg_list);

 	priv->irq = irq_of_parse_and_map(np, 0);

@@ -1508,6 +1546,9 @@ static int talitos_probe(struct of_device *ofdev,
 	if (of_device_is_compatible(np, "fsl,sec3.0"))
 		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

+	if (of_device_is_compatible(np, "fsl,sec2.1"))
+		priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
+
 	priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
 				  GFP_KERNEL);
 	priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
@@ -1551,7 +1592,7 @@ static int talitos_probe(struct of_device *ofdev,
 		goto err_out;
 	}
 	for (i = 0; i < priv->num_channels; i++)
-		atomic_set(&priv->submit_count[i], -priv->chfifo_len);
+		atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1));

 	priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
 	priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
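The talitos interrupt rework above bundles two ideas: the ISR now acknowledges the done interrupt itself (instead of deferring the ack to the tasklet) and then masks further done IRQs until the done tasklet has drained every channel, re-enabling them on exit via the TALITOS_IMR writes in talitos_done(). A generic sketch of that mitigation shape, with every name below a placeholder rather than the driver's:

	static irqreturn_t demo_isr(int irq, void *data)
	{
		struct demo_priv *priv = data;
		u32 status = demo_read_status(priv);	/* hypothetical accessor */

		demo_ack(priv, status);			/* ack in the hard IRQ */
		if (status & DEMO_DONE) {
			demo_mask_done(priv);		/* stop the IRQ storm */
			tasklet_schedule(&priv->done_task);
		}
		return status ? IRQ_HANDLED : IRQ_NONE;
	}

	static void demo_done(unsigned long data)
	{
		struct demo_priv *priv = (struct demo_priv *)data;

		demo_drain_channels(priv);
		demo_unmask_done(priv);		/* catch later completions */
	}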
drivers/crypto/talitos.h
@@ -37,7 +37,8 @@
 #define TALITOS_MCR_LO 0x1038
 #define TALITOS_MCR_SWR 0x1 /* s/w reset */
 #define TALITOS_IMR 0x1008 /* interrupt mask register */
-#define TALITOS_IMR_INIT 0x10fff /* enable channel IRQs */
+#define TALITOS_IMR_INIT 0x100ff /* enable channel IRQs */
+#define TALITOS_IMR_DONE 0x00055 /* done IRQs */
 #define TALITOS_IMR_LO 0x100C
 #define TALITOS_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */
 #define TALITOS_ISR 0x1010 /* interrupt status register */
@@ -55,6 +56,7 @@
 #define TALITOS_CCCR_CONT 0x2 /* channel continue */
 #define TALITOS_CCCR_RESET 0x1 /* channel reset */
 #define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c)
+#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */
 #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
 #define TALITOS_CCCR_LO_NT 0x4 /* notification type */
 #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */
@@ -102,6 +104,9 @@
 #define TALITOS_AESUISR_LO 0x4034
 #define TALITOS_MDEUISR 0x6030 /* message digest unit */
 #define TALITOS_MDEUISR_LO 0x6034
+#define TALITOS_MDEUICR 0x6038 /* interrupt control */
+#define TALITOS_MDEUICR_LO 0x603c
+#define TALITOS_MDEUICR_LO_ICE 0x4000 /* integrity check IRQ enable */
 #define TALITOS_AFEUISR 0x8030 /* arc4 unit */
 #define TALITOS_AFEUISR_LO 0x8034
 #define TALITOS_RNGUISR 0xa030 /* random number unit */
@@ -129,31 +134,34 @@
 */

 /* written back when done */
-#define DESC_HDR_DONE __constant_cpu_to_be32(0xff000000)
+#define DESC_HDR_DONE cpu_to_be32(0xff000000)
+#define DESC_HDR_LO_ICCR1_MASK cpu_to_be32(0x00180000)
+#define DESC_HDR_LO_ICCR1_PASS cpu_to_be32(0x00080000)
+#define DESC_HDR_LO_ICCR1_FAIL cpu_to_be32(0x00100000)

 /* primary execution unit select */
-#define DESC_HDR_SEL0_MASK __constant_cpu_to_be32(0xf0000000)
-#define DESC_HDR_SEL0_AFEU __constant_cpu_to_be32(0x10000000)
-#define DESC_HDR_SEL0_DEU __constant_cpu_to_be32(0x20000000)
-#define DESC_HDR_SEL0_MDEUA __constant_cpu_to_be32(0x30000000)
-#define DESC_HDR_SEL0_MDEUB __constant_cpu_to_be32(0xb0000000)
-#define DESC_HDR_SEL0_RNG __constant_cpu_to_be32(0x40000000)
-#define DESC_HDR_SEL0_PKEU __constant_cpu_to_be32(0x50000000)
-#define DESC_HDR_SEL0_AESU __constant_cpu_to_be32(0x60000000)
-#define DESC_HDR_SEL0_KEU __constant_cpu_to_be32(0x70000000)
-#define DESC_HDR_SEL0_CRCU __constant_cpu_to_be32(0x80000000)
+#define DESC_HDR_SEL0_MASK cpu_to_be32(0xf0000000)
+#define DESC_HDR_SEL0_AFEU cpu_to_be32(0x10000000)
+#define DESC_HDR_SEL0_DEU cpu_to_be32(0x20000000)
+#define DESC_HDR_SEL0_MDEUA cpu_to_be32(0x30000000)
+#define DESC_HDR_SEL0_MDEUB cpu_to_be32(0xb0000000)
+#define DESC_HDR_SEL0_RNG cpu_to_be32(0x40000000)
+#define DESC_HDR_SEL0_PKEU cpu_to_be32(0x50000000)
+#define DESC_HDR_SEL0_AESU cpu_to_be32(0x60000000)
+#define DESC_HDR_SEL0_KEU cpu_to_be32(0x70000000)
+#define DESC_HDR_SEL0_CRCU cpu_to_be32(0x80000000)

 /* primary execution unit mode (MODE0) and derivatives */
-#define DESC_HDR_MODE0_ENCRYPT __constant_cpu_to_be32(0x00100000)
-#define DESC_HDR_MODE0_AESU_CBC __constant_cpu_to_be32(0x00200000)
-#define DESC_HDR_MODE0_DEU_CBC __constant_cpu_to_be32(0x00400000)
-#define DESC_HDR_MODE0_DEU_3DES __constant_cpu_to_be32(0x00200000)
-#define DESC_HDR_MODE0_MDEU_INIT __constant_cpu_to_be32(0x01000000)
-#define DESC_HDR_MODE0_MDEU_HMAC __constant_cpu_to_be32(0x00800000)
-#define DESC_HDR_MODE0_MDEU_PAD __constant_cpu_to_be32(0x00400000)
-#define DESC_HDR_MODE0_MDEU_MD5 __constant_cpu_to_be32(0x00200000)
-#define DESC_HDR_MODE0_MDEU_SHA256 __constant_cpu_to_be32(0x00100000)
-#define DESC_HDR_MODE0_MDEU_SHA1 __constant_cpu_to_be32(0x00000000)
+#define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000)
+#define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000)
+#define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000)
+#define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000)
+#define DESC_HDR_MODE0_MDEU_INIT cpu_to_be32(0x01000000)
+#define DESC_HDR_MODE0_MDEU_HMAC cpu_to_be32(0x00800000)
+#define DESC_HDR_MODE0_MDEU_PAD cpu_to_be32(0x00400000)
+#define DESC_HDR_MODE0_MDEU_MD5 cpu_to_be32(0x00200000)
+#define DESC_HDR_MODE0_MDEU_SHA256 cpu_to_be32(0x00100000)
+#define DESC_HDR_MODE0_MDEU_SHA1 cpu_to_be32(0x00000000)
 #define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \
 					DESC_HDR_MODE0_MDEU_HMAC)
 #define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \
@@ -162,18 +170,19 @@
 							DESC_HDR_MODE0_MDEU_HMAC)

 /* secondary execution unit select (SEL1) */
-#define DESC_HDR_SEL1_MASK __constant_cpu_to_be32(0x000f0000)
-#define DESC_HDR_SEL1_MDEUA __constant_cpu_to_be32(0x00030000)
-#define DESC_HDR_SEL1_MDEUB __constant_cpu_to_be32(0x000b0000)
-#define DESC_HDR_SEL1_CRCU __constant_cpu_to_be32(0x00080000)
+#define DESC_HDR_SEL1_MASK cpu_to_be32(0x000f0000)
+#define DESC_HDR_SEL1_MDEUA cpu_to_be32(0x00030000)
+#define DESC_HDR_SEL1_MDEUB cpu_to_be32(0x000b0000)
+#define DESC_HDR_SEL1_CRCU cpu_to_be32(0x00080000)

 /* secondary execution unit mode (MODE1) and derivatives */
-#define DESC_HDR_MODE1_MDEU_INIT __constant_cpu_to_be32(0x00001000)
-#define DESC_HDR_MODE1_MDEU_HMAC __constant_cpu_to_be32(0x00000800)
-#define DESC_HDR_MODE1_MDEU_PAD __constant_cpu_to_be32(0x00000400)
-#define DESC_HDR_MODE1_MDEU_MD5 __constant_cpu_to_be32(0x00000200)
-#define DESC_HDR_MODE1_MDEU_SHA256 __constant_cpu_to_be32(0x00000100)
-#define DESC_HDR_MODE1_MDEU_SHA1 __constant_cpu_to_be32(0x00000000)
+#define DESC_HDR_MODE1_MDEU_CICV cpu_to_be32(0x00004000)
+#define DESC_HDR_MODE1_MDEU_INIT cpu_to_be32(0x00001000)
+#define DESC_HDR_MODE1_MDEU_HMAC cpu_to_be32(0x00000800)
+#define DESC_HDR_MODE1_MDEU_PAD cpu_to_be32(0x00000400)
+#define DESC_HDR_MODE1_MDEU_MD5 cpu_to_be32(0x00000200)
+#define DESC_HDR_MODE1_MDEU_SHA256 cpu_to_be32(0x00000100)
+#define DESC_HDR_MODE1_MDEU_SHA1 cpu_to_be32(0x00000000)
 #define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \
 					DESC_HDR_MODE1_MDEU_HMAC)
 #define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \
@@ -182,16 +191,16 @@
 							DESC_HDR_MODE1_MDEU_HMAC)

 /* direction of overall data flow (DIR) */
-#define DESC_HDR_DIR_INBOUND __constant_cpu_to_be32(0x00000002)
+#define DESC_HDR_DIR_INBOUND cpu_to_be32(0x00000002)

 /* request done notification (DN) */
-#define DESC_HDR_DONE_NOTIFY __constant_cpu_to_be32(0x00000001)
+#define DESC_HDR_DONE_NOTIFY cpu_to_be32(0x00000001)

 /* descriptor types */
-#define DESC_HDR_TYPE_AESU_CTR_NONSNOOP __constant_cpu_to_be32(0 << 3)
-#define DESC_HDR_TYPE_IPSEC_ESP __constant_cpu_to_be32(1 << 3)
-#define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU __constant_cpu_to_be32(2 << 3)
-#define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU __constant_cpu_to_be32(4 << 3)
+#define DESC_HDR_TYPE_AESU_CTR_NONSNOOP cpu_to_be32(0 << 3)
+#define DESC_HDR_TYPE_IPSEC_ESP cpu_to_be32(1 << 3)
+#define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU cpu_to_be32(2 << 3)
+#define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU cpu_to_be32(4 << 3)

 /* link table extent field bits */
 #define DESC_PTR_LNKTBL_JUMP 0x80
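The blanket __constant_cpu_to_be32 → cpu_to_be32 conversion above works because the byteorder macros already select a constant-folding expansion when the argument is a compile-time constant, so the __constant_ spelling buys nothing. A small illustration, assuming a kernel of this vintage (DEMO_FLAG and is_flagged are invented for the example):

	#include <asm/byteorder.h>

	/* folds at compile time; both spellings produce identical code */
	#define DEMO_FLAG	cpu_to_be32(0x00100000)

	static int is_flagged(__be32 hdr)
	{
		return (hdr & DEMO_FLAG) != 0;
	}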
include/crypto/aes.h
@@ -23,10 +23,10 @@ struct crypto_aes_ctx {
 	u32 key_dec[AES_MAX_KEYLENGTH_U32];
 };

-extern u32 crypto_ft_tab[4][256];
-extern u32 crypto_fl_tab[4][256];
-extern u32 crypto_it_tab[4][256];
-extern u32 crypto_il_tab[4][256];
+extern const u32 crypto_ft_tab[4][256];
+extern const u32 crypto_fl_tab[4][256];
+extern const u32 crypto_it_tab[4][256];
+extern const u32 crypto_il_tab[4][256];

 int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 		unsigned int key_len);
include/crypto/algapi.h
@@ -22,9 +22,18 @@ struct seq_file;

 struct crypto_type {
 	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
+	unsigned int (*extsize)(struct crypto_alg *alg,
+				const struct crypto_type *frontend);
 	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
 	void (*exit)(struct crypto_tfm *tfm);
+	int (*init_tfm)(struct crypto_tfm *tfm,
+			const struct crypto_type *frontend);
 	void (*show)(struct seq_file *m, struct crypto_alg *alg);
 	struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
+
+	unsigned int type;
+	unsigned int maskclear;
+	unsigned int maskset;
+	unsigned int tfmsize;
 };

 struct crypto_instance {
@@ -239,6 +248,11 @@ static inline struct crypto_hash *crypto_spawn_hash(struct crypto_spawn *spawn)
 	return __crypto_hash_cast(crypto_spawn_tfm(spawn, type, mask));
 }

+static inline void *crypto_hash_ctx(struct crypto_hash *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
 static inline void *crypto_hash_ctx_aligned(struct crypto_hash *tfm)
 {
 	return crypto_tfm_ctx_aligned(&tfm->base);
include/crypto/hash.h
@@ -15,10 +15,40 @@

 #include <linux/crypto.h>

+struct shash_desc {
+	struct crypto_shash *tfm;
+	u32 flags;
+
+	void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+struct shash_alg {
+	int (*init)(struct shash_desc *desc);
+	int (*reinit)(struct shash_desc *desc);
+	int (*update)(struct shash_desc *desc, const u8 *data,
+		      unsigned int len);
+	int (*final)(struct shash_desc *desc, u8 *out);
+	int (*finup)(struct shash_desc *desc, const u8 *data,
+		     unsigned int len, u8 *out);
+	int (*digest)(struct shash_desc *desc, const u8 *data,
+		      unsigned int len, u8 *out);
+	int (*setkey)(struct crypto_shash *tfm, const u8 *key,
+		      unsigned int keylen);
+
+	unsigned int descsize;
+	unsigned int digestsize;
+
+	struct crypto_alg base;
+};
+
 struct crypto_ahash {
 	struct crypto_tfm base;
 };

+struct crypto_shash {
+	struct crypto_tfm base;
+};
+
 static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
 {
 	return (struct crypto_ahash *)tfm;
@@ -87,6 +117,11 @@ static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
 	return crypto_ahash_crt(tfm)->reqsize;
 }

+static inline void *ahash_request_ctx(struct ahash_request *req)
+{
+	return req->__ctx;
+}
+
 static inline int crypto_ahash_setkey(struct crypto_ahash *tfm,
 				      const u8 *key, unsigned int keylen)
 {
@@ -101,6 +136,14 @@ static inline int crypto_ahash_digest(struct ahash_request *req)
 	return crt->digest(req);
 }

+static inline void crypto_ahash_export(struct ahash_request *req, u8 *out)
+{
+	memcpy(out, ahash_request_ctx(req),
+	       crypto_ahash_reqsize(crypto_ahash_reqtfm(req)));
+}
+
+int crypto_ahash_import(struct ahash_request *req, const u8 *in);
+
 static inline int crypto_ahash_init(struct ahash_request *req)
 {
 	struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
@@ -169,4 +212,86 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
 	req->result = result;
 }

+struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
+					u32 mask);
+
+static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
+{
+	return &tfm->base;
+}
+
+static inline void crypto_free_shash(struct crypto_shash *tfm)
+{
+	crypto_free_tfm(crypto_shash_tfm(tfm));
+}
+
+static inline unsigned int crypto_shash_alignmask(
+	struct crypto_shash *tfm)
+{
+	return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
+}
+
+static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg)
+{
+	return container_of(alg, struct shash_alg, base);
+}
+
+static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm)
+{
+	return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg);
+}
+
+static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
+{
+	return crypto_shash_alg(tfm)->digestsize;
+}
+
+static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm)
+{
+	return crypto_tfm_get_flags(crypto_shash_tfm(tfm));
+}
+
+static inline void crypto_shash_set_flags(struct crypto_shash *tfm, u32 flags)
+{
+	crypto_tfm_set_flags(crypto_shash_tfm(tfm), flags);
+}
+
+static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
+{
+	crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags);
+}
+
+static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
+{
+	return crypto_shash_alg(tfm)->descsize;
+}
+
+static inline void *shash_desc_ctx(struct shash_desc *desc)
+{
+	return desc->__ctx;
+}
+
+int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
+			unsigned int keylen);
+int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
+			unsigned int len, u8 *out);
+
+static inline void crypto_shash_export(struct shash_desc *desc, u8 *out)
+{
+	memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm));
+}
+
+int crypto_shash_import(struct shash_desc *desc, const u8 *in);
+
+static inline int crypto_shash_init(struct shash_desc *desc)
+{
+	return crypto_shash_alg(desc->tfm)->init(desc);
+}
+
+int crypto_shash_update(struct shash_desc *desc, const u8 *data,
+			unsigned int len);
+int crypto_shash_final(struct shash_desc *desc, u8 *out);
+int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
+		       unsigned int len, u8 *out);
+
 #endif /* _CRYPTO_HASH_H */
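Taken together, the declarations above form the whole synchronous-hash (shash) user API. A minimal caller, modelled directly on the lib/libcrc32c.c rewrite later in this diff (the on-stack struct with a trailing ctx[] sized by crypto_shash_descsize() is that file's idiom; demo_crc32c itself is an illustrative name):

	#include <crypto/hash.h>
	#include <linux/err.h>

	static int demo_crc32c(const void *data, unsigned int len, u32 *out)
	{
		struct crypto_shash *tfm;
		int err;

		tfm = crypto_alloc_shash("crc32c", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			struct {
				struct shash_desc shash;
				char ctx[crypto_shash_descsize(tfm)];
			} desc;

			desc.shash.tfm = tfm;
			desc.shash.flags = 0;

			err = crypto_shash_digest(&desc.shash, data, len,
						  (u8 *)out);
		}

		crypto_free_shash(tfm);
		return err;
	}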
include/crypto/internal/hash.h
@@ -39,6 +39,12 @@ extern const struct crypto_type crypto_ahash_type;
 int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
 int crypto_hash_walk_first(struct ahash_request *req,
 			   struct crypto_hash_walk *walk);
+int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
+				  struct crypto_hash_walk *walk,
+				  struct scatterlist *sg, unsigned int len);
+
+int crypto_register_shash(struct shash_alg *alg);
+int crypto_unregister_shash(struct shash_alg *alg);

 static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
 {
@@ -63,16 +69,16 @@ static inline struct ahash_request *ahash_dequeue_request(
 	return ahash_request_cast(crypto_dequeue_request(queue));
 }

-static inline void *ahash_request_ctx(struct ahash_request *req)
-{
-	return req->__ctx;
-}
-
 static inline int ahash_tfm_in_queue(struct crypto_queue *queue,
 					  struct crypto_ahash *tfm)
 {
 	return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm));
 }

+static inline void *crypto_shash_ctx(struct crypto_shash *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
 #endif /* _CRYPTO_INTERNAL_HASH_H */
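crypto_register_shash()/crypto_unregister_shash() above are the provider side of the new interface. A toy provider sketch, to show which shash_alg hooks a minimal implementation fills in; the algorithm here is a stand-in invented for illustration, not a real kernel one:

	#include <crypto/internal/hash.h>
	#include <linux/module.h>
	#include <linux/string.h>

	static int demo_init(struct shash_desc *desc)
	{
		u32 *sum = shash_desc_ctx(desc);

		*sum = 0;
		return 0;
	}

	static int demo_update(struct shash_desc *desc, const u8 *data,
			       unsigned int len)
	{
		u32 *sum = shash_desc_ctx(desc);

		while (len--)
			*sum += *data++;	/* toy byte sum, illustration only */
		return 0;
	}

	static int demo_final(struct shash_desc *desc, u8 *out)
	{
		memcpy(out, shash_desc_ctx(desc), sizeof(u32));
		return 0;
	}

	static struct shash_alg demo_alg = {
		.init		= demo_init,
		.update		= demo_update,
		.final		= demo_final,
		.descsize	= sizeof(u32),
		.digestsize	= sizeof(u32),
		.base		= {
			.cra_name	= "sum32-demo",
			.cra_blocksize	= 1,
			.cra_module	= THIS_MODULE,
		},
	};

	/* paired with crypto_register_shash(&demo_alg) at module init and
	 * crypto_unregister_shash(&demo_alg) at module exit */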
include/linux/crc32c.h
@@ -3,9 +3,9 @@

 #include <linux/types.h>

-extern u32 crc32c_le(u32 crc, unsigned char const *address, size_t length);
-extern u32 crc32c_be(u32 crc, unsigned char const *address, size_t length);
+extern u32 crc32c(u32 crc, const void *address, unsigned int length);

-#define crc32c(seed, data, length) crc32c_le(seed, (unsigned char const *)data, length)
+/* This macro exists for backwards-compatibility. */
+#define crc32c_le crc32c

 #endif /* _LINUX_CRC32C_H */
include/linux/crypto.h
@@ -36,7 +36,8 @@
 #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
 #define CRYPTO_ALG_TYPE_DIGEST 0x00000008
-#define CRYPTO_ALG_TYPE_HASH 0x00000009
+#define CRYPTO_ALG_TYPE_HASH 0x00000008
+#define CRYPTO_ALG_TYPE_SHASH 0x00000009
 #define CRYPTO_ALG_TYPE_AHASH 0x0000000a
 #define CRYPTO_ALG_TYPE_RNG 0x0000000c

@@ -220,6 +221,7 @@ struct ablkcipher_alg {

 struct ahash_alg {
 	int (*init)(struct ahash_request *req);
+	int (*reinit)(struct ahash_request *req);
 	int (*update)(struct ahash_request *req);
 	int (*final)(struct ahash_request *req);
 	int (*digest)(struct ahash_request *req);
@@ -480,6 +482,8 @@ struct crypto_tfm {
 		struct compress_tfm compress;
 		struct rng_tfm rng;
 	} crt_u;
+
+	void (*exit)(struct crypto_tfm *tfm);

 	struct crypto_alg *__crt_alg;

@@ -544,7 +548,9 @@ struct crypto_attr_u32 {
 * Transform user interface.
 */

-struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags);
+struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
+				    const struct crypto_type *frontend,
+				    u32 type, u32 mask);
 struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
 void crypto_free_tfm(struct crypto_tfm *tfm);

lib/Kconfig
@@ -64,6 +64,8 @@ config CRC7

 config LIBCRC32C
 	tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check"
+	select CRYPTO
+	select CRYPTO_CRC32C
 	help
 	  This option is provided for the case where no in-kernel-tree
 	  modules require CRC32c functions, but a module built outside the
lib/libcrc32c.c (204 changed lines)
@@ -30,168 +30,52 @@
 * any later version.
 *
 */
-#include <linux/crc32c.h>
-#include <linux/compiler.h>
-
+#include <crypto/hash.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
 #include <linux/module.h>

+static struct crypto_shash *tfm;
+
+u32 crc32c(u32 crc, const void *address, unsigned int length)
+{
+	struct {
+		struct shash_desc shash;
+		char ctx[crypto_shash_descsize(tfm)];
+	} desc;
+	int err;
+
+	desc.shash.tfm = tfm;
+	desc.shash.flags = 0;
+	*(u32 *)desc.ctx = crc;
+
+	err = crypto_shash_update(&desc.shash, address, length);
+	BUG_ON(err);
+
+	return *(u32 *)desc.ctx;
+}
+
+EXPORT_SYMBOL(crc32c);
+
+static int __init libcrc32c_mod_init(void)
+{
+	tfm = crypto_alloc_shash("crc32c", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	return 0;
+}
+
+static void __exit libcrc32c_mod_fini(void)
+{
+	crypto_free_shash(tfm);
+}
+
+module_init(libcrc32c_mod_init);
+module_exit(libcrc32c_mod_fini);
+
 MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
 MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
 MODULE_LICENSE("GPL");
-
-#define CRC32C_POLY_BE 0x1EDC6F41
-#define CRC32C_POLY_LE 0x82F63B78
-
-#ifndef CRC_LE_BITS
-# define CRC_LE_BITS 8
-#endif
-
-
-/*
- * Haven't generated a big-endian table yet, but the bit-wise version
- * should at least work.
- */
-#if defined CRC_BE_BITS && CRC_BE_BITS != 1
-#undef CRC_BE_BITS
-#endif
-#ifndef CRC_BE_BITS
-# define CRC_BE_BITS 1
-#endif
-
-EXPORT_SYMBOL(crc32c_le);
-
-#if CRC_LE_BITS == 1
-/*
- * Compute things bit-wise, as done in crc32.c. We could share the tight
- * loop below with crc32 and vary the POLY if we don't find value in terms
- * of space and maintainability in keeping the two modules separate.
- */
-u32 __pure
-crc32c_le(u32 crc, unsigned char const *p, size_t len)
-{
-	int i;
-	while (len--) {
-		crc ^= *p++;
-		for (i = 0; i < 8; i++)
-			crc = (crc >> 1) ^ ((crc & 1) ? CRC32C_POLY_LE : 0);
-	}
-	return crc;
-}
-#else
-
-/*
- * This is the CRC-32C table
- * Generated with:
- * width = 32 bits
- * poly = 0x1EDC6F41
- * reflect input bytes = true
- * reflect output bytes = true
- */
-
-static const u32 crc32c_table[256] = {
-	0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L,
-	0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL,
-	0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL,
-	0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L, 0x5E133C24L,
-	0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL,
-	0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L,
-	0x9A879FA0L, 0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L,
-	0x5D1D08BFL, 0xAF768BBCL, 0xBC267848L, 0x4E4DFB4BL,
-	0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L, 0x33ED7D2AL,
-	0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L,
-	0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L,
-	0x6DFE410EL, 0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL,
-	0x30E349B1L, 0xC288CAB2L, 0xD1D83946L, 0x23B3BA45L,
-	0xF779DEAEL, 0x05125DADL, 0x1642AE59L, 0xE4292D5AL,
-	0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL,
-	0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L,
-	0x417B1DBCL, 0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L,
-	0x86E18AA3L, 0x748A09A0L, 0x67DAFA54L, 0x95B17957L,
-	0xCBA24573L, 0x39C9C670L, 0x2A993584L, 0xD8F2B687L,
-	0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L,
-	0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L,
-	0x96BF4DCCL, 0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L,
-	0xDBFC821CL, 0x2997011FL, 0x3AC7F2EBL, 0xC8AC71E8L,
-	0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L, 0x0F36E6F7L,
-	0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L,
-	0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L,
-	0xEB1FCBADL, 0x197448AEL, 0x0A24BB5AL, 0xF84F3859L,
-	0x2C855CB2L, 0xDEEEDFB1L, 0xCDBE2C45L, 0x3FD5AF46L,
-	0x7198540DL, 0x83F3D70EL, 0x90A324FAL, 0x62C8A7F9L,
-	0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L,
-	0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L,
-	0x3CDB9BDDL, 0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L,
-	0x82F63B78L, 0x709DB87BL, 0x63CD4B8FL, 0x91A6C88CL,
-	0x456CAC67L, 0xB7072F64L, 0xA457DC90L, 0x563C5F93L,
-	0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L,
-	0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL,
-	0x92A8FC17L, 0x60C37F14L, 0x73938CE0L, 0x81F80FE3L,
-	0x55326B08L, 0xA759E80BL, 0xB4091BFFL, 0x466298FCL,
-	0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL, 0x0B21572CL,
-	0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L,
-	0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L,
-	0x65D122B9L, 0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL,
-	0x2892ED69L, 0xDAF96E6AL, 0xC9A99D9EL, 0x3BC21E9DL,
-	0xEF087A76L, 0x1D63F975L, 0x0E330A81L, 0xFC588982L,
-	0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL,
-	0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L,
-	0x38CC2A06L, 0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L,
-	0xFF56BD19L, 0x0D3D3E1AL, 0x1E6DCDEEL, 0xEC064EEDL,
-	0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L, 0xD0DDD530L,
-	0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL,
-	0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL,
-	0x8ECEE914L, 0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L,
-	0xD3D3E1ABL, 0x21B862A8L, 0x32E8915CL, 0xC083125FL,
-	0x144976B4L, 0xE622F5B7L, 0xF5720643L, 0x07198540L,
-	0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L,
-	0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL,
-	0xE330A81AL, 0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL,
-	0x24AA3F05L, 0xD6C1BC06L, 0xC5914FF2L, 0x37FACCF1L,
-	0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L, 0x7AB90321L,
-	0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL,
-	0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L,
-	0x34F4F86AL, 0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL,
-	0x79B737BAL, 0x8BDCB4B9L, 0x988C474DL, 0x6AE7C44EL,
-	0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L, 0xAD7D5351L
-};
-
-/*
- * Steps through buffer one byte at at time, calculates reflected
- * crc using table.
- */
-
-u32 __pure
-crc32c_le(u32 crc, unsigned char const *data, size_t length)
-{
-	while (length--)
-		crc =
-		    crc32c_table[(crc ^ *data++) & 0xFFL] ^ (crc >> 8);
-
-	return crc;
-}
-
-#endif	/* CRC_LE_BITS == 8 */
-
-EXPORT_SYMBOL(crc32c_be);
-
-#if CRC_BE_BITS == 1
-u32 __pure
-crc32c_be(u32 crc, unsigned char const *p, size_t len)
-{
-	int i;
-	while (len--) {
-		crc ^= *p++ << 24;
-		for (i = 0; i < 8; i++)
-			crc =
-			    (crc << 1) ^ ((crc & 0x80000000) ? CRC32C_POLY_BE :
-					  0);
-	}
-	return crc;
-}
-#endif
-
-/*
- * Unit test
- *
- * A small unit test suite is implemented as part of the crypto suite.
- * Select CRYPTO_CRC32C and use the tcrypt module to run the tests.
- */
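Callers are unaffected by the rewrite above: crc32c() keeps the seed-in/crc-out signature declared in include/linux/crc32c.h, now routed through the crypto layer. For example (buf and len stand for the caller's data):

	u32 crc = crc32c(~0, buf, len);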