Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "API:
   - Add support for AEAD in simd
   - Add fuzz testing to testmgr
   - Add panic_on_fail module parameter to testmgr
   - Use per-CPU struct instead of multiple variables in scompress
   - Change verify API for akcipher

  Algorithms:
   - Convert x86 AEAD algorithms over to simd
   - Forbid 2-key 3DES in FIPS mode
   - Add EC-RDSA (GOST 34.10) algorithm

  Drivers:
   - Set output IV with ctr-aes in crypto4xx
   - Set output IV in rockchip
   - Fix potential length overflow with hashing in sun4i-ss
   - Fix computation error with ctr in vmx
   - Add SM4 protected keys support in ccree
   - Remove long-broken mxc-scc driver
   - Add rfc4106(gcm(aes)) cipher support in cavium/nitrox"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (179 commits)
  crypto: ccree - use a proper le32 type for le32 val
  crypto: ccree - remove set but not used variable 'du_size'
  crypto: ccree - Make cc_sec_disable static
  crypto: ccree - fix spelling mistake "protedcted" -> "protected"
  crypto: caam/qi2 - generate hash keys in-place
  crypto: caam/qi2 - fix DMA mapping of stack memory
  crypto: caam/qi2 - fix zero-length buffer DMA mapping
  crypto: stm32/cryp - update to return iv_out
  crypto: stm32/cryp - remove request mutex protection
  crypto: stm32/cryp - add weak key check for DES
  crypto: atmel - remove set but not used variable 'alg_name'
  crypto: picoxcell - Use dev_get_drvdata()
  crypto: crypto4xx - get rid of redundant using_sd variable
  crypto: crypto4xx - use sync skcipher for fallback
  crypto: crypto4xx - fix cfb and ofb "overran dst buffer" issues
  crypto: crypto4xx - fix ctr-aes missing output IV
  crypto: ecrdsa - select ASN1 and OID_REGISTRY for EC-RDSA
  crypto: ux500 - use ccflags-y instead of CFLAGS_<basename>.o
  crypto: ccree - handle tee fips error during power management resume
  crypto: ccree - add function to handle cryptocell tee fips error
  ...
This commit is contained in:
Commit 81ff5d2cba
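The bulk of the arch glue-code hunks below make one mechanical change: bare may_use_simd() / irq_fpu_usable() / in_interrupt() checks are replaced by crypto_simd_usable() from <crypto/internal/simd.h>, so that the self-tests can also force the generic fallback paths to be exercised. A minimal sketch of the resulting pattern in a hash update hook (the asm helper and generic fallback names below are illustrative stand-ins, not functions from this pull):

#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>

/* Illustrative stand-ins: a real driver supplies its own asm core and
 * generic fallback; these names are not part of this pull request. */
asmlinkage void example_transform_neon(u32 *state, const u8 *data,
                                       unsigned int len);
int example_update_generic(struct shash_desc *desc, const u8 *data,
                           unsigned int len);

static int example_update(struct shash_desc *desc, const u8 *data,
                          unsigned int len)
{
        u32 *state = shash_desc_ctx(desc);

        /* crypto_simd_usable() is false when SIMD cannot be used (e.g.
         * in interrupt context) or when the self-tests have disabled it,
         * so fall back to the C implementation in that case. */
        if (!crypto_simd_usable())
                return example_update_generic(desc, data, len);

        kernel_neon_begin();
        example_transform_neon(state, data, len);
        kernel_neon_end();

        return 0;
}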
@@ -133,7 +133,6 @@ Code Example For Use of Operational State Memory With SHASH
         if (!sdesc)
                 return ERR_PTR(-ENOMEM);
         sdesc->shash.tfm = alg;
-        sdesc->shash.flags = 0x0;
         return sdesc;
 }

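The documentation hunk above drops the shash_desc::flags assignment because the flags field goes away with this series; only ->tfm needs to be set before the descriptor is used. For context, the full helper from that documentation example now reads roughly as follows (reconstructed around the hunk, so treat it as a sketch):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

struct sdesc {
        struct shash_desc shash;
        char ctx[];
};

static struct sdesc *init_sdesc(struct crypto_shash *alg)
{
        struct sdesc *sdesc;
        int size;

        size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
        sdesc = kmalloc(size, GFP_KERNEL);
        if (!sdesc)
                return ERR_PTR(-ENOMEM);
        sdesc->shash.tfm = alg;  /* no ->flags to initialise any more */
        return sdesc;
}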
@@ -278,6 +278,8 @@ static int __xts_crypt(struct skcipher_request *req,
         int err;

         err = skcipher_walk_virt(&walk, req, true);
+        if (err)
+                return err;

         crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);

@@ -21,6 +21,7 @@

 #include <crypto/algapi.h>
 #include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -93,7 +94,7 @@ static int chacha_neon(struct skcipher_request *req)
         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
         struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);

-        if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+        if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
                 return crypto_chacha_crypt(req);

         return chacha_neon_stream_xor(req, ctx, req->iv);
@@ -107,7 +108,7 @@ static int xchacha_neon(struct skcipher_request *req)
         u32 state[16];
         u8 real_iv[16];

-        if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+        if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
                 return crypto_xchacha_crypt(req);

         crypto_chacha_init(state, ctx, req->iv);

@@ -16,6 +16,7 @@
 #include <linux/string.h>

 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>

 #include <asm/hwcap.h>
 #include <asm/neon.h>
@@ -113,7 +114,7 @@ static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
         u32 *crc = shash_desc_ctx(desc);
         unsigned int l;

-        if (may_use_simd()) {
+        if (crypto_simd_usable()) {
                 if ((u32)data % SCALE_F) {
                         l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));

@@ -147,7 +148,7 @@ static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
         u32 *crc = shash_desc_ctx(desc);
         unsigned int l;

-        if (may_use_simd()) {
+        if (crypto_simd_usable()) {
                 if ((u32)data % SCALE_F) {
                         l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));

@@ -15,6 +15,7 @@
 #include <linux/string.h>

 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>

 #include <asm/neon.h>
 #include <asm/simd.h>
@@ -36,7 +37,7 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
 {
         u16 *crc = shash_desc_ctx(desc);

-        if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+        if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
                 kernel_neon_begin();
                 *crc = crc_t10dif_pmull(*crc, data, length);
                 kernel_neon_end();

@@ -14,6 +14,7 @@
 #include <asm/unaligned.h>
 #include <crypto/cryptd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/gf128mul.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
@@ -185,7 +186,6 @@ static int ghash_async_init(struct ahash_request *req)
         struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);

         desc->tfm = child;
-        desc->flags = req->base.flags;
         return crypto_shash_init(desc);
 }

@@ -196,7 +196,7 @@ static int ghash_async_update(struct ahash_request *req)
         struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
         struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

-        if (!may_use_simd() ||
+        if (!crypto_simd_usable() ||
             (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
                 memcpy(cryptd_req, req, sizeof(*req));
                 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -214,7 +214,7 @@ static int ghash_async_final(struct ahash_request *req)
         struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
         struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

-        if (!may_use_simd() ||
+        if (!crypto_simd_usable() ||
             (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
                 memcpy(cryptd_req, req, sizeof(*req));
                 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -232,7 +232,7 @@ static int ghash_async_digest(struct ahash_request *req)
         struct ahash_request *cryptd_req = ahash_request_ctx(req);
         struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

-        if (!may_use_simd() ||
+        if (!crypto_simd_usable() ||
             (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
                 memcpy(cryptd_req, req, sizeof(*req));
                 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -242,7 +242,6 @@ static int ghash_async_digest(struct ahash_request *req)
                 struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);

                 desc->tfm = child;
-                desc->flags = req->base.flags;
                 return shash_ahash_digest(req, desc);
         }
 }

@@ -255,7 +254,6 @@ static int ghash_async_import(struct ahash_request *req, const void *in)
         struct shash_desc *desc = cryptd_shash_desc(cryptd_req);

         desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
-        desc->flags = req->base.flags;

         return crypto_shash_import(desc, in);
 }

@@ -9,6 +9,7 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/nhpoly1305.h>
 #include <linux/module.h>

@@ -25,7 +26,7 @@ static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
 static int nhpoly1305_neon_update(struct shash_desc *desc,
                                   const u8 *src, unsigned int srclen)
 {
-        if (srclen < 64 || !may_use_simd())
+        if (srclen < 64 || !crypto_simd_usable())
                 return crypto_nhpoly1305_update(desc, src, srclen);

         do {

@ -9,6 +9,7 @@
|
|||
*/
|
||||
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/sha.h>
|
||||
#include <crypto/sha1_base.h>
|
||||
#include <linux/cpufeature.h>
|
||||
|
@ -33,7 +34,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
|
|||
{
|
||||
struct sha1_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
if (!may_use_simd() ||
|
||||
if (!crypto_simd_usable() ||
|
||||
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
|
||||
return sha1_update_arm(desc, data, len);
|
||||
|
||||
|
@ -47,7 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
|
|||
static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len, u8 *out)
|
||||
{
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return sha1_finup_arm(desc, data, len, out);
|
||||
|
||||
kernel_neon_begin();
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
*/
|
||||
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
|
@ -39,7 +40,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
|
|||
{
|
||||
struct sha1_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
if (!may_use_simd() ||
|
||||
if (!crypto_simd_usable() ||
|
||||
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
|
||||
return sha1_update_arm(desc, data, len);
|
||||
|
||||
|
@ -54,7 +55,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
|
|||
static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len, u8 *out)
|
||||
{
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return sha1_finup_arm(desc, data, len, out);
|
||||
|
||||
kernel_neon_begin();
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
*/
|
||||
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/sha.h>
|
||||
#include <crypto/sha256_base.h>
|
||||
#include <linux/cpufeature.h>
|
||||
|
@ -34,7 +35,7 @@ static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
|
|||
{
|
||||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
if (!may_use_simd() ||
|
||||
if (!crypto_simd_usable() ||
|
||||
(sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
|
||||
return crypto_sha256_arm_update(desc, data, len);
|
||||
|
||||
|
@ -49,7 +50,7 @@ static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
|
|||
static int sha2_ce_finup(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len, u8 *out)
|
||||
{
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return crypto_sha256_arm_finup(desc, data, len, out);
|
||||
|
||||
kernel_neon_begin();
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
*/
|
||||
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <linux/cryptohash.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/string.h>
|
||||
|
@ -34,7 +35,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
|
|||
{
|
||||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
if (!may_use_simd() ||
|
||||
if (!crypto_simd_usable() ||
|
||||
(sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
|
||||
return crypto_sha256_arm_update(desc, data, len);
|
||||
|
||||
|
@ -49,7 +50,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
|
|||
static int sha256_finup(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len, u8 *out)
|
||||
{
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return crypto_sha256_arm_finup(desc, data, len, out);
|
||||
|
||||
kernel_neon_begin();
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
*/
|
||||
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/sha.h>
|
||||
#include <crypto/sha512_base.h>
|
||||
#include <linux/crypto.h>
|
||||
|
@ -30,7 +31,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
|
|||
{
|
||||
struct sha512_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
if (!may_use_simd() ||
|
||||
if (!crypto_simd_usable() ||
|
||||
(sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
|
||||
return sha512_arm_update(desc, data, len);
|
||||
|
||||
|
@ -45,7 +46,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
|
|||
static int sha512_neon_finup(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len, u8 *out)
|
||||
{
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return sha512_arm_finup(desc, data, len, out);
|
||||
|
||||
kernel_neon_begin();
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
#include <crypto/aes.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
|
@ -109,7 +110,7 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
|
|||
static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
|
||||
u32 abytes, u32 *macp)
|
||||
{
|
||||
if (may_use_simd()) {
|
||||
if (crypto_simd_usable()) {
|
||||
kernel_neon_begin();
|
||||
ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
|
||||
num_rounds(key));
|
||||
|
@ -255,7 +256,7 @@ static int ccm_encrypt(struct aead_request *req)
|
|||
|
||||
err = skcipher_walk_aead_encrypt(&walk, req, false);
|
||||
|
||||
if (may_use_simd()) {
|
||||
if (crypto_simd_usable()) {
|
||||
while (walk.nbytes) {
|
||||
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
|
||||
|
||||
|
@ -313,7 +314,7 @@ static int ccm_decrypt(struct aead_request *req)
|
|||
|
||||
err = skcipher_walk_aead_decrypt(&walk, req, false);
|
||||
|
||||
if (may_use_simd()) {
|
||||
if (crypto_simd_usable()) {
|
||||
while (walk.nbytes) {
|
||||
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
|
||||
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
#include <asm/simd.h>
|
||||
#include <asm/unaligned.h>
|
||||
#include <crypto/aes.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <linux/cpufeature.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -52,7 +53,7 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
|
|||
{
|
||||
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
if (!may_use_simd()) {
|
||||
if (!crypto_simd_usable()) {
|
||||
__aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
|
||||
return;
|
||||
}
|
||||
|
@ -66,7 +67,7 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
|
|||
{
|
||||
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
if (!may_use_simd()) {
|
||||
if (!crypto_simd_usable()) {
|
||||
__aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -405,7 +405,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
|
|||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return aes_ctr_encrypt_fallback(ctx, req);
|
||||
|
||||
return ctr_encrypt(req);
|
||||
|
@ -642,7 +642,7 @@ static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
|
|||
{
|
||||
int rounds = 6 + ctx->key_length / 4;
|
||||
|
||||
if (may_use_simd()) {
|
||||
if (crypto_simd_usable()) {
|
||||
kernel_neon_begin();
|
||||
aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before,
|
||||
enc_after);
|
||||
|
@ -707,7 +707,7 @@ static int cbcmac_final(struct shash_desc *desc, u8 *out)
|
|||
struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
|
||||
struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
|
||||
|
||||
mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1, 0);
|
||||
mac_do_update(&tctx->key, NULL, 0, ctx->dg, (ctx->len != 0), 0);
|
||||
|
||||
memcpy(out, ctx->dg, AES_BLOCK_SIZE);
|
||||
|
||||
|
|
|
@ -288,7 +288,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
|
|||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return aes_ctr_encrypt_fallback(&ctx->fallback, req);
|
||||
|
||||
return ctr_encrypt(req);
|
||||
|
@ -304,6 +304,8 @@ static int __xts_crypt(struct skcipher_request *req,
|
|||
int err;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
kernel_neon_begin();
|
||||
neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/chacha.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -90,7 +91,7 @@ static int chacha_neon(struct skcipher_request *req)
|
|||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
|
||||
if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
|
||||
return crypto_chacha_crypt(req);
|
||||
|
||||
return chacha_neon_stream_xor(req, ctx, req->iv);
|
||||
|
@ -104,7 +105,7 @@ static int xchacha_neon(struct skcipher_request *req)
|
|||
u32 state[16];
|
||||
u8 real_iv[16];
|
||||
|
||||
if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
|
||||
if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
|
||||
return crypto_xchacha_crypt(req);
|
||||
|
||||
crypto_chacha_init(state, ctx, req->iv);
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
#include <linux/string.h>
|
||||
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
|
||||
#include <asm/neon.h>
|
||||
#include <asm/simd.h>
|
||||
|
@ -38,7 +39,7 @@ static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data,
|
|||
{
|
||||
u16 *crc = shash_desc_ctx(desc);
|
||||
|
||||
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
|
||||
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
|
||||
kernel_neon_begin();
|
||||
*crc = crc_t10dif_pmull_p8(*crc, data, length);
|
||||
kernel_neon_end();
|
||||
|
@ -54,7 +55,7 @@ static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data,
|
|||
{
|
||||
u16 *crc = shash_desc_ctx(desc);
|
||||
|
||||
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
|
||||
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
|
||||
kernel_neon_begin();
|
||||
*crc = crc_t10dif_pmull_p64(*crc, data, length);
|
||||
kernel_neon_end();
|
||||
|
|
|
@@ -17,6 +17,7 @@
 #include <crypto/gf128mul.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <linux/cpufeature.h>
@@ -89,7 +90,7 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
                                         struct ghash_key const *k,
                                         const char *head))
 {
-        if (likely(may_use_simd())) {
+        if (likely(crypto_simd_usable())) {
                 kernel_neon_begin();
                 simd_update(blocks, dg, src, key, head);
                 kernel_neon_end();
@@ -441,7 +442,7 @@ static int gcm_encrypt(struct aead_request *req)

         err = skcipher_walk_aead_encrypt(&walk, req, false);

-        if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+        if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
                 u32 const *rk = NULL;

                 kernel_neon_begin();
@@ -473,9 +474,11 @@ static int gcm_encrypt(struct aead_request *req)
                 put_unaligned_be32(2, iv + GCM_IV_SIZE);

                 while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-                        int blocks = walk.nbytes / AES_BLOCK_SIZE;
+                        const int blocks =
+                                walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
                         u8 *dst = walk.dst.virt.addr;
                         u8 *src = walk.src.virt.addr;
+                        int remaining = blocks;

                         do {
                                 __aes_arm64_encrypt(ctx->aes_key.key_enc,
@@ -485,9 +488,9 @@ static int gcm_encrypt(struct aead_request *req)

                                 dst += AES_BLOCK_SIZE;
                                 src += AES_BLOCK_SIZE;
-                        } while (--blocks > 0);
+                        } while (--remaining > 0);

-                        ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
+                        ghash_do_update(blocks, dg,
                                         walk.dst.virt.addr, &ctx->ghash_key,
                                         NULL, pmull_ghash_update_p64);

@@ -563,7 +566,7 @@ static int gcm_decrypt(struct aead_request *req)

         err = skcipher_walk_aead_decrypt(&walk, req, false);

-        if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+        if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
                 u32 const *rk = NULL;

                 kernel_neon_begin();
@@ -609,7 +612,7 @@ static int gcm_decrypt(struct aead_request *req)
                 put_unaligned_be32(2, iv + GCM_IV_SIZE);

                 while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-                        int blocks = walk.nbytes / AES_BLOCK_SIZE;
+                        int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
                         u8 *dst = walk.dst.virt.addr;
                         u8 *src = walk.src.virt.addr;

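Besides the crypto_simd_usable() conversion, the gcm_encrypt()/gcm_decrypt() hunks above change how many blocks the non-SIMD fallback consumes per walk iteration: the surrounding loop works in units of two AES blocks, so the per-iteration count is rounded down to an even number instead of using every block the walker happens to return. A tiny stand-alone check of the two expressions (ordinary userspace C, purely illustrative):

#include <stdio.h>

#define AES_BLOCK_SIZE 16

int main(void)
{
        /* A walk step that returns an odd number of blocks, e.g. 48 bytes. */
        unsigned int nbytes = 3 * AES_BLOCK_SIZE;

        int old_blocks = nbytes / AES_BLOCK_SIZE;            /* 3 */
        int new_blocks = nbytes / (2 * AES_BLOCK_SIZE) * 2;  /* 2 */

        /* Only the rounded-down count matches the two-block granularity
         * that the rest of the loop (and the GHASH update) assumes. */
        printf("old=%d new=%d\n", old_blocks, new_blocks);
        return 0;
}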
@ -9,6 +9,7 @@
|
|||
#include <asm/neon.h>
|
||||
#include <asm/simd.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/nhpoly1305.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
|
@ -25,7 +26,7 @@ static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
|
|||
static int nhpoly1305_neon_update(struct shash_desc *desc,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
if (srclen < 64 || !may_use_simd())
|
||||
if (srclen < 64 || !crypto_simd_usable())
|
||||
return crypto_nhpoly1305_update(desc, src, srclen);
|
||||
|
||||
do {
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
#include <asm/simd.h>
|
||||
#include <asm/unaligned.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/sha.h>
|
||||
#include <crypto/sha1_base.h>
|
||||
#include <linux/cpufeature.h>
|
||||
|
@ -38,7 +39,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
|
|||
{
|
||||
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return crypto_sha1_update(desc, data, len);
|
||||
|
||||
sctx->finalize = 0;
|
||||
|
@ -56,7 +57,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
|
|||
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
|
||||
bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
|
||||
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return crypto_sha1_finup(desc, data, len, out);
|
||||
|
||||
/*
|
||||
|
@ -78,7 +79,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
|
|||
{
|
||||
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return crypto_sha1_finup(desc, NULL, 0, out);
|
||||
|
||||
sctx->finalize = 0;
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
#include <asm/simd.h>
|
||||
#include <asm/unaligned.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/sha.h>
|
||||
#include <crypto/sha256_base.h>
|
||||
#include <linux/cpufeature.h>
|
||||
|
@ -42,7 +43,7 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
|
|||
{
|
||||
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return sha256_base_do_update(desc, data, len,
|
||||
(sha256_block_fn *)sha256_block_data_order);
|
||||
|
||||
|
@ -61,7 +62,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
|
|||
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
|
||||
bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
|
||||
|
||||
if (!may_use_simd()) {
|
||||
if (!crypto_simd_usable()) {
|
||||
if (len)
|
||||
sha256_base_do_update(desc, data, len,
|
||||
(sha256_block_fn *)sha256_block_data_order);
|
||||
|
@ -90,7 +91,7 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
|
|||
{
|
||||
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
if (!may_use_simd()) {
|
||||
if (!crypto_simd_usable()) {
|
||||
sha256_base_do_finalize(desc,
|
||||
(sha256_block_fn *)sha256_block_data_order);
|
||||
return sha256_base_finish(desc, out);
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
#include <asm/neon.h>
|
||||
#include <asm/simd.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/sha.h>
|
||||
#include <crypto/sha256_base.h>
|
||||
#include <linux/cryptohash.h>
|
||||
|
@ -89,7 +90,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
|
|||
{
|
||||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return sha256_base_do_update(desc, data, len,
|
||||
(sha256_block_fn *)sha256_block_data_order);
|
||||
|
||||
|
@ -119,7 +120,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
|
|||
static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len, u8 *out)
|
||||
{
|
||||
if (!may_use_simd()) {
|
||||
if (!crypto_simd_usable()) {
|
||||
if (len)
|
||||
sha256_base_do_update(desc, data, len,
|
||||
(sha256_block_fn *)sha256_block_data_order);
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
#include <asm/simd.h>
|
||||
#include <asm/unaligned.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/sha3.h>
|
||||
#include <linux/cpufeature.h>
|
||||
#include <linux/crypto.h>
|
||||
|
@ -32,7 +33,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
|
|||
struct sha3_state *sctx = shash_desc_ctx(desc);
|
||||
unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
|
||||
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return crypto_sha3_update(desc, data, len);
|
||||
|
||||
if ((sctx->partial + len) >= sctx->rsiz) {
|
||||
|
@ -76,7 +77,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out)
|
|||
__le64 *digest = (__le64 *)out;
|
||||
int i;
|
||||
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return crypto_sha3_final(desc, out);
|
||||
|
||||
sctx->buf[sctx->partial++] = 0x06;
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include <asm/simd.h>
|
||||
#include <asm/unaligned.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/sha.h>
|
||||
#include <crypto/sha512_base.h>
|
||||
#include <linux/cpufeature.h>
|
||||
|
@ -31,7 +32,7 @@ asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
|
|||
static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len)
|
||||
{
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return sha512_base_do_update(desc, data, len,
|
||||
(sha512_block_fn *)sha512_block_data_order);
|
||||
|
||||
|
@ -46,7 +47,7 @@ static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
|
|||
static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len, u8 *out)
|
||||
{
|
||||
if (!may_use_simd()) {
|
||||
if (!crypto_simd_usable()) {
|
||||
if (len)
|
||||
sha512_base_do_update(desc, data, len,
|
||||
(sha512_block_fn *)sha512_block_data_order);
|
||||
|
@ -65,7 +66,7 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
|
|||
|
||||
static int sha512_ce_final(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
if (!may_use_simd()) {
|
||||
if (!crypto_simd_usable()) {
|
||||
sha512_base_do_finalize(desc,
|
||||
(sha512_block_fn *)sha512_block_data_order);
|
||||
return sha512_base_finish(desc, out);
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
#include <asm/simd.h>
|
||||
#include <asm/unaligned.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/sm3.h>
|
||||
#include <crypto/sm3_base.h>
|
||||
#include <linux/cpufeature.h>
|
||||
|
@ -28,7 +29,7 @@ asmlinkage void sm3_ce_transform(struct sm3_state *sst, u8 const *src,
|
|||
static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len)
|
||||
{
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return crypto_sm3_update(desc, data, len);
|
||||
|
||||
kernel_neon_begin();
|
||||
|
@ -40,7 +41,7 @@ static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
|
|||
|
||||
static int sm3_ce_final(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return crypto_sm3_finup(desc, NULL, 0, out);
|
||||
|
||||
kernel_neon_begin();
|
||||
|
@ -53,7 +54,7 @@ static int sm3_ce_final(struct shash_desc *desc, u8 *out)
|
|||
static int sm3_ce_finup(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len, u8 *out)
|
||||
{
|
||||
if (!may_use_simd())
|
||||
if (!crypto_simd_usable())
|
||||
return crypto_sm3_finup(desc, data, len, out);
|
||||
|
||||
kernel_neon_begin();
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
#include <asm/neon.h>
|
||||
#include <asm/simd.h>
|
||||
#include <crypto/sm4.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/cpufeature.h>
|
||||
#include <linux/crypto.h>
|
||||
|
@ -20,7 +21,7 @@ static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
|||
{
|
||||
const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
if (!may_use_simd()) {
|
||||
if (!crypto_simd_usable()) {
|
||||
crypto_sm4_encrypt(tfm, out, in);
|
||||
} else {
|
||||
kernel_neon_begin();
|
||||
|
@ -33,7 +34,7 @@ static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
|||
{
|
||||
const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
if (!may_use_simd()) {
|
||||
if (!crypto_simd_usable()) {
|
||||
crypto_sm4_decrypt(tfm, out, in);
|
||||
} else {
|
||||
kernel_neon_begin();
|
||||
|
|
|
@@ -1,10 +1,12 @@
 #include <linux/crc32.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/cpufeature.h>
+#include <asm/simd.h>
 #include <asm/switch_to.h>

 #define CHKSUM_BLOCK_SIZE 1
@@ -22,7 +24,7 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
         unsigned int prealign;
         unsigned int tail;

-        if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || in_interrupt())
+        if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable())
                 return __crc32c_le(crc, p, len);

         if ((unsigned long)p & VMX_ALIGN_MASK) {

@@ -12,11 +12,13 @@

 #include <linux/crc-t10dif.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/cpufeature.h>
+#include <asm/simd.h>
 #include <asm/switch_to.h>

 #define VMX_ALIGN 16
@@ -32,7 +34,7 @@ static u16 crct10dif_vpmsum(u16 crci, unsigned char const *p, size_t len)
         unsigned int tail;
         u32 crc = crci;

-        if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || in_interrupt())
+        if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable())
                 return crc_t10dif_generic(crc, p, len);

         if ((unsigned long)p & VMX_ALIGN_MASK) {

@@ -10,3 +10,4 @@ generic-y += mcs_spinlock.h
 generic-y += preempt.h
 generic-y += vtime.h
 generic-y += msi.h
+generic-y += simd.h

@@ -224,24 +224,11 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
                        unsigned int key_len)
 {
         struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
+        int err;

-        if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
-            crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
-                          DES_KEY_SIZE)) &&
-            (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
-                tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
-                return -EINVAL;
-        }
-
-        /* in fips mode, ensure k1 != k2 and k2 != k3 and k1 != k3 */
-        if (fips_enabled &&
-            !(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
-              crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
-                            DES_KEY_SIZE) &&
-              crypto_memneq(key, &key[DES_KEY_SIZE * 2], DES_KEY_SIZE))) {
-                tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
-                return -EINVAL;
-        }
+        err = __des3_verify_key(&tfm->crt_flags, key);
+        if (unlikely(err))
+                return err;

         memcpy(ctx->key, key, key_len);
         return 0;

@@ -201,18 +201,15 @@ static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key,
                             unsigned int keylen)
 {
         struct des3_ede_sparc64_ctx *dctx = crypto_tfm_ctx(tfm);
-        const u32 *K = (const u32 *)key;
         u32 *flags = &tfm->crt_flags;
         u64 k1[DES_EXPKEY_WORDS / 2];
         u64 k2[DES_EXPKEY_WORDS / 2];
         u64 k3[DES_EXPKEY_WORDS / 2];
+        int err;

-        if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
-                     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
-            (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
-                *flags |= CRYPTO_TFM_RES_WEAK_KEY;
-                return -EINVAL;
-        }
+        err = __des3_verify_key(flags, key);
+        if (unlikely(err))
+                return err;

         des_sparc64_key_expand((const u32 *)key, k1);
         key += DES_KEY_SIZE;

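The s390 and sparc64 setkey hunks above both replace open-coded 3DES key checks with the new __des3_verify_key() helper, which rejects keys with repeated DES sub-keys (unconditionally in FIPS mode, otherwise only when CRYPTO_TFM_REQ_FORBID_WEAK_KEYS is set) and updates the tfm result flags itself. A minimal sketch of a setkey built on it (the ctx type is a hypothetical stand-in):

#include <crypto/des.h>
#include <linux/crypto.h>
#include <linux/string.h>

struct example_des3_ctx {
        u8 key[DES3_EDE_KEY_SIZE];      /* hypothetical driver context */
};

static int example_des3_setkey(struct crypto_tfm *tfm, const u8 *key,
                               unsigned int key_len)
{
        struct example_des3_ctx *ctx = crypto_tfm_ctx(tfm);
        int err;

        /* Performs the repeated-key checks and sets CRYPTO_TFM_RES_*
         * flags on failure, exactly as in the hunks above. */
        err = __des3_verify_key(&tfm->crt_flags, key);
        if (unlikely(err))
                return err;

        memcpy(ctx->key, key, key_len);
        return 0;
}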
@ -11,8 +11,8 @@
|
|||
* any later version.
|
||||
*/
|
||||
|
||||
#include <crypto/cryptd.h>
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -242,80 +242,7 @@ static void crypto_aegis128_aesni_exit_tfm(struct crypto_aead *aead)
|
|||
{
|
||||
}
|
||||
|
||||
static int cryptd_aegis128_aesni_setkey(struct crypto_aead *aead,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
|
||||
}
|
||||
|
||||
static int cryptd_aegis128_aesni_setauthsize(struct crypto_aead *aead,
|
||||
unsigned int authsize)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
|
||||
}
|
||||
|
||||
static int cryptd_aegis128_aesni_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
aead = &cryptd_tfm->base;
|
||||
if (irq_fpu_usable() && (!in_atomic() ||
|
||||
!cryptd_aead_queued(cryptd_tfm)))
|
||||
aead = cryptd_aead_child(cryptd_tfm);
|
||||
|
||||
aead_request_set_tfm(req, aead);
|
||||
|
||||
return crypto_aead_encrypt(req);
|
||||
}
|
||||
|
||||
static int cryptd_aegis128_aesni_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
aead = &cryptd_tfm->base;
|
||||
if (irq_fpu_usable() && (!in_atomic() ||
|
||||
!cryptd_aead_queued(cryptd_tfm)))
|
||||
aead = cryptd_aead_child(cryptd_tfm);
|
||||
|
||||
aead_request_set_tfm(req, aead);
|
||||
|
||||
return crypto_aead_decrypt(req);
|
||||
}
|
||||
|
||||
static int cryptd_aegis128_aesni_init_tfm(struct crypto_aead *aead)
|
||||
{
|
||||
struct cryptd_aead *cryptd_tfm;
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
|
||||
cryptd_tfm = cryptd_alloc_aead("__aegis128-aesni", CRYPTO_ALG_INTERNAL,
|
||||
CRYPTO_ALG_INTERNAL);
|
||||
if (IS_ERR(cryptd_tfm))
|
||||
return PTR_ERR(cryptd_tfm);
|
||||
|
||||
*ctx = cryptd_tfm;
|
||||
crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cryptd_aegis128_aesni_exit_tfm(struct crypto_aead *aead)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
|
||||
cryptd_free_aead(*ctx);
|
||||
}
|
||||
|
||||
static struct aead_alg crypto_aegis128_aesni_alg[] = {
|
||||
{
|
||||
static struct aead_alg crypto_aegis128_aesni_alg = {
|
||||
.setkey = crypto_aegis128_aesni_setkey,
|
||||
.setauthsize = crypto_aegis128_aesni_setauthsize,
|
||||
.encrypt = crypto_aegis128_aesni_encrypt,
|
||||
|
@ -333,40 +260,17 @@ static struct aead_alg crypto_aegis128_aesni_alg[] = {
|
|||
.cra_ctxsize = sizeof(struct aegis_ctx) +
|
||||
__alignof__(struct aegis_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_priority = 400,
|
||||
|
||||
.cra_name = "__aegis128",
|
||||
.cra_driver_name = "__aegis128-aesni",
|
||||
|
||||
.cra_module = THIS_MODULE,
|
||||
}
|
||||
}, {
|
||||
.setkey = cryptd_aegis128_aesni_setkey,
|
||||
.setauthsize = cryptd_aegis128_aesni_setauthsize,
|
||||
.encrypt = cryptd_aegis128_aesni_encrypt,
|
||||
.decrypt = cryptd_aegis128_aesni_decrypt,
|
||||
.init = cryptd_aegis128_aesni_init_tfm,
|
||||
.exit = cryptd_aegis128_aesni_exit_tfm,
|
||||
|
||||
.ivsize = AEGIS128_NONCE_SIZE,
|
||||
.maxauthsize = AEGIS128_MAX_AUTH_SIZE,
|
||||
.chunksize = AEGIS128_BLOCK_SIZE,
|
||||
|
||||
.base = {
|
||||
.cra_flags = CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct cryptd_aead *),
|
||||
.cra_alignmask = 0,
|
||||
|
||||
.cra_priority = 400,
|
||||
|
||||
.cra_name = "aegis128",
|
||||
.cra_driver_name = "aegis128-aesni",
|
||||
|
||||
.cra_module = THIS_MODULE,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static struct simd_aead_alg *simd_alg;
|
||||
|
||||
static int __init crypto_aegis128_aesni_module_init(void)
|
||||
{
|
||||
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
|
||||
|
@ -374,14 +278,13 @@ static int __init crypto_aegis128_aesni_module_init(void)
|
|||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
|
||||
return -ENODEV;
|
||||
|
||||
return crypto_register_aeads(crypto_aegis128_aesni_alg,
|
||||
ARRAY_SIZE(crypto_aegis128_aesni_alg));
|
||||
return simd_register_aeads_compat(&crypto_aegis128_aesni_alg, 1,
|
||||
&simd_alg);
|
||||
}
|
||||
|
||||
static void __exit crypto_aegis128_aesni_module_exit(void)
|
||||
{
|
||||
crypto_unregister_aeads(crypto_aegis128_aesni_alg,
|
||||
ARRAY_SIZE(crypto_aegis128_aesni_alg));
|
||||
simd_unregister_aeads(&crypto_aegis128_aesni_alg, 1, &simd_alg);
|
||||
}
|
||||
|
||||
module_init(crypto_aegis128_aesni_module_init);
|
||||
|
|
|
@ -11,8 +11,8 @@
|
|||
* any later version.
|
||||
*/
|
||||
|
||||
#include <crypto/cryptd.h>
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -242,80 +242,7 @@ static void crypto_aegis128l_aesni_exit_tfm(struct crypto_aead *aead)
|
|||
{
|
||||
}
|
||||
|
||||
static int cryptd_aegis128l_aesni_setkey(struct crypto_aead *aead,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
|
||||
}
|
||||
|
||||
static int cryptd_aegis128l_aesni_setauthsize(struct crypto_aead *aead,
|
||||
unsigned int authsize)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
|
||||
}
|
||||
|
||||
static int cryptd_aegis128l_aesni_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
aead = &cryptd_tfm->base;
|
||||
if (irq_fpu_usable() && (!in_atomic() ||
|
||||
!cryptd_aead_queued(cryptd_tfm)))
|
||||
aead = cryptd_aead_child(cryptd_tfm);
|
||||
|
||||
aead_request_set_tfm(req, aead);
|
||||
|
||||
return crypto_aead_encrypt(req);
|
||||
}
|
||||
|
||||
static int cryptd_aegis128l_aesni_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
aead = &cryptd_tfm->base;
|
||||
if (irq_fpu_usable() && (!in_atomic() ||
|
||||
!cryptd_aead_queued(cryptd_tfm)))
|
||||
aead = cryptd_aead_child(cryptd_tfm);
|
||||
|
||||
aead_request_set_tfm(req, aead);
|
||||
|
||||
return crypto_aead_decrypt(req);
|
||||
}
|
||||
|
||||
static int cryptd_aegis128l_aesni_init_tfm(struct crypto_aead *aead)
|
||||
{
|
||||
struct cryptd_aead *cryptd_tfm;
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
|
||||
cryptd_tfm = cryptd_alloc_aead("__aegis128l-aesni", CRYPTO_ALG_INTERNAL,
|
||||
CRYPTO_ALG_INTERNAL);
|
||||
if (IS_ERR(cryptd_tfm))
|
||||
return PTR_ERR(cryptd_tfm);
|
||||
|
||||
*ctx = cryptd_tfm;
|
||||
crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cryptd_aegis128l_aesni_exit_tfm(struct crypto_aead *aead)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
|
||||
cryptd_free_aead(*ctx);
|
||||
}
|
||||
|
||||
static struct aead_alg crypto_aegis128l_aesni_alg[] = {
|
||||
{
|
||||
static struct aead_alg crypto_aegis128l_aesni_alg = {
|
||||
.setkey = crypto_aegis128l_aesni_setkey,
|
||||
.setauthsize = crypto_aegis128l_aesni_setauthsize,
|
||||
.encrypt = crypto_aegis128l_aesni_encrypt,
|
||||
|
@ -333,40 +260,17 @@ static struct aead_alg crypto_aegis128l_aesni_alg[] = {
|
|||
.cra_ctxsize = sizeof(struct aegis_ctx) +
|
||||
__alignof__(struct aegis_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_priority = 400,
|
||||
|
||||
.cra_name = "__aegis128l",
|
||||
.cra_driver_name = "__aegis128l-aesni",
|
||||
|
||||
.cra_module = THIS_MODULE,
|
||||
}
|
||||
}, {
|
||||
.setkey = cryptd_aegis128l_aesni_setkey,
|
||||
.setauthsize = cryptd_aegis128l_aesni_setauthsize,
|
||||
.encrypt = cryptd_aegis128l_aesni_encrypt,
|
||||
.decrypt = cryptd_aegis128l_aesni_decrypt,
|
||||
.init = cryptd_aegis128l_aesni_init_tfm,
|
||||
.exit = cryptd_aegis128l_aesni_exit_tfm,
|
||||
|
||||
.ivsize = AEGIS128L_NONCE_SIZE,
|
||||
.maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
|
||||
.chunksize = AEGIS128L_BLOCK_SIZE,
|
||||
|
||||
.base = {
|
||||
.cra_flags = CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct cryptd_aead *),
|
||||
.cra_alignmask = 0,
|
||||
|
||||
.cra_priority = 400,
|
||||
|
||||
.cra_name = "aegis128l",
|
||||
.cra_driver_name = "aegis128l-aesni",
|
||||
|
||||
.cra_module = THIS_MODULE,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static struct simd_aead_alg *simd_alg;
|
||||
|
||||
static int __init crypto_aegis128l_aesni_module_init(void)
|
||||
{
|
||||
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
|
||||
|
@ -374,14 +278,13 @@ static int __init crypto_aegis128l_aesni_module_init(void)
|
|||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
|
||||
return -ENODEV;
|
||||
|
||||
return crypto_register_aeads(crypto_aegis128l_aesni_alg,
|
||||
ARRAY_SIZE(crypto_aegis128l_aesni_alg));
|
||||
return simd_register_aeads_compat(&crypto_aegis128l_aesni_alg, 1,
|
||||
&simd_alg);
|
||||
}
|
||||
|
||||
static void __exit crypto_aegis128l_aesni_module_exit(void)
|
||||
{
|
||||
crypto_unregister_aeads(crypto_aegis128l_aesni_alg,
|
||||
ARRAY_SIZE(crypto_aegis128l_aesni_alg));
|
||||
simd_unregister_aeads(&crypto_aegis128l_aesni_alg, 1, &simd_alg);
|
||||
}
|
||||
|
||||
module_init(crypto_aegis128l_aesni_module_init);
|
||||
|
|
|
@ -11,8 +11,8 @@
|
|||
* any later version.
|
||||
*/
|
||||
|
||||
#include <crypto/cryptd.h>
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -242,80 +242,7 @@ static void crypto_aegis256_aesni_exit_tfm(struct crypto_aead *aead)
|
|||
{
|
||||
}
|
||||
|
||||
static int cryptd_aegis256_aesni_setkey(struct crypto_aead *aead,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
|
||||
}
|
||||
|
||||
static int cryptd_aegis256_aesni_setauthsize(struct crypto_aead *aead,
|
||||
unsigned int authsize)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
|
||||
}
|
||||
|
||||
static int cryptd_aegis256_aesni_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
aead = &cryptd_tfm->base;
|
||||
if (irq_fpu_usable() && (!in_atomic() ||
|
||||
!cryptd_aead_queued(cryptd_tfm)))
|
||||
aead = cryptd_aead_child(cryptd_tfm);
|
||||
|
||||
aead_request_set_tfm(req, aead);
|
||||
|
||||
return crypto_aead_encrypt(req);
|
||||
}
|
||||
|
||||
static int cryptd_aegis256_aesni_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
aead = &cryptd_tfm->base;
|
||||
if (irq_fpu_usable() && (!in_atomic() ||
|
||||
!cryptd_aead_queued(cryptd_tfm)))
|
||||
aead = cryptd_aead_child(cryptd_tfm);
|
||||
|
||||
aead_request_set_tfm(req, aead);
|
||||
|
||||
return crypto_aead_decrypt(req);
|
||||
}
|
||||
|
||||
static int cryptd_aegis256_aesni_init_tfm(struct crypto_aead *aead)
|
||||
{
|
||||
struct cryptd_aead *cryptd_tfm;
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
|
||||
cryptd_tfm = cryptd_alloc_aead("__aegis256-aesni", CRYPTO_ALG_INTERNAL,
|
||||
CRYPTO_ALG_INTERNAL);
|
||||
if (IS_ERR(cryptd_tfm))
|
||||
return PTR_ERR(cryptd_tfm);
|
||||
|
||||
*ctx = cryptd_tfm;
|
||||
crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cryptd_aegis256_aesni_exit_tfm(struct crypto_aead *aead)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
|
||||
cryptd_free_aead(*ctx);
|
||||
}
|
||||
|
||||
static struct aead_alg crypto_aegis256_aesni_alg[] = {
|
||||
{
|
||||
static struct aead_alg crypto_aegis256_aesni_alg = {
|
||||
.setkey = crypto_aegis256_aesni_setkey,
|
||||
.setauthsize = crypto_aegis256_aesni_setauthsize,
|
||||
.encrypt = crypto_aegis256_aesni_encrypt,
|
||||
|
@ -333,40 +260,17 @@ static struct aead_alg crypto_aegis256_aesni_alg[] = {
|
|||
.cra_ctxsize = sizeof(struct aegis_ctx) +
|
||||
__alignof__(struct aegis_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_priority = 400,
|
||||
|
||||
.cra_name = "__aegis256",
|
||||
.cra_driver_name = "__aegis256-aesni",
|
||||
|
||||
.cra_module = THIS_MODULE,
|
||||
}
|
||||
}, {
|
||||
.setkey = cryptd_aegis256_aesni_setkey,
|
||||
.setauthsize = cryptd_aegis256_aesni_setauthsize,
|
||||
.encrypt = cryptd_aegis256_aesni_encrypt,
|
||||
.decrypt = cryptd_aegis256_aesni_decrypt,
|
||||
.init = cryptd_aegis256_aesni_init_tfm,
|
||||
.exit = cryptd_aegis256_aesni_exit_tfm,
|
||||
|
||||
.ivsize = AEGIS256_NONCE_SIZE,
|
||||
.maxauthsize = AEGIS256_MAX_AUTH_SIZE,
|
||||
.chunksize = AEGIS256_BLOCK_SIZE,
|
||||
|
||||
.base = {
|
||||
.cra_flags = CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct cryptd_aead *),
|
||||
.cra_alignmask = 0,
|
||||
|
||||
.cra_priority = 400,
|
||||
|
||||
.cra_name = "aegis256",
|
||||
.cra_driver_name = "aegis256-aesni",
|
||||
|
||||
.cra_module = THIS_MODULE,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static struct simd_aead_alg *simd_alg;
|
||||
|
||||
static int __init crypto_aegis256_aesni_module_init(void)
|
||||
{
|
||||
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
|
||||
|
@ -374,14 +278,13 @@ static int __init crypto_aegis256_aesni_module_init(void)
|
|||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
|
||||
return -ENODEV;
|
||||
|
||||
return crypto_register_aeads(crypto_aegis256_aesni_alg,
|
||||
ARRAY_SIZE(crypto_aegis256_aesni_alg));
|
||||
return simd_register_aeads_compat(&crypto_aegis256_aesni_alg, 1,
|
||||
&simd_alg);
|
||||
}
|
||||
|
||||
static void __exit crypto_aegis256_aesni_module_exit(void)
|
||||
{
|
||||
crypto_unregister_aeads(crypto_aegis256_aesni_alg,
|
||||
ARRAY_SIZE(crypto_aegis256_aesni_alg));
|
||||
simd_unregister_aeads(&crypto_aegis256_aesni_alg, 1, &simd_alg);
|
||||
}
|
||||
|
||||
module_init(crypto_aegis256_aesni_module_init);
|
||||
|
|
|
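The three AEGIS glue files above and the AES-NI glue file below all follow the same conversion: the hand-written cryptd wrapper algorithms (the cryptd_*_setkey/encrypt/decrypt/init/exit functions and the second, async entry in each aead_alg array) are deleted, and the internal algorithm is registered through the new simd helpers, which create the async wrapper automatically. A hedged sketch of a module using the new interface (the algorithm body is abbreviated and all names are illustrative):

#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <linux/module.h>

/*
 * Abbreviated for illustration: a real driver also fills in setkey,
 * setauthsize, encrypt, decrypt, ivsize, maxauthsize and so on.
 */
static struct aead_alg example_aead_alg = {
        .base = {
                .cra_name               = "__example-aead",
                .cra_driver_name        = "__example-aead-simd",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = 1,
                .cra_module             = THIS_MODULE,
        },
};

static struct simd_aead_alg *example_simd_alg;

static int __init example_mod_init(void)
{
        /*
         * Registers the internal algorithm and creates the cryptd-backed
         * async wrapper ("example-aead") in one call, replacing the
         * hand-written cryptd_alloc_aead()/cryptd_free_aead() glue.
         */
        return simd_register_aeads_compat(&example_aead_alg, 1,
                                          &example_simd_alg);
}

static void __exit example_mod_exit(void)
{
        simd_unregister_aeads(&example_aead_alg, 1, &example_simd_alg);
}

module_init(example_mod_init);
module_exit(example_mod_exit);
MODULE_LICENSE("GPL");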
@ -25,14 +25,13 @@
|
|||
#include <linux/err.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/aes.h>
|
||||
#include <crypto/cryptd.h>
|
||||
#include <crypto/ctr.h>
|
||||
#include <crypto/b128ops.h>
|
||||
#include <crypto/gcm.h>
|
||||
#include <crypto/xts.h>
|
||||
#include <asm/cpu_device_id.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/crypto/aes.h>
|
||||
#include <asm/simd.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
|
@ -333,7 +332,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!irq_fpu_usable())
|
||||
if (!crypto_simd_usable())
|
||||
err = crypto_aes_expand_key(ctx, in_key, key_len);
|
||||
else {
|
||||
kernel_fpu_begin();
|
||||
|
@ -354,7 +353,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
|||
{
|
||||
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
|
||||
|
||||
if (!irq_fpu_usable())
|
||||
if (!crypto_simd_usable())
|
||||
crypto_aes_encrypt_x86(ctx, dst, src);
|
||||
else {
|
||||
kernel_fpu_begin();
|
||||
|
@ -367,7 +366,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
|||
{
|
||||
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
|
||||
|
||||
if (!irq_fpu_usable())
|
||||
if (!crypto_simd_usable())
|
||||
crypto_aes_decrypt_x86(ctx, dst, src);
|
||||
else {
|
||||
kernel_fpu_begin();
|
||||
|
@ -643,29 +642,6 @@ static int xts_decrypt(struct skcipher_request *req)
|
|||
aes_ctx(ctx->raw_crypt_ctx));
|
||||
}
|
||||
|
||||
static int rfc4106_init(struct crypto_aead *aead)
|
||||
{
|
||||
struct cryptd_aead *cryptd_tfm;
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
|
||||
cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
CRYPTO_ALG_INTERNAL);
|
||||
if (IS_ERR(cryptd_tfm))
|
||||
return PTR_ERR(cryptd_tfm);
|
||||
|
||||
*ctx = cryptd_tfm;
|
||||
crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void rfc4106_exit(struct crypto_aead *aead)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
|
||||
cryptd_free_aead(*ctx);
|
||||
}
|
||||
|
||||
static int
|
||||
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
|
||||
{
|
||||
|
@ -710,15 +686,8 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
|
|||
rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
|
||||
}
|
||||
|
||||
static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(parent);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
|
||||
}
|
||||
|
||||
/* This is the Integrity Check Value (aka the authentication tag) length and can
|
||||
* be 8, 12 or 16 bytes long. */
|
||||
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
|
||||
unsigned int authsize)
|
||||
{
|
||||
|
@ -734,17 +703,6 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* This is the Integrity Check Value (aka the authentication tag length and can
|
||||
* be 8, 12 or 16 bytes long. */
|
||||
static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent,
|
||||
unsigned int authsize)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(parent);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
|
||||
}
|
||||
|
||||
static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
|
||||
unsigned int authsize)
|
||||
{
|
||||
|
@ -964,38 +922,6 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
|
|||
return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
|
||||
aes_ctx);
|
||||
}
|
||||
|
||||
static int gcmaes_wrapper_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
tfm = &cryptd_tfm->base;
|
||||
if (irq_fpu_usable() && (!in_atomic() ||
|
||||
!cryptd_aead_queued(cryptd_tfm)))
|
||||
tfm = cryptd_aead_child(cryptd_tfm);
|
||||
|
||||
aead_request_set_tfm(req, tfm);
|
||||
|
||||
return crypto_aead_encrypt(req);
|
||||
}
|
||||
|
||||
static int gcmaes_wrapper_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
tfm = &cryptd_tfm->base;
|
||||
if (irq_fpu_usable() && (!in_atomic() ||
|
||||
!cryptd_aead_queued(cryptd_tfm)))
|
||||
tfm = cryptd_aead_child(cryptd_tfm);
|
||||
|
||||
aead_request_set_tfm(req, tfm);
|
||||
|
||||
return crypto_aead_decrypt(req);
|
||||
}
|
||||
#endif
|
||||
|
||||
static struct crypto_alg aesni_algs[] = { {
|
||||
|
@ -1148,31 +1074,7 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
|
|||
aes_ctx);
|
||||
}
|
||||
|
||||
static int generic_gcmaes_init(struct crypto_aead *aead)
|
||||
{
|
||||
struct cryptd_aead *cryptd_tfm;
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
|
||||
cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni",
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
CRYPTO_ALG_INTERNAL);
|
||||
if (IS_ERR(cryptd_tfm))
|
||||
return PTR_ERR(cryptd_tfm);
|
||||
|
||||
*ctx = cryptd_tfm;
|
||||
crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void generic_gcmaes_exit(struct crypto_aead *aead)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
|
||||
cryptd_free_aead(*ctx);
|
||||
}
|
||||
|
||||
static struct aead_alg aesni_aead_algs[] = { {
|
||||
static struct aead_alg aesni_aeads[] = { {
|
||||
.setkey = common_rfc4106_set_key,
|
||||
.setauthsize = common_rfc4106_set_authsize,
|
||||
.encrypt = helper_rfc4106_encrypt,
|
||||
|
@ -1180,32 +1082,15 @@ static struct aead_alg aesni_aead_algs[] = { {
|
|||
.ivsize = GCM_RFC4106_IV_SIZE,
|
||||
.maxauthsize = 16,
|
||||
.base = {
|
||||
.cra_name = "__gcm-aes-aesni",
|
||||
.cra_driver_name = "__driver-gcm-aes-aesni",
|
||||
.cra_name = "__rfc4106(gcm(aes))",
|
||||
.cra_driver_name = "__rfc4106-gcm-aesni",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx),
|
||||
.cra_alignmask = AESNI_ALIGN - 1,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
}, {
|
||||
.init = rfc4106_init,
|
||||
.exit = rfc4106_exit,
|
||||
.setkey = gcmaes_wrapper_set_key,
|
||||
.setauthsize = gcmaes_wrapper_set_authsize,
|
||||
.encrypt = gcmaes_wrapper_encrypt,
|
||||
.decrypt = gcmaes_wrapper_decrypt,
|
||||
.ivsize = GCM_RFC4106_IV_SIZE,
|
||||
.maxauthsize = 16,
|
||||
.base = {
|
||||
.cra_name = "rfc4106(gcm(aes))",
|
||||
.cra_driver_name = "rfc4106-gcm-aesni",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct cryptd_aead *),
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
}, {
|
||||
.setkey = generic_gcmaes_set_key,
|
||||
.setauthsize = generic_gcmaes_set_authsize,
|
||||
|
@ -1214,38 +1099,21 @@ static struct aead_alg aesni_aead_algs[] = { {
|
|||
.ivsize = GCM_AES_IV_SIZE,
|
||||
.maxauthsize = 16,
|
||||
.base = {
|
||||
.cra_name = "__generic-gcm-aes-aesni",
|
||||
.cra_driver_name = "__driver-generic-gcm-aes-aesni",
|
||||
.cra_priority = 0,
|
||||
.cra_name = "__gcm(aes)",
|
||||
.cra_driver_name = "__generic-gcm-aesni",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
|
||||
.cra_alignmask = AESNI_ALIGN - 1,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
}, {
|
||||
.init = generic_gcmaes_init,
|
||||
.exit = generic_gcmaes_exit,
|
||||
.setkey = gcmaes_wrapper_set_key,
|
||||
.setauthsize = gcmaes_wrapper_set_authsize,
|
||||
.encrypt = gcmaes_wrapper_encrypt,
|
||||
.decrypt = gcmaes_wrapper_decrypt,
|
||||
.ivsize = GCM_AES_IV_SIZE,
|
||||
.maxauthsize = 16,
|
||||
.base = {
|
||||
.cra_name = "gcm(aes)",
|
||||
.cra_driver_name = "generic-gcm-aesni",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct cryptd_aead *),
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
} };
|
||||
#else
|
||||
static struct aead_alg aesni_aead_algs[0];
|
||||
static struct aead_alg aesni_aeads[0];
|
||||
#endif
|
||||
|
||||
static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];
|
||||
|
||||
static const struct x86_cpu_id aesni_cpu_id[] = {
|
||||
X86_FEATURE_MATCH(X86_FEATURE_AES),
|
||||
|
@ -1253,23 +1121,9 @@ static const struct x86_cpu_id aesni_cpu_id[] = {
|
|||
};
|
||||
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
|
||||
|
||||
static void aesni_free_simds(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
|
||||
aesni_simd_skciphers[i]; i++)
|
||||
simd_skcipher_free(aesni_simd_skciphers[i]);
|
||||
}
|
||||
|
||||
static int __init aesni_init(void)
|
||||
{
|
||||
struct simd_skcipher_alg *simd;
|
||||
const char *basename;
|
||||
const char *algname;
|
||||
const char *drvname;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
if (!x86_match_cpu(aesni_cpu_id))
|
||||
return -ENODEV;
|
||||
|
@ -1304,36 +1158,22 @@ static int __init aesni_init(void)
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
err = crypto_register_skciphers(aesni_skciphers,
|
||||
ARRAY_SIZE(aesni_skciphers));
|
||||
err = simd_register_skciphers_compat(aesni_skciphers,
|
||||
ARRAY_SIZE(aesni_skciphers),
|
||||
aesni_simd_skciphers);
|
||||
if (err)
|
||||
goto unregister_algs;
|
||||
|
||||
err = crypto_register_aeads(aesni_aead_algs,
|
||||
ARRAY_SIZE(aesni_aead_algs));
|
||||
err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
|
||||
aesni_simd_aeads);
|
||||
if (err)
|
||||
goto unregister_skciphers;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
|
||||
algname = aesni_skciphers[i].base.cra_name + 2;
|
||||
drvname = aesni_skciphers[i].base.cra_driver_name + 2;
|
||||
basename = aesni_skciphers[i].base.cra_driver_name;
|
||||
simd = simd_skcipher_create_compat(algname, drvname, basename);
|
||||
err = PTR_ERR(simd);
|
||||
if (IS_ERR(simd))
|
||||
goto unregister_simds;
|
||||
|
||||
aesni_simd_skciphers[i] = simd;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
unregister_simds:
|
||||
aesni_free_simds();
|
||||
crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
|
||||
unregister_skciphers:
|
||||
crypto_unregister_skciphers(aesni_skciphers,
|
||||
ARRAY_SIZE(aesni_skciphers));
|
||||
simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
|
||||
aesni_simd_skciphers);
|
||||
unregister_algs:
|
||||
crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
|
||||
return err;
|
||||
|
@@ -1341,10 +1181,10 @@ unregister_algs:

static void __exit aesni_exit(void)
{
	aesni_free_simds();
	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
	crypto_unregister_skciphers(aesni_skciphers,
				    ARRAY_SIZE(aesni_skciphers));
	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
			      aesni_simd_aeads);
	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
				  aesni_simd_skciphers);
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}
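
As a point of reference (this sketch is not part of the diff itself), the
registration pattern the aesni glue switches to uses the AEAD helpers this
series adds to crypto/simd.c; "example_aeads" and the init/exit wiring
below are illustrative placeholders to be filled with real algorithm
definitions:

	static struct aead_alg example_aeads[1];
	static struct simd_aead_alg *example_simd_aeads[ARRAY_SIZE(example_aeads)];

	static int __init example_init(void)
	{
		/* Registers each internal alg plus a SIMD wrapper that defers
		 * to cryptd when the FPU is not usable in this context. */
		return simd_register_aeads_compat(example_aeads,
						  ARRAY_SIZE(example_aeads),
						  example_simd_aeads);
	}

	static void __exit example_exit(void)
	{
		simd_unregister_aeads(example_aeads, ARRAY_SIZE(example_aeads),
				      example_simd_aeads);
	}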
@ -12,10 +12,10 @@
|
|||
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/chacha.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
#define CHACHA_STATE_ALIGN 16
|
||||
|
@ -170,7 +170,7 @@ static int chacha_simd(struct skcipher_request *req)
|
|||
struct skcipher_walk walk;
|
||||
int err;
|
||||
|
||||
if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
|
||||
if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
|
||||
return crypto_chacha_crypt(req);
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
|
@@ -193,7 +193,7 @@ static int xchacha_simd(struct skcipher_request *req)
	u8 real_iv[16];
	int err;

	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
		return crypto_xchacha_crypt(req);

	err = skcipher_walk_virt(&walk, req, true);
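
The repeated irq_fpu_usable()/may_use_simd() to crypto_simd_usable()
conversions in these glue files use the new helper from
<crypto/internal/simd.h>. Roughly (a sketch from memory, not quoted from
this diff), it wraps the architecture check with a per-CPU knob that the
extra fuzz tests can flip to force the generic fallback paths to run:

	DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);

	static inline bool crypto_simd_usable(void)
	{
	#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
		/* testmgr can force the scalar fallback path */
		if (this_cpu_read(crypto_simd_disabled_for_test))
			return false;
	#endif
		return may_use_simd();
	}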
@ -32,10 +32,11 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/crc32.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
|
||||
#include <asm/cpufeatures.h>
|
||||
#include <asm/cpu_device_id.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
#define CHKSUM_BLOCK_SIZE 1
|
||||
#define CHKSUM_DIGEST_SIZE 4
|
||||
|
@ -54,7 +55,7 @@ static u32 __attribute__((pure))
|
|||
unsigned int iremainder;
|
||||
unsigned int prealign;
|
||||
|
||||
if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !irq_fpu_usable())
|
||||
if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !crypto_simd_usable())
|
||||
return crc32_le(crc, p, len);
|
||||
|
||||
if ((long)p & SCALE_F_MASK) {
|
||||
|
|
|
@ -29,10 +29,11 @@
|
|||
#include <linux/string.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
|
||||
#include <asm/cpufeatures.h>
|
||||
#include <asm/cpu_device_id.h>
|
||||
#include <asm/fpu/internal.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
#define CHKSUM_BLOCK_SIZE 1
|
||||
#define CHKSUM_DIGEST_SIZE 4
|
||||
|
@ -177,7 +178,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
|
|||
* use faster PCL version if datasize is large enough to
|
||||
* overcome kernel fpu state save/restore overhead
|
||||
*/
|
||||
if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
|
||||
if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
|
||||
kernel_fpu_begin();
|
||||
*crcp = crc_pcl(data, len, *crcp);
|
||||
kernel_fpu_end();
|
||||
|
@ -189,7 +190,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
|
|||
static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
|
||||
u8 *out)
|
||||
{
|
||||
if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
|
||||
if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
|
||||
kernel_fpu_begin();
|
||||
*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
|
||||
kernel_fpu_end();
|
||||
|
|
|
@ -26,12 +26,13 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/crc-t10dif.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/cpufeatures.h>
|
||||
#include <asm/cpu_device_id.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
asmlinkage u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len);
|
||||
|
||||
|
@ -53,7 +54,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data,
|
|||
{
|
||||
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
|
||||
|
||||
if (length >= 16 && irq_fpu_usable()) {
|
||||
if (length >= 16 && crypto_simd_usable()) {
|
||||
kernel_fpu_begin();
|
||||
ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
|
||||
kernel_fpu_end();
|
||||
|
@ -70,15 +71,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
|
||||
u8 *out)
|
||||
static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
|
||||
{
|
||||
if (len >= 16 && irq_fpu_usable()) {
|
||||
if (len >= 16 && crypto_simd_usable()) {
|
||||
kernel_fpu_begin();
|
||||
*(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
|
||||
*(__u16 *)out = crc_t10dif_pcl(crc, data, len);
|
||||
kernel_fpu_end();
|
||||
} else
|
||||
*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
|
||||
*(__u16 *)out = crc_t10dif_generic(crc, data, len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -87,15 +87,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
|
|||
{
|
||||
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
|
||||
|
||||
return __chksum_finup(&ctx->crc, data, len, out);
|
||||
return __chksum_finup(ctx->crc, data, len, out);
|
||||
}
|
||||
|
||||
static int chksum_digest(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int length, u8 *out)
|
||||
{
|
||||
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
|
||||
|
||||
return __chksum_finup(&ctx->crc, data, length, out);
|
||||
return __chksum_finup(0, data, length, out);
|
||||
}
|
||||
|
||||
static struct shash_alg alg = {
|
||||
|
|
|
@ -19,8 +19,9 @@
|
|||
#include <crypto/cryptd.h>
|
||||
#include <crypto/gf128mul.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <asm/cpu_device_id.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
#define GHASH_BLOCK_SIZE 16
|
||||
#define GHASH_DIGEST_SIZE 16
|
||||
|
@ -171,7 +172,6 @@ static int ghash_async_init(struct ahash_request *req)
|
|||
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
|
||||
|
||||
desc->tfm = child;
|
||||
desc->flags = req->base.flags;
|
||||
return crypto_shash_init(desc);
|
||||
}
|
||||
|
||||
|
@ -182,7 +182,7 @@ static int ghash_async_update(struct ahash_request *req)
|
|||
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
|
||||
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
|
||||
|
||||
if (!irq_fpu_usable() ||
|
||||
if (!crypto_simd_usable() ||
|
||||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
|
||||
memcpy(cryptd_req, req, sizeof(*req));
|
||||
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
|
||||
|
@ -200,7 +200,7 @@ static int ghash_async_final(struct ahash_request *req)
|
|||
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
|
||||
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
|
||||
|
||||
if (!irq_fpu_usable() ||
|
||||
if (!crypto_simd_usable() ||
|
||||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
|
||||
memcpy(cryptd_req, req, sizeof(*req));
|
||||
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
|
||||
|
@ -241,7 +241,7 @@ static int ghash_async_digest(struct ahash_request *req)
|
|||
struct ahash_request *cryptd_req = ahash_request_ctx(req);
|
||||
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
|
||||
|
||||
if (!irq_fpu_usable() ||
|
||||
if (!crypto_simd_usable() ||
|
||||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
|
||||
memcpy(cryptd_req, req, sizeof(*req));
|
||||
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
|
||||
|
@ -251,7 +251,6 @@ static int ghash_async_digest(struct ahash_request *req)
|
|||
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
|
||||
|
||||
desc->tfm = child;
|
||||
desc->flags = req->base.flags;
|
||||
return shash_ahash_digest(req, desc);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
*/
|
||||
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/morus1280_glue.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/fpu/api.h>
|
||||
|
@ -35,7 +36,9 @@ asmlinkage void crypto_morus1280_avx2_dec_tail(void *state, const void *src,
|
|||
asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor,
|
||||
u64 assoclen, u64 cryptlen);
|
||||
|
||||
MORUS1280_DECLARE_ALGS(avx2, "morus1280-avx2", 400);
|
||||
MORUS1280_DECLARE_ALG(avx2, "morus1280-avx2", 400);
|
||||
|
||||
static struct simd_aead_alg *simd_alg;
|
||||
|
||||
static int __init crypto_morus1280_avx2_module_init(void)
|
||||
{
|
||||
|
@ -44,14 +47,13 @@ static int __init crypto_morus1280_avx2_module_init(void)
|
|||
!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
|
||||
return -ENODEV;
|
||||
|
||||
return crypto_register_aeads(crypto_morus1280_avx2_algs,
|
||||
ARRAY_SIZE(crypto_morus1280_avx2_algs));
|
||||
return simd_register_aeads_compat(&crypto_morus1280_avx2_alg, 1,
|
||||
&simd_alg);
|
||||
}
|
||||
|
||||
static void __exit crypto_morus1280_avx2_module_exit(void)
|
||||
{
|
||||
crypto_unregister_aeads(crypto_morus1280_avx2_algs,
|
||||
ARRAY_SIZE(crypto_morus1280_avx2_algs));
|
||||
simd_unregister_aeads(&crypto_morus1280_avx2_alg, 1, &simd_alg);
|
||||
}
|
||||
|
||||
module_init(crypto_morus1280_avx2_module_init);
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
*/
|
||||
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/morus1280_glue.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/fpu/api.h>
|
||||
|
@ -35,7 +36,9 @@ asmlinkage void crypto_morus1280_sse2_dec_tail(void *state, const void *src,
|
|||
asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor,
|
||||
u64 assoclen, u64 cryptlen);
|
||||
|
||||
MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
|
||||
MORUS1280_DECLARE_ALG(sse2, "morus1280-sse2", 350);
|
||||
|
||||
static struct simd_aead_alg *simd_alg;
|
||||
|
||||
static int __init crypto_morus1280_sse2_module_init(void)
|
||||
{
|
||||
|
@ -43,14 +46,13 @@ static int __init crypto_morus1280_sse2_module_init(void)
|
|||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
|
||||
return -ENODEV;
|
||||
|
||||
return crypto_register_aeads(crypto_morus1280_sse2_algs,
|
||||
ARRAY_SIZE(crypto_morus1280_sse2_algs));
|
||||
return simd_register_aeads_compat(&crypto_morus1280_sse2_alg, 1,
|
||||
&simd_alg);
|
||||
}
|
||||
|
||||
static void __exit crypto_morus1280_sse2_module_exit(void)
|
||||
{
|
||||
crypto_unregister_aeads(crypto_morus1280_sse2_algs,
|
||||
ARRAY_SIZE(crypto_morus1280_sse2_algs));
|
||||
simd_unregister_aeads(&crypto_morus1280_sse2_alg, 1, &simd_alg);
|
||||
}
|
||||
|
||||
module_init(crypto_morus1280_sse2_module_init);
|
||||
|
|
|
@ -11,7 +11,6 @@
|
|||
* any later version.
|
||||
*/
|
||||
|
||||
#include <crypto/cryptd.h>
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/morus1280_glue.h>
|
||||
|
@ -205,90 +204,6 @@ void crypto_morus1280_glue_init_ops(struct crypto_aead *aead,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_init_ops);
|
||||
|
||||
int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setkey);
|
||||
|
||||
int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead,
|
||||
unsigned int authsize)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setauthsize);
|
||||
|
||||
int cryptd_morus1280_glue_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
aead = &cryptd_tfm->base;
|
||||
if (irq_fpu_usable() && (!in_atomic() ||
|
||||
!cryptd_aead_queued(cryptd_tfm)))
|
||||
aead = cryptd_aead_child(cryptd_tfm);
|
||||
|
||||
aead_request_set_tfm(req, aead);
|
||||
|
||||
return crypto_aead_encrypt(req);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_encrypt);
|
||||
|
||||
int cryptd_morus1280_glue_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
aead = &cryptd_tfm->base;
|
||||
if (irq_fpu_usable() && (!in_atomic() ||
|
||||
!cryptd_aead_queued(cryptd_tfm)))
|
||||
aead = cryptd_aead_child(cryptd_tfm);
|
||||
|
||||
aead_request_set_tfm(req, aead);
|
||||
|
||||
return crypto_aead_decrypt(req);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_decrypt);
|
||||
|
||||
int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead)
|
||||
{
|
||||
struct cryptd_aead *cryptd_tfm;
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
const char *name = crypto_aead_alg(aead)->base.cra_driver_name;
|
||||
char internal_name[CRYPTO_MAX_ALG_NAME];
|
||||
|
||||
if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name)
|
||||
>= CRYPTO_MAX_ALG_NAME)
|
||||
return -ENAMETOOLONG;
|
||||
|
||||
cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL,
|
||||
CRYPTO_ALG_INTERNAL);
|
||||
if (IS_ERR(cryptd_tfm))
|
||||
return PTR_ERR(cryptd_tfm);
|
||||
|
||||
*ctx = cryptd_tfm;
|
||||
crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_init_tfm);
|
||||
|
||||
void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
|
||||
cryptd_free_aead(*ctx);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_exit_tfm);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
|
||||
MODULE_DESCRIPTION("MORUS-1280 AEAD mode -- glue for x86 optimizations");
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
*/
|
||||
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/morus640_glue.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/fpu/api.h>
|
||||
|
@ -35,7 +36,9 @@ asmlinkage void crypto_morus640_sse2_dec_tail(void *state, const void *src,
|
|||
asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor,
|
||||
u64 assoclen, u64 cryptlen);
|
||||
|
||||
MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
|
||||
MORUS640_DECLARE_ALG(sse2, "morus640-sse2", 400);
|
||||
|
||||
static struct simd_aead_alg *simd_alg;
|
||||
|
||||
static int __init crypto_morus640_sse2_module_init(void)
|
||||
{
|
||||
|
@ -43,14 +46,13 @@ static int __init crypto_morus640_sse2_module_init(void)
|
|||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
|
||||
return -ENODEV;
|
||||
|
||||
return crypto_register_aeads(crypto_morus640_sse2_algs,
|
||||
ARRAY_SIZE(crypto_morus640_sse2_algs));
|
||||
return simd_register_aeads_compat(&crypto_morus640_sse2_alg, 1,
|
||||
&simd_alg);
|
||||
}
|
||||
|
||||
static void __exit crypto_morus640_sse2_module_exit(void)
|
||||
{
|
||||
crypto_unregister_aeads(crypto_morus640_sse2_algs,
|
||||
ARRAY_SIZE(crypto_morus640_sse2_algs));
|
||||
simd_unregister_aeads(&crypto_morus640_sse2_alg, 1, &simd_alg);
|
||||
}
|
||||
|
||||
module_init(crypto_morus640_sse2_module_init);
|
||||
|
|
|
@ -11,7 +11,6 @@
|
|||
* any later version.
|
||||
*/
|
||||
|
||||
#include <crypto/cryptd.h>
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/morus640_glue.h>
|
||||
|
@ -200,90 +199,6 @@ void crypto_morus640_glue_init_ops(struct crypto_aead *aead,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_morus640_glue_init_ops);
|
||||
|
||||
int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setkey);
|
||||
|
||||
int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead,
|
||||
unsigned int authsize)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setauthsize);
|
||||
|
||||
int cryptd_morus640_glue_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
aead = &cryptd_tfm->base;
|
||||
if (irq_fpu_usable() && (!in_atomic() ||
|
||||
!cryptd_aead_queued(cryptd_tfm)))
|
||||
aead = cryptd_aead_child(cryptd_tfm);
|
||||
|
||||
aead_request_set_tfm(req, aead);
|
||||
|
||||
return crypto_aead_encrypt(req);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_encrypt);
|
||||
|
||||
int cryptd_morus640_glue_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
|
||||
aead = &cryptd_tfm->base;
|
||||
if (irq_fpu_usable() && (!in_atomic() ||
|
||||
!cryptd_aead_queued(cryptd_tfm)))
|
||||
aead = cryptd_aead_child(cryptd_tfm);
|
||||
|
||||
aead_request_set_tfm(req, aead);
|
||||
|
||||
return crypto_aead_decrypt(req);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_decrypt);
|
||||
|
||||
int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead)
|
||||
{
|
||||
struct cryptd_aead *cryptd_tfm;
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
const char *name = crypto_aead_alg(aead)->base.cra_driver_name;
|
||||
char internal_name[CRYPTO_MAX_ALG_NAME];
|
||||
|
||||
if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name)
|
||||
>= CRYPTO_MAX_ALG_NAME)
|
||||
return -ENAMETOOLONG;
|
||||
|
||||
cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL,
|
||||
CRYPTO_ALG_INTERNAL);
|
||||
if (IS_ERR(cryptd_tfm))
|
||||
return PTR_ERR(cryptd_tfm);
|
||||
|
||||
*ctx = cryptd_tfm;
|
||||
crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_init_tfm);
|
||||
|
||||
void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead)
|
||||
{
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(aead);
|
||||
|
||||
cryptd_free_aead(*ctx);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_exit_tfm);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
|
||||
MODULE_DESCRIPTION("MORUS-640 AEAD mode -- glue for x86 optimizations");
|
||||
|
|
|
@ -7,9 +7,10 @@
|
|||
*/
|
||||
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/nhpoly1305.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
asmlinkage void nh_avx2(const u32 *key, const u8 *message, size_t message_len,
|
||||
u8 hash[NH_HASH_BYTES]);
|
||||
|
@ -24,7 +25,7 @@ static void _nh_avx2(const u32 *key, const u8 *message, size_t message_len,
|
|||
static int nhpoly1305_avx2_update(struct shash_desc *desc,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
if (srclen < 64 || !irq_fpu_usable())
|
||||
if (srclen < 64 || !crypto_simd_usable())
|
||||
return crypto_nhpoly1305_update(desc, src, srclen);
|
||||
|
||||
do {
|
||||
|
|
|
@ -7,9 +7,10 @@
|
|||
*/
|
||||
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/nhpoly1305.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
asmlinkage void nh_sse2(const u32 *key, const u8 *message, size_t message_len,
|
||||
u8 hash[NH_HASH_BYTES]);
|
||||
|
@ -24,7 +25,7 @@ static void _nh_sse2(const u32 *key, const u8 *message, size_t message_len,
|
|||
static int nhpoly1305_sse2_update(struct shash_desc *desc,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
if (srclen < 64 || !irq_fpu_usable())
|
||||
if (srclen < 64 || !crypto_simd_usable())
|
||||
return crypto_nhpoly1305_update(desc, src, srclen);
|
||||
|
||||
do {
|
||||
|
|
|
@ -11,11 +11,11 @@
|
|||
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/poly1305.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
struct poly1305_simd_desc_ctx {
|
||||
|
@ -126,7 +126,7 @@ static int poly1305_simd_update(struct shash_desc *desc,
|
|||
unsigned int bytes;
|
||||
|
||||
/* kernel_fpu_begin/end is costly, use fallback for small updates */
|
||||
if (srclen <= 288 || !may_use_simd())
|
||||
if (srclen <= 288 || !crypto_simd_usable())
|
||||
return crypto_poly1305_update(desc, src, srclen);
|
||||
|
||||
kernel_fpu_begin();
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
|
@ -29,7 +30,7 @@
|
|||
#include <linux/types.h>
|
||||
#include <crypto/sha.h>
|
||||
#include <crypto/sha1_base.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
typedef void (sha1_transform_fn)(u32 *digest, const char *data,
|
||||
unsigned int rounds);
|
||||
|
@ -39,7 +40,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
|
|||
{
|
||||
struct sha1_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
if (!irq_fpu_usable() ||
|
||||
if (!crypto_simd_usable() ||
|
||||
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
|
||||
return crypto_sha1_update(desc, data, len);
|
||||
|
||||
|
@ -57,7 +58,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
|
|||
static int sha1_finup(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len, u8 *out, sha1_transform_fn *sha1_xform)
|
||||
{
|
||||
if (!irq_fpu_usable())
|
||||
if (!crypto_simd_usable())
|
||||
return crypto_sha1_finup(desc, data, len, out);
|
||||
|
||||
kernel_fpu_begin();
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
|
@ -37,8 +38,8 @@
|
|||
#include <linux/types.h>
|
||||
#include <crypto/sha.h>
|
||||
#include <crypto/sha256_base.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <linux/string.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data,
|
||||
u64 rounds);
|
||||
|
@ -49,7 +50,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
|
|||
{
|
||||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
if (!irq_fpu_usable() ||
|
||||
if (!crypto_simd_usable() ||
|
||||
(sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
|
||||
return crypto_sha256_update(desc, data, len);
|
||||
|
||||
|
@ -67,7 +68,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
|
|||
static int sha256_finup(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len, u8 *out, sha256_transform_fn *sha256_xform)
|
||||
{
|
||||
if (!irq_fpu_usable())
|
||||
if (!crypto_simd_usable())
|
||||
return crypto_sha256_finup(desc, data, len, out);
|
||||
|
||||
kernel_fpu_begin();
|
||||
|
|
|
@ -28,16 +28,16 @@
|
|||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/cryptohash.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <crypto/sha.h>
|
||||
#include <crypto/sha512_base.h>
|
||||
#include <asm/fpu/api.h>
|
||||
|
||||
#include <linux/string.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
asmlinkage void sha512_transform_ssse3(u64 *digest, const char *data,
|
||||
u64 rounds);
|
||||
|
@ -49,7 +49,7 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
|
|||
{
|
||||
struct sha512_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
if (!irq_fpu_usable() ||
|
||||
if (!crypto_simd_usable() ||
|
||||
(sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
|
||||
return crypto_sha512_update(desc, data, len);
|
||||
|
||||
|
@ -67,7 +67,7 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
|
|||
static int sha512_finup(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len, u8 *out, sha512_transform_fn *sha512_xform)
|
||||
{
|
||||
if (!irq_fpu_usable())
|
||||
if (!crypto_simd_usable())
|
||||
return crypto_sha512_finup(desc, data, len, out);
|
||||
|
||||
kernel_fpu_begin();
|
||||
|
|
|
@@ -90,7 +90,6 @@ static int get_e820_md5(struct e820_table *table, void *buf)
	}

	desc->tfm = tfm;
	desc->flags = 0;

	size = offsetof(struct e820_table, entries) +
	       sizeof(struct e820_entry) * table->nr_entries;
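
The desc->flags assignment dropped here (and in the other shash users
touched by this series) reflects the removal of the flags field from
struct shash_desc; a minimal sketch of the resulting calling pattern,
with tfm/buf/len/out standing in for the caller's own variables:

	SHASH_DESC_ON_STACK(desc, tfm);
	int ret;

	desc->tfm = tfm;		/* no ->flags left to set */
	ret = crypto_shash_digest(desc, buf, len, out);
	shash_desc_zero(desc);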
@ -144,7 +144,7 @@ static int __init crypto842_mod_init(void)
|
|||
|
||||
return ret;
|
||||
}
|
||||
module_init(crypto842_mod_init);
|
||||
subsys_initcall(crypto842_mod_init);
|
||||
|
||||
static void __exit crypto842_mod_exit(void)
|
||||
{
|
||||
|
|
|
@@ -27,8 +27,8 @@ config CRYPTO_FIPS
	depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
	depends on (MODULE_SIG || !MODULES)
	help
	  This options enables the fips boot option which is
	  required if you want to system to operate in a FIPS 200
	  This option enables the fips boot option which is
	  required if you want the system to operate in a FIPS 200
	  certification. You should say no unless you know what
	  this is.

@ -113,29 +113,6 @@ config CRYPTO_ACOMP
|
|||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_ACOMP2
|
||||
|
||||
config CRYPTO_RSA
|
||||
tristate "RSA algorithm"
|
||||
select CRYPTO_AKCIPHER
|
||||
select CRYPTO_MANAGER
|
||||
select MPILIB
|
||||
select ASN1
|
||||
help
|
||||
Generic implementation of the RSA public key algorithm.
|
||||
|
||||
config CRYPTO_DH
|
||||
tristate "Diffie-Hellman algorithm"
|
||||
select CRYPTO_KPP
|
||||
select MPILIB
|
||||
help
|
||||
Generic implementation of the Diffie-Hellman algorithm.
|
||||
|
||||
config CRYPTO_ECDH
|
||||
tristate "ECDH algorithm"
|
||||
select CRYPTO_KPP
|
||||
select CRYPTO_RNG_DEFAULT
|
||||
help
|
||||
Generic implementation of the ECDH algorithm
|
||||
|
||||
config CRYPTO_MANAGER
|
||||
tristate "Cryptographic algorithm manager"
|
||||
select CRYPTO_MANAGER2
|
||||
|
@@ -253,6 +230,48 @@ config CRYPTO_GLUE_HELPER_X86
config CRYPTO_ENGINE
	tristate

comment "Public-key cryptography"

config CRYPTO_RSA
	tristate "RSA algorithm"
	select CRYPTO_AKCIPHER
	select CRYPTO_MANAGER
	select MPILIB
	select ASN1
	help
	  Generic implementation of the RSA public key algorithm.

config CRYPTO_DH
	tristate "Diffie-Hellman algorithm"
	select CRYPTO_KPP
	select MPILIB
	help
	  Generic implementation of the Diffie-Hellman algorithm.

config CRYPTO_ECC
	tristate

config CRYPTO_ECDH
	tristate "ECDH algorithm"
	select CRYPTO_ECC
	select CRYPTO_KPP
	select CRYPTO_RNG_DEFAULT
	help
	  Generic implementation of the ECDH algorithm

config CRYPTO_ECRDSA
	tristate "EC-RDSA (GOST 34.10) algorithm"
	select CRYPTO_ECC
	select CRYPTO_AKCIPHER
	select CRYPTO_STREEBOG
	select OID_REGISTRY
	select ASN1
	help
	  Elliptic Curve Russian Digital Signature Algorithm (GOST R 34.10-2012,
	  RFC 7091, ISO/IEC 14888-3:2018) is one of the Russian cryptographic
	  standard algorithms (called GOST algorithms). Only signature verification
	  is implemented.

comment "Authenticated Encryption with Associated Data"

config CRYPTO_CCM
@ -310,25 +329,25 @@ config CRYPTO_AEGIS128_AESNI_SSE2
|
|||
tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_AEAD
|
||||
select CRYPTO_CRYPTD
|
||||
select CRYPTO_SIMD
|
||||
help
|
||||
AESNI+SSE2 implementation of the AEGSI-128 dedicated AEAD algorithm.
|
||||
AESNI+SSE2 implementation of the AEGIS-128 dedicated AEAD algorithm.
|
||||
|
||||
config CRYPTO_AEGIS128L_AESNI_SSE2
|
||||
tristate "AEGIS-128L AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_AEAD
|
||||
select CRYPTO_CRYPTD
|
||||
select CRYPTO_SIMD
|
||||
help
|
||||
AESNI+SSE2 implementation of the AEGSI-128L dedicated AEAD algorithm.
|
||||
AESNI+SSE2 implementation of the AEGIS-128L dedicated AEAD algorithm.
|
||||
|
||||
config CRYPTO_AEGIS256_AESNI_SSE2
|
||||
tristate "AEGIS-256 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_AEAD
|
||||
select CRYPTO_CRYPTD
|
||||
select CRYPTO_SIMD
|
||||
help
|
||||
AESNI+SSE2 implementation of the AEGSI-256 dedicated AEAD algorithm.
|
||||
AESNI+SSE2 implementation of the AEGIS-256 dedicated AEAD algorithm.
|
||||
|
||||
config CRYPTO_MORUS640
|
||||
tristate "MORUS-640 AEAD algorithm"
|
||||
|
@ -340,7 +359,7 @@ config CRYPTO_MORUS640_GLUE
|
|||
tristate
|
||||
depends on X86
|
||||
select CRYPTO_AEAD
|
||||
select CRYPTO_CRYPTD
|
||||
select CRYPTO_SIMD
|
||||
help
|
||||
Common glue for SIMD optimizations of the MORUS-640 dedicated AEAD
|
||||
algorithm.
|
||||
|
@ -363,7 +382,7 @@ config CRYPTO_MORUS1280_GLUE
|
|||
tristate
|
||||
depends on X86
|
||||
select CRYPTO_AEAD
|
||||
select CRYPTO_CRYPTD
|
||||
select CRYPTO_SIMD
|
||||
help
|
||||
Common glue for SIMD optimizations of the MORUS-1280 dedicated AEAD
|
||||
algorithm.
|
||||
|
|
|
@@ -147,12 +147,20 @@ obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
obj-$(CONFIG_CRYPTO_OFB) += ofb.o
obj-$(CONFIG_CRYPTO_ECC) += ecc.o

ecdh_generic-y := ecc.o
ecdh_generic-y += ecdh.o
ecdh_generic-y += ecdh_helper.o
obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o

$(obj)/ecrdsa_params.asn1.o: $(obj)/ecrdsa_params.asn1.c $(obj)/ecrdsa_params.asn1.h
$(obj)/ecrdsa_pub_key.asn1.o: $(obj)/ecrdsa_pub_key.asn1.c $(obj)/ecrdsa_pub_key.asn1.h
$(obj)/ecrdsa.o: $(obj)/ecrdsa_params.asn1.h $(obj)/ecrdsa_pub_key.asn1.h
ecrdsa_generic-y += ecrdsa.o
ecrdsa_generic-y += ecrdsa_params.asn1.o
ecrdsa_generic-y += ecrdsa_pub_key.asn1.o
obj-$(CONFIG_CRYPTO_ECRDSA) += ecrdsa_generic.o

#
# generic algorithms and the async_tx api
#
@ -265,7 +265,6 @@ static int adiantum_hash_message(struct skcipher_request *req,
|
|||
int err;
|
||||
|
||||
hash_desc->tfm = tctx->hash;
|
||||
hash_desc->flags = 0;
|
||||
|
||||
err = crypto_shash_init(hash_desc);
|
||||
if (err)
|
||||
|
@ -659,7 +658,7 @@ static void __exit adiantum_module_exit(void)
|
|||
crypto_unregister_template(&adiantum_tmpl);
|
||||
}
|
||||
|
||||
module_init(adiantum_module_init);
|
||||
subsys_initcall(adiantum_module_init);
|
||||
module_exit(adiantum_module_exit);
|
||||
|
||||
MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
|
||||
|
|
|
@ -448,7 +448,7 @@ static void __exit crypto_aegis128_module_exit(void)
|
|||
crypto_unregister_aead(&crypto_aegis128_alg);
|
||||
}
|
||||
|
||||
module_init(crypto_aegis128_module_init);
|
||||
subsys_initcall(crypto_aegis128_module_init);
|
||||
module_exit(crypto_aegis128_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -512,7 +512,7 @@ static void __exit crypto_aegis128l_module_exit(void)
|
|||
crypto_unregister_aead(&crypto_aegis128l_alg);
|
||||
}
|
||||
|
||||
module_init(crypto_aegis128l_module_init);
|
||||
subsys_initcall(crypto_aegis128l_module_init);
|
||||
module_exit(crypto_aegis128l_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -463,7 +463,7 @@ static void __exit crypto_aegis256_module_exit(void)
|
|||
crypto_unregister_aead(&crypto_aegis256_alg);
|
||||
}
|
||||
|
||||
module_init(crypto_aegis256_module_init);
|
||||
subsys_initcall(crypto_aegis256_module_init);
|
||||
module_exit(crypto_aegis256_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -64,7 +64,7 @@ static inline u8 byte(const u32 x, const unsigned n)
|
|||
static const u32 rco_tab[10] = { 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
|
||||
|
||||
/* cacheline-aligned to facilitate prefetching into cache */
|
||||
__visible const u32 crypto_ft_tab[4][256] __cacheline_aligned = {
|
||||
__visible const u32 crypto_ft_tab[4][256] ____cacheline_aligned = {
|
||||
{
|
||||
0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6,
|
||||
0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591,
|
||||
|
@ -328,7 +328,7 @@ __visible const u32 crypto_ft_tab[4][256] __cacheline_aligned = {
|
|||
}
|
||||
};
|
||||
|
||||
__visible const u32 crypto_fl_tab[4][256] __cacheline_aligned = {
|
||||
__visible const u32 crypto_fl_tab[4][256] ____cacheline_aligned = {
|
||||
{
|
||||
0x00000063, 0x0000007c, 0x00000077, 0x0000007b,
|
||||
0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5,
|
||||
|
@ -592,7 +592,7 @@ __visible const u32 crypto_fl_tab[4][256] __cacheline_aligned = {
|
|||
}
|
||||
};
|
||||
|
||||
__visible const u32 crypto_it_tab[4][256] __cacheline_aligned = {
|
||||
__visible const u32 crypto_it_tab[4][256] ____cacheline_aligned = {
|
||||
{
|
||||
0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a,
|
||||
0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b,
|
||||
|
@ -856,7 +856,7 @@ __visible const u32 crypto_it_tab[4][256] __cacheline_aligned = {
|
|||
}
|
||||
};
|
||||
|
||||
__visible const u32 crypto_il_tab[4][256] __cacheline_aligned = {
|
||||
__visible const u32 crypto_il_tab[4][256] ____cacheline_aligned = {
|
||||
{
|
||||
0x00000052, 0x00000009, 0x0000006a, 0x000000d5,
|
||||
0x00000030, 0x00000036, 0x000000a5, 0x00000038,
|
||||
|
@ -1470,7 +1470,7 @@ static void __exit aes_fini(void)
|
|||
crypto_unregister_alg(&aes_alg);
|
||||
}
|
||||
|
||||
module_init(aes_init);
|
||||
subsys_initcall(aes_init);
|
||||
module_exit(aes_fini);
|
||||
|
||||
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
|
||||
|
|
|
@@ -119,10 +119,24 @@ static void akcipher_prepare_alg(struct akcipher_alg *alg)
	base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
}

static int akcipher_default_op(struct akcipher_request *req)
{
	return -ENOSYS;
}

int crypto_register_akcipher(struct akcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (!alg->sign)
		alg->sign = akcipher_default_op;
	if (!alg->verify)
		alg->verify = akcipher_default_op;
	if (!alg->encrypt)
		alg->encrypt = akcipher_default_op;
	if (!alg->decrypt)
		alg->decrypt = akcipher_default_op;

	akcipher_prepare_alg(alg);
	return crypto_register_alg(base);
}
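
One practical effect of the default handlers above, sketched here rather
than taken from the diff: an akcipher that only implements verification
(EC-RDSA in this series is such a case) can leave the other callbacks
unset, and callers of the unimplemented operations get -ENOSYS instead of
a NULL dereference. The ecrdsa_* names are placeholders for the real
callbacks:

	static struct akcipher_alg example_verify_only_alg = {
		.verify		= ecrdsa_verify,
		.set_pub_key	= ecrdsa_set_pub_key,
		.max_size	= ecrdsa_max_size,
		/* .sign/.encrypt/.decrypt intentionally left NULL */
		.base = {
			.cra_name	= "ecrdsa",
			.cra_module	= THIS_MODULE,
		},
	};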
@@ -296,7 +296,13 @@ static void __exit cryptomgr_exit(void)
	BUG_ON(err);
}

subsys_initcall(cryptomgr_init);
/*
 * This is arch_initcall() so that the crypto self-tests are run on algorithms
 * registered early by subsys_initcall(). subsys_initcall() is needed for
 * generic implementations so that they're available for comparison tests when
 * other implementations are registered later by module_init().
 */
arch_initcall(cryptomgr_init);
module_exit(cryptomgr_exit);

MODULE_LICENSE("GPL");
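
To make the ordering above concrete: arch_initcall() is initcall level 3
and subsys_initcall() is level 4, so the test manager is in place before
the generic implementations register. The same reasoning drives the many
module_init() to subsys_initcall() conversions elsewhere in this pull;
the pattern, sketched with an illustrative algorithm:

	static int __init example_generic_mod_init(void)
	{
		return crypto_register_alg(&example_generic_alg);
	}
	subsys_initcall(example_generic_mod_init);	/* was module_init() */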
@ -472,7 +472,7 @@ MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
|
|||
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
|
||||
module_param(dbg, int, 0);
|
||||
MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
|
||||
module_init(prng_mod_init);
|
||||
subsys_initcall(prng_mod_init);
|
||||
module_exit(prng_mod_fini);
|
||||
MODULE_ALIAS_CRYPTO("stdrng");
|
||||
MODULE_ALIAS_CRYPTO("ansi_cprng");
|
||||
|
|
|
@ -699,7 +699,7 @@ static void __exit anubis_mod_fini(void)
|
|||
crypto_unregister_alg(&anubis_alg);
|
||||
}
|
||||
|
||||
module_init(anubis_mod_init);
|
||||
subsys_initcall(anubis_mod_init);
|
||||
module_exit(anubis_mod_fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -163,7 +163,7 @@ static void __exit arc4_exit(void)
|
|||
crypto_unregister_skcipher(&arc4_skcipher);
|
||||
}
|
||||
|
||||
module_init(arc4_init);
|
||||
subsys_initcall(arc4_init);
|
||||
module_exit(arc4_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -276,6 +276,10 @@ static int tpm_sign(struct tpm_buf *tb,
|
|||
|
||||
return datalen;
|
||||
}
|
||||
|
||||
/* Room to fit two u32 zeros for algo id and parameters length. */
|
||||
#define SETKEY_PARAMS_SIZE (sizeof(u32) * 2)
|
||||
|
||||
/*
|
||||
* Maximum buffer size for the BER/DER encoded public key. The public key
|
||||
* is of the form SEQUENCE { INTEGER n, INTEGER e } where n is a maximum 2048
|
||||
|
@ -286,8 +290,9 @@ static int tpm_sign(struct tpm_buf *tb,
|
|||
* - 257 bytes of n
|
||||
* - max 2 bytes for INTEGER e type/length
|
||||
* - 3 bytes of e
|
||||
* - 4+4 of zeros for set_pub_key parameters (SETKEY_PARAMS_SIZE)
|
||||
*/
|
||||
#define PUB_KEY_BUF_SIZE (4 + 4 + 257 + 2 + 3)
|
||||
#define PUB_KEY_BUF_SIZE (4 + 4 + 257 + 2 + 3 + SETKEY_PARAMS_SIZE)
|
||||
|
||||
/*
|
||||
* Provide a part of a description of the key for /proc/keys.
|
||||
|
@ -364,6 +369,8 @@ static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf)
|
|||
cur = encode_tag_length(cur, 0x02, sizeof(e));
|
||||
memcpy(cur, e, sizeof(e));
|
||||
cur += sizeof(e);
|
||||
/* Zero parameters to satisfy set_pub_key ABI. */
|
||||
memset(cur, 0, SETKEY_PARAMS_SIZE);
|
||||
|
||||
return cur - buf;
|
||||
}
|
||||
|
@ -744,12 +751,10 @@ static int tpm_key_verify_signature(const struct key *key,
|
|||
struct crypto_wait cwait;
|
||||
struct crypto_akcipher *tfm;
|
||||
struct akcipher_request *req;
|
||||
struct scatterlist sig_sg, digest_sg;
|
||||
struct scatterlist src_sg[2];
|
||||
char alg_name[CRYPTO_MAX_ALG_NAME];
|
||||
uint8_t der_pub_key[PUB_KEY_BUF_SIZE];
|
||||
uint32_t der_pub_key_len;
|
||||
void *output;
|
||||
unsigned int outlen;
|
||||
int ret;
|
||||
|
||||
pr_devel("==>%s()\n", __func__);
|
||||
|
@ -781,37 +786,17 @@ static int tpm_key_verify_signature(const struct key *key,
|
|||
if (!req)
|
||||
goto error_free_tfm;
|
||||
|
||||
ret = -ENOMEM;
|
||||
outlen = crypto_akcipher_maxsize(tfm);
|
||||
output = kmalloc(outlen, GFP_KERNEL);
|
||||
if (!output)
|
||||
goto error_free_req;
|
||||
|
||||
sg_init_one(&sig_sg, sig->s, sig->s_size);
|
||||
sg_init_one(&digest_sg, output, outlen);
|
||||
akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
|
||||
outlen);
|
||||
sg_init_table(src_sg, 2);
|
||||
sg_set_buf(&src_sg[0], sig->s, sig->s_size);
|
||||
sg_set_buf(&src_sg[1], sig->digest, sig->digest_size);
|
||||
akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
|
||||
sig->digest_size);
|
||||
crypto_init_wait(&cwait);
|
||||
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
|
||||
CRYPTO_TFM_REQ_MAY_SLEEP,
|
||||
crypto_req_done, &cwait);
|
||||
|
||||
/* Perform the verification calculation. This doesn't actually do the
|
||||
* verification, but rather calculates the hash expected by the
|
||||
* signature and returns that to us.
|
||||
*/
|
||||
ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
|
||||
if (ret)
|
||||
goto out_free_output;
|
||||
|
||||
/* Do the actual verification step. */
|
||||
if (req->dst_len != sig->digest_size ||
|
||||
memcmp(sig->digest, output, sig->digest_size) != 0)
|
||||
ret = -EKEYREJECTED;
|
||||
|
||||
out_free_output:
|
||||
kfree(output);
|
||||
error_free_req:
|
||||
akcipher_request_free(req);
|
||||
error_free_tfm:
|
||||
crypto_free_akcipher(tfm);
|
||||
|
|
|
@ -56,7 +56,6 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
|
|||
goto error_no_desc;
|
||||
|
||||
desc->tfm = tfm;
|
||||
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
|
||||
/* Digest the message [RFC2315 9.3] */
|
||||
ret = crypto_shash_digest(desc, pkcs7->data, pkcs7->data_len,
|
||||
|
|
|
@@ -45,6 +45,7 @@ void public_key_free(struct public_key *key)
{
	if (key) {
		kfree(key->key);
		kfree(key->params);
		kfree(key);
	}
}

@@ -94,6 +95,12 @@ int software_key_determine_akcipher(const char *encoding,
	return -ENOPKG;
}

static u8 *pkey_pack_u32(u8 *dst, u32 val)
{
	memcpy(dst, &val, sizeof(val));
	return dst + sizeof(val);
}
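
/*
 * Not part of the original patch, just a sketch of the buffer layout that
 * the pkey_pack_u32() calls below build before handing the key to
 * crypto_akcipher_set_{pub,priv}_key():
 *
 *	[ key, keylen bytes ][ algo, u32 ][ paramlen, u32 ][ params, paramlen bytes ]
 *
 * The two u32 values are packed in native byte order by memcpy(); only
 * algorithms that need the extra material (e.g. EC-RDSA curve parameters)
 * appear to look beyond the first keylen bytes.
 */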

/*
 * Query information about a key.
 */
@ -103,6 +110,7 @@ static int software_key_query(const struct kernel_pkey_params *params,
|
|||
struct crypto_akcipher *tfm;
|
||||
struct public_key *pkey = params->key->payload.data[asym_crypto];
|
||||
char alg_name[CRYPTO_MAX_ALG_NAME];
|
||||
u8 *key, *ptr;
|
||||
int ret, len;
|
||||
|
||||
ret = software_key_determine_akcipher(params->encoding,
|
||||
|
@ -115,14 +123,22 @@ static int software_key_query(const struct kernel_pkey_params *params,
|
|||
if (IS_ERR(tfm))
|
||||
return PTR_ERR(tfm);
|
||||
|
||||
if (pkey->key_is_private)
|
||||
ret = crypto_akcipher_set_priv_key(tfm,
|
||||
pkey->key, pkey->keylen);
|
||||
else
|
||||
ret = crypto_akcipher_set_pub_key(tfm,
|
||||
pkey->key, pkey->keylen);
|
||||
if (ret < 0)
|
||||
key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
|
||||
GFP_KERNEL);
|
||||
if (!key)
|
||||
goto error_free_tfm;
|
||||
memcpy(key, pkey->key, pkey->keylen);
|
||||
ptr = key + pkey->keylen;
|
||||
ptr = pkey_pack_u32(ptr, pkey->algo);
|
||||
ptr = pkey_pack_u32(ptr, pkey->paramlen);
|
||||
memcpy(ptr, pkey->params, pkey->paramlen);
|
||||
|
||||
if (pkey->key_is_private)
|
||||
ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
|
||||
else
|
||||
ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
|
||||
if (ret < 0)
|
||||
goto error_free_key;
|
||||
|
||||
len = crypto_akcipher_maxsize(tfm);
|
||||
info->key_size = len * 8;
|
||||
|
@ -137,6 +153,8 @@ static int software_key_query(const struct kernel_pkey_params *params,
|
|||
KEYCTL_SUPPORTS_SIGN);
|
||||
ret = 0;
|
||||
|
||||
error_free_key:
|
||||
kfree(key);
|
||||
error_free_tfm:
|
||||
crypto_free_akcipher(tfm);
|
||||
pr_devel("<==%s() = %d\n", __func__, ret);
|
||||
|
@ -155,6 +173,7 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
|
|||
struct crypto_wait cwait;
|
||||
struct scatterlist in_sg, out_sg;
|
||||
char alg_name[CRYPTO_MAX_ALG_NAME];
|
||||
char *key, *ptr;
|
||||
int ret;
|
||||
|
||||
pr_devel("==>%s()\n", __func__);
|
||||
|
@ -173,15 +192,24 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
|
|||
if (!req)
|
||||
goto error_free_tfm;
|
||||
|
||||
if (pkey->key_is_private)
|
||||
ret = crypto_akcipher_set_priv_key(tfm,
|
||||
pkey->key, pkey->keylen);
|
||||
else
|
||||
ret = crypto_akcipher_set_pub_key(tfm,
|
||||
pkey->key, pkey->keylen);
|
||||
if (ret)
|
||||
key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
|
||||
GFP_KERNEL);
|
||||
if (!key)
|
||||
goto error_free_req;
|
||||
|
||||
memcpy(key, pkey->key, pkey->keylen);
|
||||
ptr = key + pkey->keylen;
|
||||
ptr = pkey_pack_u32(ptr, pkey->algo);
|
||||
ptr = pkey_pack_u32(ptr, pkey->paramlen);
|
||||
memcpy(ptr, pkey->params, pkey->paramlen);
|
||||
|
||||
if (pkey->key_is_private)
|
||||
ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
|
||||
else
|
||||
ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
|
||||
if (ret)
|
||||
goto error_free_key;
|
||||
|
||||
sg_init_one(&in_sg, in, params->in_len);
|
||||
sg_init_one(&out_sg, out, params->out_len);
|
||||
akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len,
|
||||
|
@ -210,6 +238,8 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
|
|||
if (ret == 0)
|
||||
ret = req->dst_len;
|
||||
|
||||
error_free_key:
|
||||
kfree(key);
|
||||
error_free_req:
|
||||
akcipher_request_free(req);
|
||||
error_free_tfm:
|
||||
|
@@ -227,10 +257,9 @@ int public_key_verify_signature(const struct public_key *pkey,
	struct crypto_wait cwait;
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	struct scatterlist sig_sg, digest_sg;
	struct scatterlist src_sg[2];
	char alg_name[CRYPTO_MAX_ALG_NAME];
	void *output;
	unsigned int outlen;
	char *key, *ptr;
	int ret;

	pr_devel("==>%s()\n", __func__);
@@ -254,45 +283,37 @@ int public_key_verify_signature(const struct public_key *pkey,
	if (!req)
		goto error_free_tfm;

	key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
		      GFP_KERNEL);
	if (!key)
		goto error_free_req;

	memcpy(key, pkey->key, pkey->keylen);
	ptr = key + pkey->keylen;
	ptr = pkey_pack_u32(ptr, pkey->algo);
	ptr = pkey_pack_u32(ptr, pkey->paramlen);
	memcpy(ptr, pkey->params, pkey->paramlen);

	if (pkey->key_is_private)
		ret = crypto_akcipher_set_priv_key(tfm,
						   pkey->key, pkey->keylen);
		ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
	else
		ret = crypto_akcipher_set_pub_key(tfm,
						  pkey->key, pkey->keylen);
		ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
	if (ret)
		goto error_free_req;
		goto error_free_key;

	ret = -ENOMEM;
	outlen = crypto_akcipher_maxsize(tfm);
	output = kmalloc(outlen, GFP_KERNEL);
	if (!output)
		goto error_free_req;

	sg_init_one(&sig_sg, sig->s, sig->s_size);
	sg_init_one(&digest_sg, output, outlen);
	akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
				   outlen);
	sg_init_table(src_sg, 2);
	sg_set_buf(&src_sg[0], sig->s, sig->s_size);
	sg_set_buf(&src_sg[1], sig->digest, sig->digest_size);
	akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
				   sig->digest_size);
	crypto_init_wait(&cwait);
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &cwait);

	/* Perform the verification calculation. This doesn't actually do the
	 * verification, but rather calculates the hash expected by the
	 * signature and returns that to us.
	 */
	ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
	if (ret)
		goto out_free_output;

	/* Do the actual verification step. */
	if (req->dst_len != sig->digest_size ||
	    memcmp(sig->digest, output, sig->digest_size) != 0)
		ret = -EKEYREJECTED;

out_free_output:
	kfree(output);
error_free_key:
	kfree(key);
error_free_req:
	akcipher_request_free(req);
error_free_tfm:
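
For the other side of this new calling convention, a hedged sketch (not
from the diff) of what an akcipher .verify() implementation now sees:
req->src carries the signature immediately followed by the expected
digest, req->dst is NULL, and the comparison happens inside the driver
rather than in the caller. MAX_SIG_SIZE, MAX_DIGEST_SIZE and
example_recover_digest() are illustrative:

	static int example_verify(struct akcipher_request *req)
	{
		u8 sig[MAX_SIG_SIZE], digest[MAX_DIGEST_SIZE], out[MAX_DIGEST_SIZE];

		/* signature: first req->src_len bytes of req->src */
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   sig, req->src_len, 0);
		/* expected digest: the next req->dst_len bytes */
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   digest, req->dst_len, req->src_len);

		if (example_recover_digest(sig, req->src_len, out) ||
		    memcmp(out, digest, req->dst_len))
			return -EKEYREJECTED;
		return 0;
	}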
@ -354,7 +354,6 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
|
|||
goto error_no_desc;
|
||||
|
||||
desc->tfm = tfm;
|
||||
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = crypto_shash_init(desc);
|
||||
if (ret < 0)
|
||||
goto error;
|
||||
|
|
|
@ -22,7 +22,7 @@ CertificateSerialNumber ::= INTEGER
|
|||
|
||||
AlgorithmIdentifier ::= SEQUENCE {
|
||||
algorithm OBJECT IDENTIFIER ({ x509_note_OID }),
|
||||
parameters ANY OPTIONAL
|
||||
parameters ANY OPTIONAL ({ x509_note_params })
|
||||
}
|
||||
|
||||
Name ::= SEQUENCE OF RelativeDistinguishedName
|
||||
|
|
|
@@ -26,6 +26,9 @@ struct x509_parse_context {
	const void	*cert_start;		/* Start of cert content */
	const void	*key;			/* Key data */
	size_t		key_size;		/* Size of key data */
	const void	*params;		/* Key parameters */
	size_t		params_size;		/* Size of key parameters */
	enum OID	key_algo;		/* Public key algorithm */
	enum OID	last_oid;		/* Last OID encountered */
	enum OID	algo_oid;		/* Algorithm OID */
	unsigned char	nr_mpi;			/* Number of MPIs stored */
@ -109,6 +112,13 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
|
|||
|
||||
cert->pub->keylen = ctx->key_size;
|
||||
|
||||
cert->pub->params = kmemdup(ctx->params, ctx->params_size, GFP_KERNEL);
|
||||
if (!cert->pub->params)
|
||||
goto error_decode;
|
||||
|
||||
cert->pub->paramlen = ctx->params_size;
|
||||
cert->pub->algo = ctx->key_algo;
|
||||
|
||||
/* Grab the signature bits */
|
||||
ret = x509_get_sig_params(cert);
|
||||
if (ret < 0)
|
||||
|
@ -220,6 +230,14 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,
|
|||
case OID_sha224WithRSAEncryption:
|
||||
ctx->cert->sig->hash_algo = "sha224";
|
||||
goto rsa_pkcs1;
|
||||
|
||||
case OID_gost2012Signature256:
|
||||
ctx->cert->sig->hash_algo = "streebog256";
|
||||
goto ecrdsa;
|
||||
|
||||
case OID_gost2012Signature512:
|
||||
ctx->cert->sig->hash_algo = "streebog512";
|
||||
goto ecrdsa;
|
||||
}
|
||||
|
||||
rsa_pkcs1:
|
||||
|
@ -227,6 +245,11 @@ rsa_pkcs1:
|
|||
ctx->cert->sig->encoding = "pkcs1";
|
||||
ctx->algo_oid = ctx->last_oid;
|
||||
return 0;
|
||||
ecrdsa:
|
||||
ctx->cert->sig->pkey_algo = "ecrdsa";
|
||||
ctx->cert->sig->encoding = "raw";
|
||||
ctx->algo_oid = ctx->last_oid;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -246,7 +269,8 @@ int x509_note_signature(void *context, size_t hdrlen,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) {
|
||||
if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 ||
|
||||
strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0) {
|
||||
/* Discard the BIT STRING metadata */
|
||||
if (vlen < 1 || *(const u8 *)value != 0)
|
||||
return -EBADMSG;
|
||||
|
@ -400,6 +424,27 @@ int x509_note_subject(void *context, size_t hdrlen,
|
|||
return x509_fabricate_name(ctx, hdrlen, tag, &ctx->cert->subject, vlen);
|
||||
}

/*
 * Extract the parameters for the public key
 */
int x509_note_params(void *context, size_t hdrlen,
		     unsigned char tag,
		     const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;

	/*
	 * AlgorithmIdentifier is used three times in the x509, we should skip
	 * first and ignore third, using second one which is after subject and
	 * before subjectPublicKey.
	 */
	if (!ctx->cert->raw_subject || ctx->key)
		return 0;
	ctx->params = value - hdrlen;
	ctx->params_size = vlen + hdrlen;
	return 0;
}

/*
 * Extract the data for the public key algorithm
 */
@ -409,10 +454,14 @@ int x509_extract_key_data(void *context, size_t hdrlen,
|
|||
{
|
||||
struct x509_parse_context *ctx = context;
|
||||
|
||||
if (ctx->last_oid != OID_rsaEncryption)
|
||||
return -ENOPKG;
|
||||
|
||||
ctx->key_algo = ctx->last_oid;
|
||||
if (ctx->last_oid == OID_rsaEncryption)
|
||||
ctx->cert->pub->pkey_algo = "rsa";
|
||||
else if (ctx->last_oid == OID_gost2012PKey256 ||
|
||||
ctx->last_oid == OID_gost2012PKey512)
|
||||
ctx->cert->pub->pkey_algo = "ecrdsa";
|
||||
else
|
||||
return -ENOPKG;
|
||||
|
||||
/* Discard the BIT STRING metadata */
|
||||
if (vlen < 1 || *(const u8 *)value != 0)
|
||||
|
|
|
@ -77,7 +77,6 @@ int x509_get_sig_params(struct x509_certificate *cert)
|
|||
goto error;
|
||||
|
||||
desc->tfm = tfm;
|
||||
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
|
||||
ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest);
|
||||
if (ret < 0)
|
||||
|
|
|
@ -508,7 +508,7 @@ static void __exit crypto_authenc_module_exit(void)
|
|||
crypto_unregister_template(&crypto_authenc_tmpl);
|
||||
}
|
||||
|
||||
module_init(crypto_authenc_module_init);
|
||||
subsys_initcall(crypto_authenc_module_init);
|
||||
module_exit(crypto_authenc_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -523,7 +523,7 @@ static void __exit crypto_authenc_esn_module_exit(void)
|
|||
crypto_unregister_template(&crypto_authenc_esn_tmpl);
|
||||
}
|
||||
|
||||
module_init(crypto_authenc_esn_module_init);
|
||||
subsys_initcall(crypto_authenc_esn_module_init);
|
||||
module_exit(crypto_authenc_esn_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -133,7 +133,7 @@ static void __exit blowfish_mod_fini(void)
|
|||
crypto_unregister_alg(&alg);
|
||||
}
|
||||
|
||||
module_init(blowfish_mod_init);
|
||||
subsys_initcall(blowfish_mod_init);
|
||||
module_exit(blowfish_mod_fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -1092,7 +1092,7 @@ static void __exit camellia_fini(void)
|
|||
crypto_unregister_alg(&camellia_alg);
|
||||
}
|
||||
|
||||
module_init(camellia_init);
|
||||
subsys_initcall(camellia_init);
|
||||
module_exit(camellia_fini);
|
||||
|
||||
MODULE_DESCRIPTION("Camellia Cipher Algorithm");
|
||||
|
|
|
@ -543,7 +543,7 @@ static void __exit cast5_mod_fini(void)
|
|||
crypto_unregister_alg(&alg);
|
||||
}
|
||||
|
||||
module_init(cast5_mod_init);
|
||||
subsys_initcall(cast5_mod_init);
|
||||
module_exit(cast5_mod_fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -285,7 +285,7 @@ static void __exit cast6_mod_fini(void)
|
|||
crypto_unregister_alg(&alg);
|
||||
}
|
||||
|
||||
module_init(cast6_mod_init);
|
||||
subsys_initcall(cast6_mod_init);
|
||||
module_exit(cast6_mod_fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -98,7 +98,7 @@ static void __exit crypto_cbc_module_exit(void)
|
|||
crypto_unregister_template(&crypto_cbc_tmpl);
|
||||
}
|
||||
|
||||
module_init(crypto_cbc_module_init);
|
||||
subsys_initcall(crypto_cbc_module_init);
|
||||
module_exit(crypto_cbc_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
crypto/ccm.c
|
@ -458,7 +458,6 @@ static void crypto_ccm_free(struct aead_instance *inst)
|
|||
|
||||
static int crypto_ccm_create_common(struct crypto_template *tmpl,
|
||||
struct rtattr **tb,
|
||||
const char *full_name,
|
||||
const char *ctr_name,
|
||||
const char *mac_name)
|
||||
{
|
||||
|
@ -486,7 +485,8 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
|
|||
|
||||
mac = __crypto_hash_alg_common(mac_alg);
|
||||
err = -EINVAL;
|
||||
if (mac->digestsize != 16)
|
||||
if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 ||
|
||||
mac->digestsize != 16)
|
||||
goto out_put_mac;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
|
||||
|
@ -509,23 +509,27 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
|
|||
|
||||
ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
|
||||
|
||||
/* Not a stream cipher? */
|
||||
/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
|
||||
err = -EINVAL;
|
||||
if (ctr->base.cra_blocksize != 1)
|
||||
if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
|
||||
crypto_skcipher_alg_ivsize(ctr) != 16 ||
|
||||
ctr->base.cra_blocksize != 1)
|
||||
goto err_drop_ctr;
|
||||
|
||||
/* We want the real thing! */
|
||||
if (crypto_skcipher_alg_ivsize(ctr) != 16)
|
||||
/* ctr and cbcmac must use the same underlying block cipher. */
|
||||
if (strcmp(ctr->base.cra_name + 4, mac->base.cra_name + 7) != 0)
|
||||
goto err_drop_ctr;
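A note on the new check: "ctr(X)" and "cbcmac(X)" wrap the same block cipher X exactly when the text after "ctr(" matches the text after "cbcmac(", which is what comparing cra_name + 4 against cra_name + 7 tests. A minimal userspace sketch of that comparison (hypothetical helper name, not kernel code):

#include <stdio.h>
#include <string.h>

/* Returns 1 when both template instances wrap the same block cipher. */
static int same_underlying_cipher(const char *ctr_name, const char *mac_name)
{
	if (strncmp(ctr_name, "ctr(", 4) != 0 ||
	    strncmp(mac_name, "cbcmac(", 7) != 0)
		return 0;
	/* "ctr(aes)" + 4 and "cbcmac(aes)" + 7 both point at "aes)" */
	return strcmp(ctr_name + 4, mac_name + 7) == 0;
}

int main(void)
{
	printf("%d\n", same_underlying_cipher("ctr(aes)", "cbcmac(aes)")); /* 1 */
	printf("%d\n", same_underlying_cipher("ctr(aes)", "cbcmac(sm4)")); /* 0 */
	return 0;
}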
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"ccm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_ctr;
|
||||
|
||||
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"ccm_base(%s,%s)", ctr->base.cra_driver_name,
|
||||
mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_ctr;
|
||||
|
||||
memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
|
||||
|
||||
inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = (mac->base.cra_priority +
|
||||
ctr->base.cra_priority) / 2;
|
||||
|
@ -567,7 +571,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
|
|||
const char *cipher_name;
|
||||
char ctr_name[CRYPTO_MAX_ALG_NAME];
|
||||
char mac_name[CRYPTO_MAX_ALG_NAME];
|
||||
char full_name[CRYPTO_MAX_ALG_NAME];
|
||||
|
||||
cipher_name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(cipher_name))
|
||||
|
@ -581,35 +584,24 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
|
|||
cipher_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
return -ENAMETOOLONG;
|
||||
|
||||
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
return -ENAMETOOLONG;
|
||||
|
||||
return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
|
||||
mac_name);
|
||||
return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
|
||||
}
|
||||
|
||||
static int crypto_ccm_base_create(struct crypto_template *tmpl,
|
||||
struct rtattr **tb)
|
||||
{
|
||||
const char *ctr_name;
|
||||
const char *cipher_name;
|
||||
char full_name[CRYPTO_MAX_ALG_NAME];
|
||||
const char *mac_name;
|
||||
|
||||
ctr_name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(ctr_name))
|
||||
return PTR_ERR(ctr_name);
|
||||
|
||||
cipher_name = crypto_attr_alg_name(tb[2]);
|
||||
if (IS_ERR(cipher_name))
|
||||
return PTR_ERR(cipher_name);
|
||||
mac_name = crypto_attr_alg_name(tb[2]);
|
||||
if (IS_ERR(mac_name))
|
||||
return PTR_ERR(mac_name);
|
||||
|
||||
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
|
||||
ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
return -ENAMETOOLONG;
|
||||
|
||||
return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
|
||||
cipher_name);
|
||||
return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
|
||||
}
|
||||
|
||||
static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
|
||||
|
@ -1014,7 +1006,7 @@ static void __exit crypto_ccm_module_exit(void)
|
|||
ARRAY_SIZE(crypto_ccm_tmpls));
|
||||
}
|
||||
|
||||
module_init(crypto_ccm_module_init);
|
||||
subsys_initcall(crypto_ccm_module_init);
|
||||
module_exit(crypto_ccm_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -243,7 +243,7 @@ static void __exit crypto_cfb_module_exit(void)
|
|||
crypto_unregister_template(&crypto_cfb_tmpl);
|
||||
}
|
||||
|
||||
module_init(crypto_cfb_module_init);
|
||||
subsys_initcall(crypto_cfb_module_init);
|
||||
module_exit(crypto_cfb_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -645,8 +645,8 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
|
|||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"%s(%s,%s)", name, chacha_name,
|
||||
poly_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
"%s(%s,%s)", name, chacha->base.cra_name,
|
||||
poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto out_drop_chacha;
|
||||
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"%s(%s,%s)", name, chacha->base.cra_driver_name,
|
||||
|
@ -725,7 +725,7 @@ static void __exit chacha20poly1305_module_exit(void)
|
|||
ARRAY_SIZE(rfc7539_tmpls));
|
||||
}
|
||||
|
||||
module_init(chacha20poly1305_module_init);
|
||||
subsys_initcall(chacha20poly1305_module_init);
|
||||
module_exit(chacha20poly1305_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -22,18 +22,16 @@ static void chacha_docrypt(u32 *state, u8 *dst, const u8 *src,
|
|||
/* aligned to potentially speed up crypto_xor() */
|
||||
u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));
|
||||
|
||||
if (dst != src)
|
||||
memcpy(dst, src, bytes);
|
||||
|
||||
while (bytes >= CHACHA_BLOCK_SIZE) {
|
||||
chacha_block(state, stream, nrounds);
|
||||
crypto_xor(dst, stream, CHACHA_BLOCK_SIZE);
|
||||
crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE);
|
||||
bytes -= CHACHA_BLOCK_SIZE;
|
||||
dst += CHACHA_BLOCK_SIZE;
|
||||
src += CHACHA_BLOCK_SIZE;
|
||||
}
|
||||
if (bytes) {
|
||||
chacha_block(state, stream, nrounds);
|
||||
crypto_xor(dst, stream, bytes);
|
||||
crypto_xor_cpy(dst, src, stream, bytes);
|
||||
}
|
||||
}
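The switch from an up-front memcpy() plus crypto_xor() to crypto_xor_cpy() relies on the three-operand form writing dst = src ^ keystream in a single pass. A userspace sketch of that behaviour (byte-at-a-time model only; the in-kernel helper may use wider accesses):

#include <stddef.h>
#include <stdio.h>

/* Behavioural model of dst = src ^ keystream. */
static void xor_cpy(unsigned char *dst, const unsigned char *src,
		    const unsigned char *keystream, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] = src[i] ^ keystream[i];
}

int main(void)
{
	unsigned char src[4] = { 0x00, 0xff, 0x55, 0xaa };
	unsigned char ks[4]  = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char dst[4];

	xor_cpy(dst, src, ks, sizeof(dst));
	printf("%02x %02x %02x %02x\n", dst[0], dst[1], dst[2], dst[3]);
	return 0;
}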
|
||||
|
||||
|
@ -52,7 +50,7 @@ static int chacha_stream_xor(struct skcipher_request *req,
|
|||
unsigned int nbytes = walk.nbytes;
|
||||
|
||||
if (nbytes < walk.total)
|
||||
nbytes = round_down(nbytes, walk.stride);
|
||||
nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
|
||||
|
||||
chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
|
||||
nbytes, ctx->nrounds);
|
||||
|
@ -203,7 +201,7 @@ static void __exit chacha_generic_mod_fini(void)
|
|||
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
|
||||
}
|
||||
|
||||
module_init(chacha_generic_mod_init);
|
||||
subsys_initcall(chacha_generic_mod_init);
|
||||
module_exit(chacha_generic_mod_fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -313,7 +313,7 @@ static void __exit crypto_cmac_module_exit(void)
|
|||
crypto_unregister_template(&crypto_cmac_tmpl);
|
||||
}
|
||||
|
||||
module_init(crypto_cmac_module_init);
|
||||
subsys_initcall(crypto_cmac_module_init);
|
||||
module_exit(crypto_cmac_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -146,7 +146,7 @@ static void __exit crc32_mod_fini(void)
|
|||
crypto_unregister_shash(&alg);
|
||||
}
|
||||
|
||||
module_init(crc32_mod_init);
|
||||
subsys_initcall(crc32_mod_init);
|
||||
module_exit(crc32_mod_fini);
|
||||
|
||||
MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
|
||||
|
|
|
@ -165,7 +165,7 @@ static void __exit crc32c_mod_fini(void)
|
|||
crypto_unregister_shash(&alg);
|
||||
}
|
||||
|
||||
module_init(crc32c_mod_init);
|
||||
subsys_initcall(crc32c_mod_init);
|
||||
module_exit(crc32c_mod_fini);
|
||||
|
||||
MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
|
||||
|
|
|
@ -65,10 +65,9 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
|
||||
u8 *out)
|
||||
static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
|
||||
{
|
||||
*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
|
||||
*(__u16 *)out = crc_t10dif_generic(crc, data, len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -77,15 +76,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
|
|||
{
|
||||
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
|
||||
|
||||
return __chksum_finup(&ctx->crc, data, len, out);
|
||||
return __chksum_finup(ctx->crc, data, len, out);
|
||||
}
|
||||
|
||||
static int chksum_digest(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int length, u8 *out)
|
||||
{
|
||||
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
|
||||
|
||||
return __chksum_finup(&ctx->crc, data, length, out);
|
||||
return __chksum_finup(0, data, length, out);
|
||||
}
|
||||
|
||||
static struct shash_alg alg = {
|
||||
|
@ -115,7 +112,7 @@ static void __exit crct10dif_mod_fini(void)
|
|||
crypto_unregister_shash(&alg);
|
||||
}
|
||||
|
||||
module_init(crct10dif_mod_init);
|
||||
subsys_initcall(crct10dif_mod_init);
|
||||
module_exit(crct10dif_mod_fini);
|
||||
|
||||
MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
|
||||
|
|
crypto/cryptd.c
|
@ -65,15 +65,6 @@ struct aead_instance_ctx {
|
|||
struct cryptd_queue *queue;
|
||||
};
|
||||
|
||||
struct cryptd_blkcipher_ctx {
|
||||
atomic_t refcnt;
|
||||
struct crypto_blkcipher *child;
|
||||
};
|
||||
|
||||
struct cryptd_blkcipher_request_ctx {
|
||||
crypto_completion_t complete;
|
||||
};
|
||||
|
||||
struct cryptd_skcipher_ctx {
|
||||
atomic_t refcnt;
|
||||
struct crypto_sync_skcipher *child;
|
||||
|
@ -216,129 +207,6 @@ static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
|
|||
*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
|
||||
}
|
||||
|
||||
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
|
||||
struct crypto_blkcipher *child = ctx->child;
|
||||
int err;
|
||||
|
||||
crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
err = crypto_blkcipher_setkey(child, key, keylen);
|
||||
crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
|
||||
struct crypto_blkcipher *child,
|
||||
int err,
|
||||
int (*crypt)(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst,
|
||||
struct scatterlist *src,
|
||||
unsigned int len))
|
||||
{
|
||||
struct cryptd_blkcipher_request_ctx *rctx;
|
||||
struct cryptd_blkcipher_ctx *ctx;
|
||||
struct crypto_ablkcipher *tfm;
|
||||
struct blkcipher_desc desc;
|
||||
int refcnt;
|
||||
|
||||
rctx = ablkcipher_request_ctx(req);
|
||||
|
||||
if (unlikely(err == -EINPROGRESS))
|
||||
goto out;
|
||||
|
||||
desc.tfm = child;
|
||||
desc.info = req->info;
|
||||
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
|
||||
err = crypt(&desc, req->dst, req->src, req->nbytes);
|
||||
|
||||
req->base.complete = rctx->complete;
|
||||
|
||||
out:
|
||||
tfm = crypto_ablkcipher_reqtfm(req);
|
||||
ctx = crypto_ablkcipher_ctx(tfm);
|
||||
refcnt = atomic_read(&ctx->refcnt);
|
||||
|
||||
local_bh_disable();
|
||||
rctx->complete(&req->base, err);
|
||||
local_bh_enable();
|
||||
|
||||
if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
|
||||
crypto_free_ablkcipher(tfm);
|
||||
}
|
||||
|
||||
static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
|
||||
{
|
||||
struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
|
||||
struct crypto_blkcipher *child = ctx->child;
|
||||
|
||||
cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
|
||||
crypto_blkcipher_crt(child)->encrypt);
|
||||
}
|
||||
|
||||
static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
|
||||
{
|
||||
struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
|
||||
struct crypto_blkcipher *child = ctx->child;
|
||||
|
||||
cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
|
||||
crypto_blkcipher_crt(child)->decrypt);
|
||||
}
|
||||
|
||||
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
|
||||
crypto_completion_t compl)
|
||||
{
|
||||
struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
|
||||
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
|
||||
struct cryptd_queue *queue;
|
||||
|
||||
queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
|
||||
rctx->complete = req->base.complete;
|
||||
req->base.complete = compl;
|
||||
|
||||
return cryptd_enqueue_request(queue, &req->base);
|
||||
}
|
||||
|
||||
static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
|
||||
{
|
||||
return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
|
||||
}
|
||||
|
||||
static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
|
||||
{
|
||||
return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
|
||||
}
|
||||
|
||||
static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
|
||||
struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
|
||||
struct crypto_spawn *spawn = &ictx->spawn;
|
||||
struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_blkcipher *cipher;
|
||||
|
||||
cipher = crypto_spawn_blkcipher(spawn);
|
||||
if (IS_ERR(cipher))
|
||||
return PTR_ERR(cipher);
|
||||
|
||||
ctx->child = cipher;
|
||||
tfm->crt_ablkcipher.reqsize =
|
||||
sizeof(struct cryptd_blkcipher_request_ctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_blkcipher(ctx->child);
|
||||
}
|
||||
|
||||
static int cryptd_init_instance(struct crypto_instance *inst,
|
||||
struct crypto_alg *alg)
|
||||
{
|
||||
|
@ -382,67 +250,6 @@ out_free_inst:
|
|||
goto out;
|
||||
}
|
||||
|
||||
static int cryptd_create_blkcipher(struct crypto_template *tmpl,
|
||||
struct rtattr **tb,
|
||||
struct cryptd_queue *queue)
|
||||
{
|
||||
struct cryptd_instance_ctx *ctx;
|
||||
struct crypto_instance *inst;
|
||||
struct crypto_alg *alg;
|
||||
u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
|
||||
u32 mask = CRYPTO_ALG_TYPE_MASK;
|
||||
int err;
|
||||
|
||||
cryptd_check_internal(tb, &type, &mask);
|
||||
|
||||
alg = crypto_get_attr_alg(tb, type, mask);
|
||||
if (IS_ERR(alg))
|
||||
return PTR_ERR(alg);
|
||||
|
||||
inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
|
||||
err = PTR_ERR(inst);
|
||||
if (IS_ERR(inst))
|
||||
goto out_put_alg;
|
||||
|
||||
ctx = crypto_instance_ctx(inst);
|
||||
ctx->queue = queue;
|
||||
|
||||
err = crypto_init_spawn(&ctx->spawn, alg, inst,
|
||||
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
|
||||
if (err)
|
||||
goto out_free_inst;
|
||||
|
||||
type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
|
||||
if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
|
||||
type |= CRYPTO_ALG_INTERNAL;
|
||||
inst->alg.cra_flags = type;
|
||||
inst->alg.cra_type = &crypto_ablkcipher_type;
|
||||
|
||||
inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
|
||||
inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
|
||||
inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
|
||||
|
||||
inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
|
||||
|
||||
inst->alg.cra_init = cryptd_blkcipher_init_tfm;
|
||||
inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;
|
||||
|
||||
inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
|
||||
inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
|
||||
inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
|
||||
|
||||
err = crypto_register_instance(tmpl, inst);
|
||||
if (err) {
|
||||
crypto_drop_spawn(&ctx->spawn);
|
||||
out_free_inst:
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
out_put_alg:
|
||||
crypto_mod_put(alg);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
|
@ -738,7 +545,6 @@ static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
|
|||
goto out;
|
||||
|
||||
desc->tfm = child;
|
||||
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
|
||||
err = crypto_shash_init(desc);
|
||||
|
||||
|
@ -830,7 +636,6 @@ static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
|
|||
goto out;
|
||||
|
||||
desc->tfm = child;
|
||||
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
|
||||
err = shash_ahash_digest(req, desc);
|
||||
|
||||
|
@ -859,7 +664,6 @@ static int cryptd_hash_import(struct ahash_request *req, const void *in)
|
|||
struct shash_desc *desc = cryptd_shash_desc(req);
|
||||
|
||||
desc->tfm = ctx->child;
|
||||
desc->flags = req->base.flags;
|
||||
|
||||
return crypto_shash_import(desc, in);
|
||||
}
|
||||
|
@ -1118,10 +922,6 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
|
|||
|
||||
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
|
||||
case CRYPTO_ALG_TYPE_BLKCIPHER:
|
||||
if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
|
||||
CRYPTO_ALG_TYPE_BLKCIPHER)
|
||||
return cryptd_create_blkcipher(tmpl, tb, &queue);
|
||||
|
||||
return cryptd_create_skcipher(tmpl, tb, &queue);
|
||||
case CRYPTO_ALG_TYPE_DIGEST:
|
||||
return cryptd_create_hash(tmpl, tb, &queue);
|
||||
|
@ -1160,58 +960,6 @@ static struct crypto_template cryptd_tmpl = {
|
|||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
|
||||
struct cryptd_blkcipher_ctx *ctx;
|
||||
struct crypto_tfm *tfm;
|
||||
|
||||
if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
|
||||
"cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
return ERR_PTR(-EINVAL);
|
||||
type = crypto_skcipher_type(type);
|
||||
mask &= ~CRYPTO_ALG_TYPE_MASK;
|
||||
mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
|
||||
tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
|
||||
if (IS_ERR(tfm))
|
||||
return ERR_CAST(tfm);
|
||||
if (tfm->__crt_alg->cra_module != THIS_MODULE) {
|
||||
crypto_free_tfm(tfm);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
ctx = crypto_tfm_ctx(tfm);
|
||||
atomic_set(&ctx->refcnt, 1);
|
||||
|
||||
return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
|
||||
|
||||
struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
|
||||
{
|
||||
struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
|
||||
return ctx->child;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
|
||||
|
||||
bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
|
||||
{
|
||||
struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
|
||||
|
||||
return atomic_read(&ctx->refcnt) - 1;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);
|
||||
|
||||
void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
|
||||
{
|
||||
struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
|
||||
|
||||
if (atomic_dec_and_test(&ctx->refcnt))
|
||||
crypto_free_ablkcipher(&tfm->base);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
|
||||
|
||||
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
|
|
|
@ -220,7 +220,7 @@ static void __exit crypto_null_mod_fini(void)
|
|||
crypto_unregister_skcipher(&skcipher_null);
|
||||
}
|
||||
|
||||
module_init(crypto_null_mod_init);
|
||||
subsys_initcall(crypto_null_mod_init);
|
||||
module_exit(crypto_null_mod_fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -384,7 +384,7 @@ static void __exit crypto_ctr_module_exit(void)
|
|||
ARRAY_SIZE(crypto_ctr_tmpls));
|
||||
}
|
||||
|
||||
module_init(crypto_ctr_module_init);
|
||||
subsys_initcall(crypto_ctr_module_init);
|
||||
module_exit(crypto_ctr_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
crypto/cts.c
|
@ -152,12 +152,14 @@ static int crypto_cts_encrypt(struct skcipher_request *req)
|
|||
struct skcipher_request *subreq = &rctx->subreq;
|
||||
int bsize = crypto_skcipher_blocksize(tfm);
|
||||
unsigned int nbytes = req->cryptlen;
|
||||
int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
|
||||
unsigned int offset;
|
||||
|
||||
skcipher_request_set_tfm(subreq, ctx->child);
|
||||
|
||||
if (cbc_blocks <= 0) {
|
||||
if (nbytes < bsize)
|
||||
return -EINVAL;
|
||||
|
||||
if (nbytes == bsize) {
|
||||
skcipher_request_set_callback(subreq, req->base.flags,
|
||||
req->base.complete,
|
||||
req->base.data);
|
||||
|
@ -166,7 +168,7 @@ static int crypto_cts_encrypt(struct skcipher_request *req)
|
|||
return crypto_skcipher_encrypt(subreq);
|
||||
}
|
||||
|
||||
offset = cbc_blocks * bsize;
|
||||
offset = rounddown(nbytes - 1, bsize);
|
||||
rctx->offset = offset;
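For reference, rounddown(nbytes - 1, bsize) picks the start of the final tail regardless of whether the last block is partial, and for every nbytes > bsize it agrees with the old cbc_blocks * bsize expression while avoiding the signed intermediate. A quick standalone check of that equivalence (bsize fixed at 16 purely for illustration):

#include <stdio.h>

#define ROUNDDOWN(x, y) (((x) / (y)) * (y))

int main(void)
{
	unsigned int bsize = 16, nbytes, mismatches = 0;

	for (nbytes = 17; nbytes <= 4096; nbytes++) {
		int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;   /* old form */
		unsigned int off_old = cbc_blocks * bsize;
		unsigned int off_new = ROUNDDOWN(nbytes - 1, bsize); /* new form */

		if (off_old != off_new)
			mismatches++;
	}
	printf("mismatches: %u\n", mismatches); /* expect 0 */
	return 0;
}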
|
||||
|
||||
skcipher_request_set_callback(subreq, req->base.flags,
|
||||
|
@ -244,13 +246,15 @@ static int crypto_cts_decrypt(struct skcipher_request *req)
|
|||
struct skcipher_request *subreq = &rctx->subreq;
|
||||
int bsize = crypto_skcipher_blocksize(tfm);
|
||||
unsigned int nbytes = req->cryptlen;
|
||||
int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
|
||||
unsigned int offset;
|
||||
u8 *space;
|
||||
|
||||
skcipher_request_set_tfm(subreq, ctx->child);
|
||||
|
||||
if (cbc_blocks <= 0) {
|
||||
if (nbytes < bsize)
|
||||
return -EINVAL;
|
||||
|
||||
if (nbytes == bsize) {
|
||||
skcipher_request_set_callback(subreq, req->base.flags,
|
||||
req->base.complete,
|
||||
req->base.data);
|
||||
|
@ -264,10 +268,10 @@ static int crypto_cts_decrypt(struct skcipher_request *req)
|
|||
|
||||
space = crypto_cts_reqctx_space(req);
|
||||
|
||||
offset = cbc_blocks * bsize;
|
||||
offset = rounddown(nbytes - 1, bsize);
|
||||
rctx->offset = offset;
|
||||
|
||||
if (cbc_blocks <= 1)
|
||||
if (offset <= bsize)
|
||||
memcpy(space, req->iv, bsize);
|
||||
else
|
||||
scatterwalk_map_and_copy(space, req->src, offset - 2 * bsize,
|
||||
|
@ -419,7 +423,7 @@ static void __exit crypto_cts_module_exit(void)
|
|||
crypto_unregister_template(&crypto_cts_tmpl);
|
||||
}
|
||||
|
||||
module_init(crypto_cts_module_init);
|
||||
subsys_initcall(crypto_cts_module_init);
|
||||
module_exit(crypto_cts_module_exit);
|
||||
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
|
|
|
@ -334,7 +334,7 @@ static void __exit deflate_mod_fini(void)
|
|||
crypto_unregister_scomps(scomp, ARRAY_SIZE(scomp));
|
||||
}
|
||||
|
||||
module_init(deflate_mod_init);
|
||||
subsys_initcall(deflate_mod_init);
|
||||
module_exit(deflate_mod_fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -862,14 +862,11 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
|||
int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
const u32 *K = (const u32 *)key;
|
||||
int err;
|
||||
|
||||
if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
|
||||
!((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
|
||||
(*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
|
||||
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
|
||||
return -EINVAL;
|
||||
}
|
||||
err = __des3_verify_key(flags, key);
|
||||
if (unlikely(err))
|
||||
return err;
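The removed open-coded test and the new __des3_verify_key() both reject 3DES keys that degenerate into K1 == K2 or K2 == K3. A standalone sketch of that policy (illustrative only; the kernel helper additionally honours FIPS mode and CRYPTO_TFM_REQ_FORBID_WEAK_KEYS):

#include <stdio.h>
#include <string.h>

#define DES_KEY_SIZE 8

/* Returns 1 when K1 != K2 and K2 != K3, i.e. the key is not degenerate. */
static int des3_keys_distinct(const unsigned char *key)
{
	const unsigned char *k1 = key;
	const unsigned char *k2 = key + DES_KEY_SIZE;
	const unsigned char *k3 = key + 2 * DES_KEY_SIZE;

	return memcmp(k1, k2, DES_KEY_SIZE) != 0 &&
	       memcmp(k2, k3, DES_KEY_SIZE) != 0;
}

int main(void)
{
	unsigned char key[3 * DES_KEY_SIZE];

	memset(key, 0xaa, 8);
	memset(key + 8, 0xbb, 8);
	memset(key + 16, 0xcc, 8);
	printf("%d\n", des3_keys_distinct(key)); /* 1: three distinct keys */

	memcpy(key + 8, key, 8);                 /* force K1 == K2 */
	printf("%d\n", des3_keys_distinct(key)); /* 0: degenerate key */
	return 0;
}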
|
||||
|
||||
des_ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
|
||||
dkey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
|
||||
|
@ -993,7 +990,7 @@ static void __exit des_generic_mod_fini(void)
|
|||
crypto_unregister_algs(des_algs, ARRAY_SIZE(des_algs));
|
||||
}
|
||||
|
||||
module_init(des_generic_mod_init);
|
||||
subsys_initcall(des_generic_mod_init);
|
||||
module_exit(des_generic_mod_fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -236,7 +236,7 @@ static void dh_exit(void)
|
|||
crypto_unregister_kpp(&dh);
|
||||
}
|
||||
|
||||
module_init(dh_init);
|
||||
subsys_initcall(dh_init);
|
||||
module_exit(dh_exit);
|
||||
MODULE_ALIAS_CRYPTO("dh");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -1587,7 +1587,6 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg)
|
|||
}
|
||||
|
||||
sdesc->shash.tfm = tfm;
|
||||
sdesc->shash.flags = 0;
|
||||
drbg->priv_data = sdesc;
|
||||
|
||||
return crypto_shash_alignmask(tfm);
|
||||
|
@ -2039,7 +2038,7 @@ static void __exit drbg_exit(void)
|
|||
crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
|
||||
}
|
||||
|
||||
module_init(drbg_init);
|
||||
subsys_initcall(drbg_init);
|
||||
module_exit(drbg_exit);
|
||||
#ifndef CRYPTO_DRBG_HASH_STRING
|
||||
#define CRYPTO_DRBG_HASH_STRING ""
|
||||
|
|
|
@ -101,7 +101,7 @@ static void __exit crypto_ecb_module_exit(void)
|
|||
crypto_unregister_template(&crypto_ecb_tmpl);
|
||||
}
|
||||
|
||||
module_init(crypto_ecb_module_init);
|
||||
subsys_initcall(crypto_ecb_module_init);
|
||||
module_exit(crypto_ecb_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
crypto/ecc.c
|
@ -1,6 +1,6 @@
|
|||
/*
|
||||
* Copyright (c) 2013, Kenneth MacKay
|
||||
* All rights reserved.
|
||||
* Copyright (c) 2013, 2014 Kenneth MacKay. All rights reserved.
|
||||
* Copyright (c) 2019 Vitaly Chikunov <vt@altlinux.org>
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
|
@ -24,12 +24,15 @@
|
|||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/swab.h>
|
||||
#include <linux/fips.h>
|
||||
#include <crypto/ecdh.h>
|
||||
#include <crypto/rng.h>
|
||||
#include <asm/unaligned.h>
|
||||
#include <linux/ratelimit.h>
|
||||
|
||||
#include "ecc.h"
|
||||
#include "ecc_curve_defs.h"
|
||||
|
@ -112,7 +115,7 @@ static void vli_clear(u64 *vli, unsigned int ndigits)
|
|||
}
|
||||
|
||||
/* Returns true if vli == 0, false otherwise. */
|
||||
static bool vli_is_zero(const u64 *vli, unsigned int ndigits)
|
||||
bool vli_is_zero(const u64 *vli, unsigned int ndigits)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@ -123,6 +126,7 @@ static bool vli_is_zero(const u64 *vli, unsigned int ndigits)
|
|||
|
||||
return true;
|
||||
}
|
||||
EXPORT_SYMBOL(vli_is_zero);
|
||||
|
||||
/* Returns nonzero if bit bit of vli is set. */
|
||||
static u64 vli_test_bit(const u64 *vli, unsigned int bit)
|
||||
|
@ -130,6 +134,11 @@ static u64 vli_test_bit(const u64 *vli, unsigned int bit)
|
|||
return (vli[bit / 64] & ((u64)1 << (bit % 64)));
|
||||
}
|
||||
|
||||
static bool vli_is_negative(const u64 *vli, unsigned int ndigits)
|
||||
{
|
||||
return vli_test_bit(vli, ndigits * 64 - 1);
|
||||
}
|
||||
|
||||
/* Counts the number of 64-bit "digits" in vli. */
|
||||
static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits)
|
||||
{
|
||||
|
@ -161,6 +170,27 @@ static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)
|
|||
return ((num_digits - 1) * 64 + i);
|
||||
}
|
||||
|
||||
/* Set dest from unaligned bit string src. */
|
||||
void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits)
|
||||
{
|
||||
int i;
|
||||
const u64 *from = src;
|
||||
|
||||
for (i = 0; i < ndigits; i++)
|
||||
dest[i] = get_unaligned_be64(&from[ndigits - 1 - i]);
|
||||
}
|
||||
EXPORT_SYMBOL(vli_from_be64);
|
||||
|
||||
void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits)
|
||||
{
|
||||
int i;
|
||||
const u64 *from = src;
|
||||
|
||||
for (i = 0; i < ndigits; i++)
|
||||
dest[i] = get_unaligned_le64(&from[i]);
|
||||
}
|
||||
EXPORT_SYMBOL(vli_from_le64);
|
||||
|
||||
/* Sets dest = src. */
|
||||
static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
|
||||
{
|
||||
|
@ -171,7 +201,7 @@ static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
|
|||
}
|
||||
|
||||
/* Returns sign of left - right. */
|
||||
static int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
|
||||
int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@ -184,6 +214,7 @@ static int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
|
|||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(vli_cmp);
|
||||
|
||||
/* Computes result = in << c, returning carry. Can modify in place
|
||||
* (if result == in). 0 < shift < 64.
|
||||
|
@ -239,8 +270,30 @@ static u64 vli_add(u64 *result, const u64 *left, const u64 *right,
|
|||
return carry;
|
||||
}
|
||||
|
||||
/* Computes result = left + right, returning carry. Can modify in place. */
|
||||
static u64 vli_uadd(u64 *result, const u64 *left, u64 right,
|
||||
unsigned int ndigits)
|
||||
{
|
||||
u64 carry = right;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ndigits; i++) {
|
||||
u64 sum;
|
||||
|
||||
sum = left[i] + carry;
|
||||
if (sum != left[i])
|
||||
carry = (sum < left[i]);
|
||||
else
|
||||
carry = !!carry;
|
||||
|
||||
result[i] = sum;
|
||||
}
|
||||
|
||||
return carry;
|
||||
}
|
||||
|
||||
/* Computes result = left - right, returning borrow. Can modify in place. */
|
||||
static u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
|
||||
u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
|
||||
unsigned int ndigits)
|
||||
{
|
||||
u64 borrow = 0;
|
||||
|
@ -258,9 +311,37 @@ static u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
|
|||
|
||||
return borrow;
|
||||
}
|
||||
EXPORT_SYMBOL(vli_sub);
|
||||
|
||||
/* Computes result = left - right, returning borrow. Can modify in place. */
|
||||
static u64 vli_usub(u64 *result, const u64 *left, u64 right,
|
||||
unsigned int ndigits)
|
||||
{
|
||||
u64 borrow = right;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ndigits; i++) {
|
||||
u64 diff;
|
||||
|
||||
diff = left[i] - borrow;
|
||||
if (diff != left[i])
|
||||
borrow = (diff > left[i]);
|
||||
|
||||
result[i] = diff;
|
||||
}
|
||||
|
||||
return borrow;
|
||||
}
|
||||
|
||||
static uint128_t mul_64_64(u64 left, u64 right)
|
||||
{
|
||||
uint128_t result;
|
||||
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
|
||||
unsigned __int128 m = (unsigned __int128)left * right;
|
||||
|
||||
result.m_low = m;
|
||||
result.m_high = m >> 64;
|
||||
#else
|
||||
u64 a0 = left & 0xffffffffull;
|
||||
u64 a1 = left >> 32;
|
||||
u64 b0 = right & 0xffffffffull;
|
||||
|
@ -269,7 +350,6 @@ static uint128_t mul_64_64(u64 left, u64 right)
|
|||
u64 m1 = a0 * b1;
|
||||
u64 m2 = a1 * b0;
|
||||
u64 m3 = a1 * b1;
|
||||
uint128_t result;
|
||||
|
||||
m2 += (m0 >> 32);
|
||||
m2 += m1;
|
||||
|
@ -280,7 +360,7 @@ static uint128_t mul_64_64(u64 left, u64 right)
|
|||
|
||||
result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
|
||||
result.m_high = m3 + (m2 >> 32);
|
||||
|
||||
#endif
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -330,6 +410,28 @@ static void vli_mult(u64 *result, const u64 *left, const u64 *right,
|
|||
result[ndigits * 2 - 1] = r01.m_low;
|
||||
}
|
||||
|
||||
/* Compute product = left * right, for a small right value. */
|
||||
static void vli_umult(u64 *result, const u64 *left, u32 right,
|
||||
unsigned int ndigits)
|
||||
{
|
||||
uint128_t r01 = { 0 };
|
||||
unsigned int k;
|
||||
|
||||
for (k = 0; k < ndigits; k++) {
|
||||
uint128_t product;
|
||||
|
||||
product = mul_64_64(left[k], right);
|
||||
r01 = add_128_128(r01, product);
|
||||
/* no carry */
|
||||
result[k] = r01.m_low;
|
||||
r01.m_low = r01.m_high;
|
||||
r01.m_high = 0;
|
||||
}
|
||||
result[k] = r01.m_low;
|
||||
for (++k; k < ndigits * 2; k++)
|
||||
result[k] = 0;
|
||||
}
|
||||
|
||||
static void vli_square(u64 *result, const u64 *left, unsigned int ndigits)
|
||||
{
|
||||
uint128_t r01 = { 0, 0 };
|
||||
|
@ -402,6 +504,170 @@ static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right,
|
|||
vli_add(result, result, mod, ndigits);
|
||||
}
|
||||
|
||||
/*
|
||||
* Computes result = product % mod
|
||||
* for special form moduli: p = 2^k-c, for small c (note the minus sign)
|
||||
*
|
||||
* References:
|
||||
* R. Crandall, C. Pomerance. Prime Numbers: A Computational Perspective.
|
||||
* 9 Fast Algorithms for Large-Integer Arithmetic. 9.2.3 Moduli of special form
|
||||
* Algorithm 9.2.13 (Fast mod operation for special-form moduli).
|
||||
*/
|
||||
static void vli_mmod_special(u64 *result, const u64 *product,
|
||||
const u64 *mod, unsigned int ndigits)
|
||||
{
|
||||
u64 c = -mod[0];
|
||||
u64 t[ECC_MAX_DIGITS * 2];
|
||||
u64 r[ECC_MAX_DIGITS * 2];
|
||||
|
||||
vli_set(r, product, ndigits * 2);
|
||||
while (!vli_is_zero(r + ndigits, ndigits)) {
|
||||
vli_umult(t, r + ndigits, c, ndigits);
|
||||
vli_clear(r + ndigits, ndigits);
|
||||
vli_add(r, r, t, ndigits * 2);
|
||||
}
|
||||
vli_set(t, mod, ndigits);
|
||||
vli_clear(t + ndigits, ndigits);
|
||||
while (vli_cmp(r, t, ndigits * 2) >= 0)
|
||||
vli_sub(r, r, t, ndigits * 2);
|
||||
vli_set(result, r, ndigits);
|
||||
}
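The fold above uses the identity 2^k == c (mod 2^k - c): multiply the high half by c, add it back until the high half is zero, then subtract the modulus as needed. A single-word toy version of the same idea for p = 2^64 - c, assuming a compiler with unsigned __int128; c = 59 is used only as an example value (2^64 - 59 is commonly quoted as the largest 64-bit prime, but primality is not required for the reduction itself):

#include <stdio.h>
#include <stdint.h>

/* Reduce a 128-bit product modulo p = 2^64 - c by folding the high word. */
static uint64_t mmod_special64(unsigned __int128 product, uint64_t c)
{
	uint64_t p = (uint64_t)0 - c; /* 2^64 - c as a uint64_t */

	while (product >> 64) {
		uint64_t hi = (uint64_t)(product >> 64);
		uint64_t lo = (uint64_t)product;

		product = (unsigned __int128)hi * c + lo; /* hi*2^64 + lo == hi*c + lo (mod p) */
	}
	while ((uint64_t)product >= p)
		product -= p;
	return (uint64_t)product;
}

int main(void)
{
	uint64_t a = 0xdeadbeefcafebabeull, b = 0x0123456789abcdefull, c = 59;
	uint64_t p = (uint64_t)0 - c;
	unsigned __int128 prod = (unsigned __int128)a * b;

	printf("%llu %llu\n",
	       (unsigned long long)mmod_special64(prod, c),
	       (unsigned long long)(uint64_t)(prod % p)); /* values should match */
	return 0;
}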
|
||||
|
||||
/*
|
||||
* Computes result = product % mod
|
||||
 * for special form moduli: p = 2^{k-1}+c, for small c (note the plus sign),
 * where k-1 falls one bit short of a qword boundary (such as 255).
|
||||
|
||||
* References (loosely based on):
|
||||
* A. Menezes, P. van Oorschot, S. Vanstone. Handbook of Applied Cryptography.
|
||||
* 14.3.4 Reduction methods for moduli of special form. Algorithm 14.47.
|
||||
* URL: http://cacr.uwaterloo.ca/hac/about/chap14.pdf
|
||||
*
|
||||
* H. Cohen, G. Frey, R. Avanzi, C. Doche, T. Lange, K. Nguyen, F. Vercauteren.
|
||||
* Handbook of Elliptic and Hyperelliptic Curve Cryptography.
|
||||
* Algorithm 10.25 Fast reduction for special form moduli
|
||||
*/
|
||||
static void vli_mmod_special2(u64 *result, const u64 *product,
|
||||
const u64 *mod, unsigned int ndigits)
|
||||
{
|
||||
u64 c2 = mod[0] * 2;
|
||||
u64 q[ECC_MAX_DIGITS];
|
||||
u64 r[ECC_MAX_DIGITS * 2];
|
||||
u64 m[ECC_MAX_DIGITS * 2]; /* expanded mod */
|
||||
int carry; /* last bit that doesn't fit into q */
|
||||
int i;
|
||||
|
||||
vli_set(m, mod, ndigits);
|
||||
vli_clear(m + ndigits, ndigits);
|
||||
|
||||
vli_set(r, product, ndigits);
|
||||
/* q and carry are top bits */
|
||||
vli_set(q, product + ndigits, ndigits);
|
||||
vli_clear(r + ndigits, ndigits);
|
||||
carry = vli_is_negative(r, ndigits);
|
||||
if (carry)
|
||||
r[ndigits - 1] &= (1ull << 63) - 1;
|
||||
for (i = 1; carry || !vli_is_zero(q, ndigits); i++) {
|
||||
u64 qc[ECC_MAX_DIGITS * 2];
|
||||
|
||||
vli_umult(qc, q, c2, ndigits);
|
||||
if (carry)
|
||||
vli_uadd(qc, qc, mod[0], ndigits * 2);
|
||||
vli_set(q, qc + ndigits, ndigits);
|
||||
vli_clear(qc + ndigits, ndigits);
|
||||
carry = vli_is_negative(qc, ndigits);
|
||||
if (carry)
|
||||
qc[ndigits - 1] &= (1ull << 63) - 1;
|
||||
if (i & 1)
|
||||
vli_sub(r, r, qc, ndigits * 2);
|
||||
else
|
||||
vli_add(r, r, qc, ndigits * 2);
|
||||
}
|
||||
while (vli_is_negative(r, ndigits * 2))
|
||||
vli_add(r, r, m, ndigits * 2);
|
||||
while (vli_cmp(r, m, ndigits * 2) >= 0)
|
||||
vli_sub(r, r, m, ndigits * 2);
|
||||
|
||||
vli_set(result, r, ndigits);
|
||||
}
|
||||
|
||||
/*
|
||||
* Computes result = product % mod, where product is 2N words long.
|
||||
* Reference: Ken MacKay's micro-ecc.
|
||||
* Currently only designed to work for curve_p or curve_n.
|
||||
*/
|
||||
static void vli_mmod_slow(u64 *result, u64 *product, const u64 *mod,
|
||||
unsigned int ndigits)
|
||||
{
|
||||
u64 mod_m[2 * ECC_MAX_DIGITS];
|
||||
u64 tmp[2 * ECC_MAX_DIGITS];
|
||||
u64 *v[2] = { tmp, product };
|
||||
u64 carry = 0;
|
||||
unsigned int i;
|
||||
/* Shift mod so its highest set bit is at the maximum position. */
|
||||
int shift = (ndigits * 2 * 64) - vli_num_bits(mod, ndigits);
|
||||
int word_shift = shift / 64;
|
||||
int bit_shift = shift % 64;
|
||||
|
||||
vli_clear(mod_m, word_shift);
|
||||
if (bit_shift > 0) {
|
||||
for (i = 0; i < ndigits; ++i) {
|
||||
mod_m[word_shift + i] = (mod[i] << bit_shift) | carry;
|
||||
carry = mod[i] >> (64 - bit_shift);
|
||||
}
|
||||
} else
|
||||
vli_set(mod_m + word_shift, mod, ndigits);
|
||||
|
||||
for (i = 1; shift >= 0; --shift) {
|
||||
u64 borrow = 0;
|
||||
unsigned int j;
|
||||
|
||||
for (j = 0; j < ndigits * 2; ++j) {
|
||||
u64 diff = v[i][j] - mod_m[j] - borrow;
|
||||
|
||||
if (diff != v[i][j])
|
||||
borrow = (diff > v[i][j]);
|
||||
v[1 - i][j] = diff;
|
||||
}
|
||||
i = !(i ^ borrow); /* Swap the index if there was no borrow */
|
||||
vli_rshift1(mod_m, ndigits);
|
||||
mod_m[ndigits - 1] |= mod_m[ndigits] << (64 - 1);
|
||||
vli_rshift1(mod_m + ndigits, ndigits);
|
||||
}
|
||||
vli_set(result, v[i], ndigits);
|
||||
}
|
||||
|
||||
/* Computes result = product % mod using Barrett's reduction with precomputed
|
||||
 * value mu appended to the mod after ndigits, mu = (2^{2w} / mod), which has
 * length ndigits + 1, where mu * (2^w - 1) should not overflow the ndigits
|
||||
* boundary.
|
||||
*
|
||||
* Reference:
|
||||
* R. Brent, P. Zimmermann. Modern Computer Arithmetic. 2010.
|
||||
* 2.4.1 Barrett's algorithm. Algorithm 2.5.
|
||||
*/
|
||||
static void vli_mmod_barrett(u64 *result, u64 *product, const u64 *mod,
|
||||
unsigned int ndigits)
|
||||
{
|
||||
u64 q[ECC_MAX_DIGITS * 2];
|
||||
u64 r[ECC_MAX_DIGITS * 2];
|
||||
const u64 *mu = mod + ndigits;
|
||||
|
||||
vli_mult(q, product + ndigits, mu, ndigits);
|
||||
if (mu[ndigits])
|
||||
vli_add(q + ndigits, q + ndigits, product + ndigits, ndigits);
|
||||
vli_mult(r, mod, q + ndigits, ndigits);
|
||||
vli_sub(r, product, r, ndigits * 2);
|
||||
while (!vli_is_zero(r + ndigits, ndigits) ||
|
||||
vli_cmp(r, mod, ndigits) != -1) {
|
||||
u64 carry;
|
||||
|
||||
carry = vli_sub(r, r, mod, ndigits);
|
||||
vli_usub(r + ndigits, r + ndigits, carry, ndigits);
|
||||
}
|
||||
vli_set(result, r, ndigits);
|
||||
}
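Barrett's method as used above trades the division by mod for two multiplications with a precomputed mu close to 2^{2w}/mod, plus a small fix-up. A textbook single-word sketch (32-bit modulus, 64-bit product, mu = floor(2^64/m)); this mirrors the structure of the routine, not the exact multi-precision code:

#include <stdio.h>
#include <stdint.h>

/* Classic Barrett reduction: x < 2^64, 2^31 <= m < 2^32, mu = floor(2^64 / m). */
static uint32_t barrett_reduce(uint64_t x, uint32_t m, uint64_t mu)
{
	/* q approximates floor(x / m), possibly short by 1 or 2 */
	uint64_t q = (uint64_t)(((unsigned __int128)(x >> 31) * mu) >> 33);
	uint64_t r = x - q * (uint64_t)m;

	while (r >= m)		/* at most two subtractions */
		r -= m;
	return (uint32_t)r;
}

int main(void)
{
	uint32_t m = 0xfffffffbu;		/* 2^32 - 5 */
	uint64_t mu = ~(uint64_t)0 / m;		/* floor(2^64 / m) for odd m */
	uint64_t x = (uint64_t)0x12345678u * 0x9abcdef1u;

	printf("%u %u\n", barrett_reduce(x, m, mu), (uint32_t)(x % m)); /* values should match */
	return 0;
}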
|
||||
|
||||
/* Computes p_result = p_product % curve_p.
|
||||
* See algorithm 5 and 6 from
|
||||
* http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf
|
||||
|
@ -509,14 +775,33 @@ static void vli_mmod_fast_256(u64 *result, const u64 *product,
|
|||
}
|
||||
}
|
||||
|
||||
/* Computes result = product % curve_prime
|
||||
* from http://www.nsa.gov/ia/_files/nist-routines.pdf
|
||||
*/
|
||||
/* Computes result = product % curve_prime for different curve_primes.
|
||||
*
|
||||
 * Note that curve_primes are distinguished just by a heuristic check and
 * not by a complete conformance check.
|
||||
*/
|
||||
static bool vli_mmod_fast(u64 *result, u64 *product,
|
||||
const u64 *curve_prime, unsigned int ndigits)
|
||||
{
|
||||
u64 tmp[2 * ECC_MAX_DIGITS];
|
||||
|
||||
/* Currently, both NIST primes have -1 in lowest qword. */
|
||||
if (curve_prime[0] != -1ull) {
|
||||
		/* Try to handle pseudo-Mersenne primes. */
|
||||
if (curve_prime[ndigits - 1] == -1ull) {
|
||||
vli_mmod_special(result, product, curve_prime,
|
||||
ndigits);
|
||||
return true;
|
||||
} else if (curve_prime[ndigits - 1] == 1ull << 63 &&
|
||||
curve_prime[ndigits - 2] == 0) {
|
||||
vli_mmod_special2(result, product, curve_prime,
|
||||
ndigits);
|
||||
return true;
|
||||
}
|
||||
vli_mmod_barrett(result, product, curve_prime, ndigits);
|
||||
return true;
|
||||
}
|
||||
|
||||
switch (ndigits) {
|
||||
case 3:
|
||||
vli_mmod_fast_192(result, product, curve_prime, tmp);
|
||||
|
@ -525,13 +810,26 @@ static bool vli_mmod_fast(u64 *result, u64 *product,
|
|||
vli_mmod_fast_256(result, product, curve_prime, tmp);
|
||||
break;
|
||||
default:
|
||||
pr_err("unsupports digits size!\n");
|
||||
pr_err_ratelimited("ecc: unsupported digits size!\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Computes result = (left * right) % mod.
|
||||
 * Assumes that mod is a sufficiently large curve order.
|
||||
*/
|
||||
void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
|
||||
const u64 *mod, unsigned int ndigits)
|
||||
{
|
||||
u64 product[ECC_MAX_DIGITS * 2];
|
||||
|
||||
vli_mult(product, left, right, ndigits);
|
||||
vli_mmod_slow(result, product, mod, ndigits);
|
||||
}
|
||||
EXPORT_SYMBOL(vli_mod_mult_slow);
|
||||
|
||||
/* Computes result = (left * right) % curve_prime. */
|
||||
static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
|
||||
const u64 *curve_prime, unsigned int ndigits)
|
||||
|
@ -557,7 +855,7 @@ static void vli_mod_square_fast(u64 *result, const u64 *left,
|
|||
* See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
|
||||
* https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
|
||||
*/
|
||||
static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
|
||||
void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
|
||||
unsigned int ndigits)
|
||||
{
|
||||
u64 a[ECC_MAX_DIGITS], b[ECC_MAX_DIGITS];
|
||||
|
@ -630,6 +928,7 @@ static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
|
|||
|
||||
vli_set(result, u, ndigits);
|
||||
}
|
||||
EXPORT_SYMBOL(vli_mod_inv);
|
||||
|
||||
/* ------ Point operations ------ */
|
||||
|
||||
|
@ -903,6 +1202,85 @@ static void ecc_point_mult(struct ecc_point *result,
|
|||
vli_set(result->y, ry[0], ndigits);
|
||||
}
|
||||
|
||||
/* Computes R = P + Q mod p */
|
||||
static void ecc_point_add(const struct ecc_point *result,
|
||||
const struct ecc_point *p, const struct ecc_point *q,
|
||||
const struct ecc_curve *curve)
|
||||
{
|
||||
u64 z[ECC_MAX_DIGITS];
|
||||
u64 px[ECC_MAX_DIGITS];
|
||||
u64 py[ECC_MAX_DIGITS];
|
||||
unsigned int ndigits = curve->g.ndigits;
|
||||
|
||||
vli_set(result->x, q->x, ndigits);
|
||||
vli_set(result->y, q->y, ndigits);
|
||||
vli_mod_sub(z, result->x, p->x, curve->p, ndigits);
|
||||
vli_set(px, p->x, ndigits);
|
||||
vli_set(py, p->y, ndigits);
|
||||
xycz_add(px, py, result->x, result->y, curve->p, ndigits);
|
||||
vli_mod_inv(z, z, curve->p, ndigits);
|
||||
apply_z(result->x, result->y, z, curve->p, ndigits);
|
||||
}
|
||||
|
||||
/* Computes R = u1P + u2Q mod p using Shamir's trick.
|
||||
* Based on: Kenneth MacKay's micro-ecc (2014).
|
||||
*/
|
||||
void ecc_point_mult_shamir(const struct ecc_point *result,
|
||||
const u64 *u1, const struct ecc_point *p,
|
||||
const u64 *u2, const struct ecc_point *q,
|
||||
const struct ecc_curve *curve)
|
||||
{
|
||||
u64 z[ECC_MAX_DIGITS];
|
||||
u64 sump[2][ECC_MAX_DIGITS];
|
||||
u64 *rx = result->x;
|
||||
u64 *ry = result->y;
|
||||
unsigned int ndigits = curve->g.ndigits;
|
||||
unsigned int num_bits;
|
||||
struct ecc_point sum = ECC_POINT_INIT(sump[0], sump[1], ndigits);
|
||||
const struct ecc_point *points[4];
|
||||
const struct ecc_point *point;
|
||||
unsigned int idx;
|
||||
int i;
|
||||
|
||||
ecc_point_add(&sum, p, q, curve);
|
||||
points[0] = NULL;
|
||||
points[1] = p;
|
||||
points[2] = q;
|
||||
points[3] = ∑
|
||||
|
||||
num_bits = max(vli_num_bits(u1, ndigits),
|
||||
vli_num_bits(u2, ndigits));
|
||||
i = num_bits - 1;
|
||||
idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
|
||||
point = points[idx];
|
||||
|
||||
vli_set(rx, point->x, ndigits);
|
||||
vli_set(ry, point->y, ndigits);
|
||||
vli_clear(z + 1, ndigits - 1);
|
||||
z[0] = 1;
|
||||
|
||||
for (--i; i >= 0; i--) {
|
||||
ecc_point_double_jacobian(rx, ry, z, curve->p, ndigits);
|
||||
idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
|
||||
point = points[idx];
|
||||
if (point) {
|
||||
u64 tx[ECC_MAX_DIGITS];
|
||||
u64 ty[ECC_MAX_DIGITS];
|
||||
u64 tz[ECC_MAX_DIGITS];
|
||||
|
||||
vli_set(tx, point->x, ndigits);
|
||||
vli_set(ty, point->y, ndigits);
|
||||
apply_z(tx, ty, z, curve->p, ndigits);
|
||||
vli_mod_sub(tz, rx, tx, curve->p, ndigits);
|
||||
xycz_add(tx, ty, rx, ry, curve->p, ndigits);
|
||||
vli_mod_mult_fast(z, z, tz, curve->p, ndigits);
|
||||
}
|
||||
}
|
||||
vli_mod_inv(z, z, curve->p, ndigits);
|
||||
apply_z(rx, ry, z, curve->p, ndigits);
|
||||
}
|
||||
EXPORT_SYMBOL(ecc_point_mult_shamir);
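The routine above is the elliptic-curve form of Shamir's trick: one shared double-and-add pass over the bits of both scalars, selecting from {0, P, Q, P+Q} at each step. The same idea on plain integers, as a self-contained sanity check of the scan order:

#include <stdio.h>
#include <stdint.h>

/* Compute u1*a + u2*b with one joint double-and-add pass (integer analogue). */
static uint64_t shamir_mul_add(uint64_t u1, uint64_t a, uint64_t u2, uint64_t b)
{
	uint64_t table[4] = { 0, a, b, a + b }; /* like {0, P, Q, P+Q} */
	uint64_t acc = 0;
	int i;

	for (i = 63; i >= 0; i--) {
		unsigned int idx = ((u1 >> i) & 1) | (((u2 >> i) & 1) << 1);

		acc = 2 * acc + table[idx]; /* "point double" then optional "add" */
	}
	return acc;
}

int main(void)
{
	uint64_t u1 = 123, a = 4567, u2 = 89, b = 1011;

	printf("%llu %llu\n",
	       (unsigned long long)shamir_mul_add(u1, a, u2, b),
	       (unsigned long long)(u1 * a + u2 * b)); /* values should match */
	return 0;
}

One difference worth noting: the integer sketch can start from a zero accumulator, while the ECC code initialises the result from the first non-zero bit pair because it has no convenient representation of the point at infinity in the accumulator.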
|
||||
|
||||
static inline void ecc_swap_digits(const u64 *in, u64 *out,
|
||||
unsigned int ndigits)
|
||||
{
|
||||
|
@ -948,6 +1326,7 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
|
|||
|
||||
return __ecc_is_key_valid(curve, private_key, ndigits);
|
||||
}
|
||||
EXPORT_SYMBOL(ecc_is_key_valid);
|
||||
|
||||
/*
|
||||
* ECC private keys are generated using the method of extra random bits,
|
||||
|
@ -1000,6 +1379,7 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
|
|||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ecc_gen_privkey);
|
||||
|
||||
int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
|
||||
const u64 *private_key, u64 *public_key)
|
||||
|
@ -1036,13 +1416,17 @@ err_free_point:
|
|||
out:
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ecc_make_pub_key);
|
||||
|
||||
/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
|
||||
static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
|
||||
int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
|
||||
struct ecc_point *pk)
|
||||
{
|
||||
u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];
|
||||
|
||||
if (WARN_ON(pk->ndigits != curve->g.ndigits))
|
||||
return -EINVAL;
|
||||
|
||||
/* Check 1: Verify key is not the zero point. */
|
||||
if (ecc_point_is_zero(pk))
|
||||
return -EINVAL;
|
||||
|
@ -1064,8 +1448,8 @@ static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
|
|||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL(ecc_is_pubkey_valid_partial);
|
||||
|
||||
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
|
||||
const u64 *private_key, const u64 *public_key,
|
||||
|
@ -1121,3 +1505,6 @@ err_alloc_product:
|
|||
out:
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(crypto_ecdh_shared_secret);
|
||||
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
|
|
crypto/ecc.h
|
@ -26,12 +26,50 @@
|
|||
#ifndef _CRYPTO_ECC_H
|
||||
#define _CRYPTO_ECC_H
|
||||
|
||||
/* One digit is u64 qword. */
|
||||
#define ECC_CURVE_NIST_P192_DIGITS 3
|
||||
#define ECC_CURVE_NIST_P256_DIGITS 4
|
||||
#define ECC_MAX_DIGITS ECC_CURVE_NIST_P256_DIGITS
|
||||
#define ECC_MAX_DIGITS (512 / 64)
|
||||
|
||||
#define ECC_DIGITS_TO_BYTES_SHIFT 3
|
||||
|
||||
/**
|
||||
* struct ecc_point - elliptic curve point in affine coordinates
|
||||
*
|
||||
* @x: X coordinate in vli form.
|
||||
* @y: Y coordinate in vli form.
|
||||
* @ndigits: Length of vlis in u64 qwords.
|
||||
*/
|
||||
struct ecc_point {
|
||||
u64 *x;
|
||||
u64 *y;
|
||||
u8 ndigits;
|
||||
};
|
||||
|
||||
#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits }
|
||||
|
||||
/**
|
||||
* struct ecc_curve - definition of elliptic curve
|
||||
*
|
||||
* @name: Short name of the curve.
|
||||
* @g: Generator point of the curve.
|
||||
* @p: Prime number, if Barrett's reduction is used for this curve
|
||||
* pre-calculated value 'mu' is appended to the @p after ndigits.
|
||||
* Use of Barrett's reduction is heuristically determined in
|
||||
* vli_mmod_fast().
|
||||
* @n: Order of the curve group.
|
||||
* @a: Curve parameter a.
|
||||
* @b: Curve parameter b.
|
||||
*/
|
||||
struct ecc_curve {
|
||||
char *name;
|
||||
struct ecc_point g;
|
||||
u64 *p;
|
||||
u64 *n;
|
||||
u64 *a;
|
||||
u64 *b;
|
||||
};
|
||||
|
||||
/**
|
||||
* ecc_is_key_valid() - Validate a given ECDH private key
|
||||
*
|
||||
|
@ -91,4 +129,117 @@ int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
|
|||
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
|
||||
const u64 *private_key, const u64 *public_key,
|
||||
u64 *secret);
|
||||
|
||||
/**
|
||||
* ecc_is_pubkey_valid_partial() - Partial public key validation
|
||||
*
|
||||
* @curve: elliptic curve domain parameters
|
||||
* @pk: public key as a point
|
||||
*
|
||||
 * Validate the public key according to SP800-56A section 5.6.2.3.4 ECC Partial
|
||||
* Public-Key Validation Routine.
|
||||
*
|
||||
* Note: There is no check that the public key is in the correct elliptic curve
|
||||
* subgroup.
|
||||
*
|
||||
* Return: 0 if validation is successful, -EINVAL if validation is failed.
|
||||
*/
|
||||
int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
|
||||
struct ecc_point *pk);
|
||||
|
||||
/**
|
||||
 * vli_is_zero() - Determine whether vli is zero
|
||||
*
|
||||
* @vli: vli to check.
|
||||
* @ndigits: length of the @vli
|
||||
*/
|
||||
bool vli_is_zero(const u64 *vli, unsigned int ndigits);
|
||||
|
||||
/**
|
||||
* vli_cmp() - compare left and right vlis
|
||||
*
|
||||
* @left: vli
|
||||
* @right: vli
|
||||
* @ndigits: length of both vlis
|
||||
*
|
||||
* Returns sign of @left - @right, i.e. -1 if @left < @right,
|
||||
* 0 if @left == @right, 1 if @left > @right.
|
||||
*/
|
||||
int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits);
|
||||
|
||||
/**
|
||||
* vli_sub() - Subtracts right from left
|
||||
*
|
||||
* @result: where to write result
|
||||
* @left: vli
|
||||
 * @right: vli
|
||||
* @ndigits: length of all vlis
|
||||
*
|
||||
* Note: can modify in-place.
|
||||
*
|
||||
 * Return: borrow bit.
|
||||
*/
|
||||
u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
|
||||
unsigned int ndigits);
|
||||
|
||||
/**
|
||||
* vli_from_be64() - Load vli from big-endian u64 array
|
||||
*
|
||||
* @dest: destination vli
|
||||
* @src: source array of u64 BE values
|
||||
* @ndigits: length of both vli and array
|
||||
*/
|
||||
void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits);
|
||||
|
||||
/**
|
||||
* vli_from_le64() - Load vli from little-endian u64 array
|
||||
*
|
||||
* @dest: destination vli
|
||||
* @src: source array of u64 LE values
|
||||
* @ndigits: length of both vli and array
|
||||
*/
|
||||
void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits);
|
||||
|
||||
/**
|
||||
* vli_mod_inv() - Modular inversion
|
||||
*
|
||||
* @result: where to write vli number
|
||||
* @input: vli value to operate on
|
||||
* @mod: modulus
|
||||
* @ndigits: length of all vlis
|
||||
*/
|
||||
void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
|
||||
unsigned int ndigits);
|
||||
|
||||
/**
|
||||
* vli_mod_mult_slow() - Modular multiplication
|
||||
*
|
||||
* @result: where to write result value
|
||||
* @left: vli number to multiply with @right
|
||||
* @right: vli number to multiply with @left
|
||||
* @mod: modulus
|
||||
* @ndigits: length of all vlis
|
||||
*
|
||||
 * Note: Assumes that @mod is a sufficiently large curve order.
|
||||
*/
|
||||
void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
|
||||
const u64 *mod, unsigned int ndigits);
|
||||
|
||||
/**
|
||||
* ecc_point_mult_shamir() - Add two points multiplied by scalars
|
||||
*
|
||||
* @result: resulting point
|
||||
* @x: scalar to multiply with @p
|
||||
* @p: point to multiply with @x
|
||||
* @y: scalar to multiply with @q
|
||||
* @q: point to multiply with @y
|
||||
* @curve: curve
|
||||
*
|
||||
 * Returns result = x * p + y * q over the curve.
|
||||
* This works faster than two multiplications and addition.
|
||||
*/
|
||||
void ecc_point_mult_shamir(const struct ecc_point *result,
|
||||
const u64 *x, const struct ecc_point *p,
|
||||
const u64 *y, const struct ecc_point *q,
|
||||
const struct ecc_curve *curve);
|
||||
#endif
|
||||
|
|
|
@ -2,21 +2,6 @@
|
|||
#ifndef _CRYTO_ECC_CURVE_DEFS_H
|
||||
#define _CRYTO_ECC_CURVE_DEFS_H
|
||||
|
||||
struct ecc_point {
|
||||
u64 *x;
|
||||
u64 *y;
|
||||
u8 ndigits;
|
||||
};
|
||||
|
||||
struct ecc_curve {
|
||||
char *name;
|
||||
struct ecc_point g;
|
||||
u64 *p;
|
||||
u64 *n;
|
||||
u64 *a;
|
||||
u64 *b;
|
||||
};
|
||||
|
||||
/* NIST P-192: a = p - 3 */
|
||||
static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
|
||||
0x188DA80EB03090F6ull };
Some files were not shown because too many files changed in this diff.