Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Add library interfaces of certain crypto algorithms for WireGuard
   - Remove the obsolete ablkcipher and blkcipher interfaces
   - Move add_early_randomness() out of rng_mutex

  Algorithms:
   - Add blake2b shash algorithm
   - Add blake2s shash algorithm
   - Add curve25519 kpp algorithm
   - Implement 4 way interleave in arm64/gcm-ce
   - Implement ciphertext stealing in powerpc/spe-xts
   - Add Eric Biggers's scalar accelerated ChaCha code for ARM
   - Add accelerated 32r2 code from Zinc for MIPS
   - Add OpenSSL/CRYPTOGAMS poly1305 implementation for ARM and MIPS

  Drivers:
   - Fix entropy reading failures in ks-sa
   - Add support for sam9x60 in atmel
   - Add crypto accelerator for amlogic GXL
   - Add sun8i-ce Crypto Engine
   - Add sun8i-ss cryptographic offloader
   - Add a host of algorithms to inside-secure
   - Add NPCM RNG driver
   - Add HiSilicon HPRE accelerator
   - Add HiSilicon TRNG driver"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (285 commits)
  crypto: vmx - Avoid weird build failures
  crypto: lib/chacha20poly1305 - use chacha20_crypt()
  crypto: x86/chacha - only unregister algorithms if registered
  crypto: chacha_generic - remove unnecessary setkey() functions
  crypto: amlogic - enable working on big endian kernel
  crypto: sun8i-ce - enable working on big endian
  crypto: mips/chacha - select CRYPTO_SKCIPHER, not CRYPTO_BLKCIPHER
  hwrng: ks-sa - Enable COMPILE_TEST
  crypto: essiv - remove redundant null pointer check before kfree
  crypto: atmel-aes - Change data type for "lastc" buffer
  crypto: atmel-tdes - Set the IV after {en,de}crypt
  crypto: sun4i-ss - fix big endian issues
  crypto: sun4i-ss - hide the Invalid keylen message
  crypto: sun4i-ss - use crypto_ahash_digestsize
  crypto: sun4i-ss - remove dependency on not 64BIT
  crypto: sun4i-ss - Fix 64-bit size_t warnings on sun4i-ss-hash.c
  MAINTAINERS: Add maintainer for HiSilicon SEC V2 driver
  crypto: hisilicon - add DebugFS for HiSilicon SEC
  Documentation: add DebugFS doc for HiSilicon SEC
  crypto: hisilicon - add SRIOV for HiSilicon SEC
  ...
Commit 642356cb5f
|
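The pull's headline item, the library-style interfaces added for WireGuard, can be illustrated with a minimal sketch along the following lines; it assumes the <crypto/chacha20poly1305.h> header and its chacha20poly1305_encrypt()/chacha20poly1305_decrypt() entry points, and the example_* wrappers are hypothetical names, not code from this merge.

#include <crypto/chacha20poly1305.h>
#include <linux/types.h>

/* Seal: dst must hold msg_len + CHACHA20POLY1305_AUTHTAG_SIZE bytes. */
static void example_seal(u8 *dst, const u8 *msg, size_t msg_len,
			 const u8 *ad, size_t ad_len, u64 nonce,
			 const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
	chacha20poly1305_encrypt(dst, msg, msg_len, ad, ad_len, nonce, key);
}

/* Open: ct_len includes the appended tag; returns false on a bad tag. */
static bool example_open(u8 *dst, const u8 *ct, size_t ct_len,
			 const u8 *ad, size_t ad_len, u64 nonce,
			 const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
	return chacha20poly1305_decrypt(dst, ct, ct_len, ad, ad_len, nonce, key);
}

Unlike the skcipher/AEAD APIs, these calls take flat buffers and need no tfm allocation, which is what makes them suitable for WireGuard's data path.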
@ -0,0 +1,57 @@
|
|||
What: /sys/kernel/debug/hisi_hpre/<bdf>/cluster[0-3]/regs
|
||||
Date: Sep 2019
|
||||
Contact: linux-crypto@vger.kernel.org
|
||||
Description: Dump debug registers from the HPRE cluster.
|
||||
Only available for PF.
|
||||
|
||||
What: /sys/kernel/debug/hisi_hpre/<bdf>/cluster[0-3]/cluster_ctrl
|
||||
Date: Sep 2019
|
||||
Contact: linux-crypto@vger.kernel.org
|
||||
Description: Write the index of the HPRE core to select within the cluster
to this file; the debug information of that core can then be read.
Only available for PF.
|
||||
|
||||
What: /sys/kernel/debug/hisi_hpre/<bdf>/rdclr_en
|
||||
Date: Sep 2019
|
||||
Contact: linux-crypto@vger.kernel.org
|
||||
Description: Read-clear control for the HPRE core debug registers. Writing
1 enables clearing of these registers on read; writing 0 disables it.
This has no other functional effect; it only controls whether the
counters are cleared after the registers are read.
Only available for PF.
|
||||
|
||||
What: /sys/kernel/debug/hisi_hpre/<bdf>/current_qm
|
||||
Date: Sep 2019
|
||||
Contact: linux-crypto@vger.kernel.org
|
||||
Description: One HPRE controller has one PF and multiple VFs; each function
has a QM. This file selects the QM that the qm directory below refers to.
Only available for PF.
|
||||
|
||||
What: /sys/kernel/debug/hisi_hpre/<bdf>/regs
|
||||
Date: Sep 2019
|
||||
Contact: linux-crypto@vger.kernel.org
|
||||
Description: Dump debug registers from the HPRE.
|
||||
Only available for PF.
|
||||
|
||||
What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/qm_regs
|
||||
Date: Sep 2019
|
||||
Contact: linux-crypto@vger.kernel.org
|
||||
Description: Dump debug registers from the QM.
|
||||
Available for PF and VF in host. VF in guest currently only
|
||||
has one debug register.
|
||||
|
||||
What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/current_q
|
||||
Date: Sep 2019
|
||||
Contact: linux-crypto@vger.kernel.org
|
||||
Description: One QM may contain multiple queues. Select a specific queue to
show its debug registers in the qm_regs file above.
Only available for PF.
|
||||
|
||||
What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/clear_enable
|
||||
Date: Sep 2019
|
||||
Contact: linux-crypto@vger.kernel.org
|
||||
Description: Read-clear control for the QM debug registers (qm_regs).
Writing 1 enables clearing of these registers on read; writing 0
disables it. This has no other functional effect; it only controls
whether the counters are cleared after the registers are read.
Only available for PF.
|
|
@ -0,0 +1,43 @@
|
|||
What: /sys/kernel/debug/hisi_sec/<bdf>/sec_dfx
|
||||
Date: Oct 2019
|
||||
Contact: linux-crypto@vger.kernel.org
|
||||
Description: Dump the debug registers of SEC cores.
|
||||
Only available for PF.
|
||||
|
||||
What: /sys/kernel/debug/hisi_sec/<bdf>/clear_enable
|
||||
Date: Oct 2019
|
||||
Contact: linux-crypto@vger.kernel.org
|
||||
Description: Enable or disable the clear action after reading the SEC
debug registers.
0: disable, 1: enable.
Only available for PF; it has no other effect on SEC.
|
||||
|
||||
What: /sys/kernel/debug/hisi_sec/<bdf>/current_qm
|
||||
Date: Oct 2019
|
||||
Contact: linux-crypto@vger.kernel.org
|
||||
Description: One SEC controller has one PF and multiple VFs; each function
has a QM. This file selects the QM that the qm directory below
refers to.
Only available for PF.
|
||||
|
||||
What: /sys/kernel/debug/hisi_sec/<bdf>/qm/qm_regs
|
||||
Date: Oct 2019
|
||||
Contact: linux-crypto@vger.kernel.org
|
||||
Description: Dump of QM related debug registers.
|
||||
Available for PF and VF in host. VF in guest currently only
|
||||
has one debug register.
|
||||
|
||||
What: /sys/kernel/debug/hisi_sec/<bdf>/qm/current_q
|
||||
Date: Oct 2019
|
||||
Contact: linux-crypto@vger.kernel.org
|
||||
Description: One QM of SEC may contain multiple queues. Select a specific
queue to show its debug registers in the 'qm_regs' file above.
Only available for PF.
|
||||
|
||||
What: /sys/kernel/debug/hisi_sec/<bdf>/qm/clear_enable
|
||||
Date: Oct 2019
|
||||
Contact: linux-crypto@vger.kernel.org
|
||||
Description: Enable or disable the clear action after reading the SEC QM
debug registers.
0: disable, 1: enable.
Only available for PF; it has no other effect on SEC.
|
|
@ -5,7 +5,7 @@ Block Cipher Algorithm Definitions
|
|||
:doc: Block Cipher Algorithm Definitions
|
||||
|
||||
.. kernel-doc:: include/linux/crypto.h
|
||||
:functions: crypto_alg ablkcipher_alg blkcipher_alg cipher_alg compress_alg
|
||||
:functions: crypto_alg cipher_alg compress_alg
|
||||
|
||||
Symmetric Key Cipher API
|
||||
------------------------
|
||||
|
@ -33,30 +33,3 @@ Single Block Cipher API
|
|||
|
||||
.. kernel-doc:: include/linux/crypto.h
|
||||
:functions: crypto_alloc_cipher crypto_free_cipher crypto_has_cipher crypto_cipher_blocksize crypto_cipher_setkey crypto_cipher_encrypt_one crypto_cipher_decrypt_one
|
||||
|
||||
Asynchronous Block Cipher API - Deprecated
|
||||
------------------------------------------
|
||||
|
||||
.. kernel-doc:: include/linux/crypto.h
|
||||
:doc: Asynchronous Block Cipher API
|
||||
|
||||
.. kernel-doc:: include/linux/crypto.h
|
||||
:functions: crypto_free_ablkcipher crypto_has_ablkcipher crypto_ablkcipher_ivsize crypto_ablkcipher_blocksize crypto_ablkcipher_setkey crypto_ablkcipher_reqtfm crypto_ablkcipher_encrypt crypto_ablkcipher_decrypt
|
||||
|
||||
Asynchronous Cipher Request Handle - Deprecated
|
||||
-----------------------------------------------
|
||||
|
||||
.. kernel-doc:: include/linux/crypto.h
|
||||
:doc: Asynchronous Cipher Request Handle
|
||||
|
||||
.. kernel-doc:: include/linux/crypto.h
|
||||
:functions: crypto_ablkcipher_reqsize ablkcipher_request_set_tfm ablkcipher_request_alloc ablkcipher_request_free ablkcipher_request_set_callback ablkcipher_request_set_crypt
|
||||
|
||||
Synchronous Block Cipher API - Deprecated
|
||||
-----------------------------------------
|
||||
|
||||
.. kernel-doc:: include/linux/crypto.h
|
||||
:doc: Synchronous Block Cipher API
|
||||
|
||||
.. kernel-doc:: include/linux/crypto.h
|
||||
:functions: crypto_alloc_blkcipher crypto_free_blkcipher crypto_has_blkcipher crypto_blkcipher_name crypto_blkcipher_ivsize crypto_blkcipher_blocksize crypto_blkcipher_setkey crypto_blkcipher_encrypt crypto_blkcipher_encrypt_iv crypto_blkcipher_decrypt crypto_blkcipher_decrypt_iv crypto_blkcipher_set_iv crypto_blkcipher_get_iv
|
||||
|
|
|
@ -201,10 +201,6 @@ the aforementioned cipher types:
|
|||
- CRYPTO_ALG_TYPE_AEAD Authenticated Encryption with Associated Data
|
||||
(MAC)
|
||||
|
||||
- CRYPTO_ALG_TYPE_BLKCIPHER Synchronous multi-block cipher
|
||||
|
||||
- CRYPTO_ALG_TYPE_ABLKCIPHER Asynchronous multi-block cipher
|
||||
|
||||
- CRYPTO_ALG_TYPE_KPP Key-agreement Protocol Primitive (KPP) such as
|
||||
an ECDH or DH implementation
|
||||
|
||||
|
|
|
@ -63,8 +63,6 @@ request by using:
|
|||
When your driver receives a crypto_request, you must transfer it to
|
||||
the crypto engine via one of:
|
||||
|
||||
* crypto_transfer_ablkcipher_request_to_engine()
|
||||
|
||||
* crypto_transfer_aead_request_to_engine()
|
||||
|
||||
* crypto_transfer_akcipher_request_to_engine()
|
||||
|
@ -75,8 +73,6 @@ the crypto engine via one of:
|
|||
|
||||
At the end of the request process, a call to one of the following functions is needed:
|
||||
|
||||
* crypto_finalize_ablkcipher_request()
|
||||
|
||||
* crypto_finalize_aead_request()
|
||||
|
||||
* crypto_finalize_akcipher_request()
|
||||
|
|
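A minimal sketch of how the transfer and finalize calls pair up in a driver, using the skcipher variants: the mydrv_* names and the mydrv_run_hw() helper are hypothetical, while crypto_transfer_skcipher_request_to_engine() and crypto_finalize_skcipher_request() are the engine API entry points referred to above.

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>

/* Hypothetical per-tfm driver state; only the engine pointer matters here. */
struct mydrv_ctx {
	struct crypto_engine *engine;
};

static int mydrv_run_hw(struct skcipher_request *req);	/* hypothetical hardware helper */

/* .encrypt entry point: hand the request over to the engine queue. */
static int mydrv_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct mydrv_ctx *ctx = crypto_skcipher_ctx(tfm);

	return crypto_transfer_skcipher_request_to_engine(ctx->engine, req);
}

/* Engine callback: process one request, then report the result back. */
static int mydrv_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req = skcipher_request_cast(areq);
	int err = mydrv_run_hw(req);

	crypto_finalize_skcipher_request(engine, req, err);
	return 0;
}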
|
@ -128,25 +128,20 @@ process requests that are unaligned. This implies, however, additional
|
|||
overhead as the kernel crypto API needs to perform the realignment of
|
||||
the data which may imply moving of data.
|
||||
|
||||
Cipher Definition With struct blkcipher_alg and ablkcipher_alg
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Cipher Definition With struct skcipher_alg
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Struct blkcipher_alg defines a synchronous block cipher whereas struct
|
||||
ablkcipher_alg defines an asynchronous block cipher.
|
||||
Struct skcipher_alg defines a multi-block cipher, or more generally, a
|
||||
length-preserving symmetric cipher algorithm.
|
||||
|
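For orientation, a bare-bones struct skcipher_alg definition might look like the sketch below. The mydrv_* callbacks, context and driver name are hypothetical; the field names are the real skcipher_alg/crypto_alg members, and complete in-tree definitions appear in the ARM ChaCha glue code later in this diff.

#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

struct mydrv_ctx {				/* hypothetical per-tfm context */
	u8 key[AES_MAX_KEY_SIZE];
	unsigned int keylen;
};

static int mydrv_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen);
static int mydrv_encrypt(struct skcipher_request *req);
static int mydrv_decrypt(struct skcipher_request *req);

static struct skcipher_alg mydrv_cbc_aes = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-mydrv",
	.base.cra_priority	= 300,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mydrv_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mydrv_setkey,
	.encrypt		= mydrv_encrypt,
	.decrypt		= mydrv_decrypt,
};

/* Registered with crypto_register_skcipher() and removed with
 * crypto_unregister_skcipher(). */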
||||
Please refer to the single block cipher description for schematics of
|
||||
the block cipher usage.
|
||||
Scatterlist handling
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Specifics Of Asynchronous Multi-Block Cipher
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
There are a couple of specifics to the asynchronous interface.
|
||||
|
||||
First of all, some of the drivers will want to use the Generic
|
||||
ScatterWalk in case the hardware needs to be fed separate chunks of the
|
||||
scatterlist which contains the plaintext and will contain the
|
||||
ciphertext. Please refer to the ScatterWalk interface offered by the
|
||||
Linux kernel scatter / gather list implementation.
|
||||
Some drivers will want to use the Generic ScatterWalk in case the
|
||||
hardware needs to be fed separate chunks of the scatterlist which
|
||||
contains the plaintext and will contain the ciphertext. Please refer
|
||||
to the ScatterWalk interface offered by the Linux kernel scatter /
|
||||
gather list implementation.
|
||||
|
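As a loose illustration of the chunk-feeding pattern described above, the sketch below uses scatterwalk_map_and_copy() as a simple bounce-buffer helper; hw_process_block(), the buffer size and the mydrv_* name are hypothetical, and real drivers frequently use the lower-level scatterwalk walking primitives instead.

#include <crypto/scatterwalk.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>

static void hw_process_block(u8 *buf, unsigned int len);	/* hypothetical */

static int mydrv_walk_sg(struct scatterlist *src, struct scatterlist *dst,
			 unsigned int total)
{
	u8 buf[256];				/* bounce buffer */
	unsigned int offset = 0;

	while (total) {
		unsigned int n = min_t(unsigned int, total, sizeof(buf));

		/* Gather the next plaintext chunk from the source scatterlist. */
		scatterwalk_map_and_copy(buf, src, offset, n, 0);

		hw_process_block(buf, n);

		/* Scatter the processed chunk back into the destination. */
		scatterwalk_map_and_copy(buf, dst, offset, n, 1);

		offset += n;
		total -= n;
	}
	return 0;
}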
||||
Hashing [HASH]
|
||||
--------------
|
||||
|
|
|
@ -0,0 +1,60 @@
|
|||
# SPDX-License-Identifier: GPL-2.0
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/crypto/allwinner,sun8i-ss.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Allwinner Security System v2 driver
|
||||
|
||||
maintainers:
|
||||
- Corentin Labbe <corentin.labbe@gmail.com>
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
enum:
|
||||
- allwinner,sun8i-a83t-crypto
|
||||
- allwinner,sun9i-a80-crypto
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
||||
interrupts:
|
||||
maxItems: 1
|
||||
|
||||
clocks:
|
||||
items:
|
||||
- description: Bus clock
|
||||
- description: Module clock
|
||||
|
||||
clock-names:
|
||||
items:
|
||||
- const: bus
|
||||
- const: mod
|
||||
|
||||
resets:
|
||||
maxItems: 1
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- interrupts
|
||||
- clocks
|
||||
- clock-names
|
||||
- resets
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/interrupt-controller/arm-gic.h>
|
||||
#include <dt-bindings/clock/sun8i-a83t-ccu.h>
|
||||
#include <dt-bindings/reset/sun8i-a83t-ccu.h>
|
||||
|
||||
crypto: crypto@1c15000 {
|
||||
compatible = "allwinner,sun8i-a83t-crypto";
|
||||
reg = <0x01c15000 0x1000>;
|
||||
interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
|
||||
resets = <&ccu RST_BUS_SS>;
|
||||
clocks = <&ccu CLK_BUS_SS>, <&ccu CLK_SS>;
|
||||
clock-names = "bus", "mod";
|
||||
};
|
|
@ -0,0 +1,52 @@
|
|||
# SPDX-License-Identifier: GPL-2.0
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/crypto/amlogic,gxl-crypto.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Amlogic GXL Cryptographic Offloader
|
||||
|
||||
maintainers:
|
||||
- Corentin Labbe <clabbe@baylibre.com>
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
items:
|
||||
- const: amlogic,gxl-crypto
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
||||
interrupts:
|
||||
items:
|
||||
- description: "Interrupt for flow 0"
|
||||
- description: "Interrupt for flow 1"
|
||||
|
||||
clocks:
|
||||
maxItems: 1
|
||||
|
||||
clock-names:
|
||||
const: blkmv
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- interrupts
|
||||
- clocks
|
||||
- clock-names
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/interrupt-controller/irq.h>
|
||||
#include <dt-bindings/interrupt-controller/arm-gic.h>
|
||||
#include <dt-bindings/clock/gxbb-clkc.h>
|
||||
|
||||
crypto: crypto-engine@c883e000 {
|
||||
compatible = "amlogic,gxl-crypto";
|
||||
reg = <0x0 0xc883e000 0x0 0x36>;
|
||||
interrupts = <GIC_SPI 188 IRQ_TYPE_EDGE_RISING>, <GIC_SPI 189 IRQ_TYPE_EDGE_RISING>;
|
||||
clocks = <&clkc CLKID_BLKMV>;
|
||||
clock-names = "blkmv";
|
||||
};
|
|
@ -1,7 +1,7 @@
|
|||
Atmel TRNG (True Random Number Generator) block
|
||||
|
||||
Required properties:
|
||||
- compatible : Should be "atmel,at91sam9g45-trng"
|
||||
- compatible : Should be "atmel,at91sam9g45-trng" or "microchip,sam9x60-trng"
|
||||
- reg : Offset and length of the register set of this block
|
||||
- interrupts : the interrupt number for the TRNG block
|
||||
- clocks: should contain the TRNG clk source
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
NPCM SoC Random Number Generator
|
||||
|
||||
Required properties:
|
||||
- compatible : "nuvoton,npcm750-rng" for the NPCM7XX BMC.
|
||||
- reg : Specifies physical base address and size of the registers.
|
||||
|
||||
Example:
|
||||
|
||||
rng: rng@f000b000 {
|
||||
compatible = "nuvoton,npcm750-rng";
|
||||
reg = <0xf000b000 0x8>;
|
||||
};
|
|
@ -0,0 +1,27 @@
|
|||
OMAP ROM RNG driver binding
|
||||
|
||||
Secure SoCs may provide RNG via secure ROM calls like Nokia N900 does. The
|
||||
implementation can depend on the SoC secure ROM used.
|
||||
|
||||
- compatible:
|
||||
Usage: required
|
||||
Value type: <string>
|
||||
Definition: must be "nokia,n900-rom-rng"
|
||||
|
||||
- clocks:
|
||||
Usage: required
|
||||
Value type: <prop-encoded-array>
|
||||
Definition: reference to the RNG interface clock
|
||||
|
||||
- clock-names:
|
||||
Usage: required
|
||||
Value type: <stringlist>
|
||||
Definition: must be "ick"
|
||||
|
||||
Example:
|
||||
|
||||
rom_rng: rng {
|
||||
compatible = "nokia,n900-rom-rng";
|
||||
clocks = <&rng_ick>;
|
||||
clock-names = "ick";
|
||||
};
|
|
@ -0,0 +1,17 @@
|
|||
Exynos True Random Number Generator
|
||||
|
||||
Required properties:
|
||||
|
||||
- compatible : Should be "samsung,exynos5250-trng".
|
||||
- reg : Specifies base physical address and size of the registers map.
|
||||
- clocks : Phandle to clock-controller plus clock-specifier pair.
|
||||
- clock-names : "secss" as a clock name.
|
||||
|
||||
Example:
|
||||
|
||||
rng@10830600 {
|
||||
compatible = "samsung,exynos5250-trng";
|
||||
reg = <0x10830600 0x100>;
|
||||
clocks = <&clock CLK_SSS>;
|
||||
clock-names = "secss";
|
||||
};
|
37
MAINTAINERS
|
@ -682,11 +682,11 @@ S: Maintained
|
|||
F: Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt
|
||||
F: drivers/cpufreq/sun50i-cpufreq-nvmem.c
|
||||
|
||||
ALLWINNER SECURITY SYSTEM
|
||||
ALLWINNER CRYPTO DRIVERS
|
||||
M: Corentin Labbe <clabbe.montjoie@gmail.com>
|
||||
L: linux-crypto@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/crypto/sunxi-ss/
|
||||
F: drivers/crypto/allwinner/
|
||||
|
||||
ALLWINNER VPU DRIVER
|
||||
M: Maxime Ripard <mripard@kernel.org>
|
||||
|
@ -1470,6 +1470,14 @@ F: drivers/soc/amlogic/
|
|||
F: drivers/rtc/rtc-meson*
|
||||
N: meson
|
||||
|
||||
ARM/Amlogic Meson SoC Crypto Drivers
|
||||
M: Corentin Labbe <clabbe@baylibre.com>
|
||||
L: linux-crypto@vger.kernel.org
|
||||
L: linux-amlogic@lists.infradead.org
|
||||
S: Maintained
|
||||
F: drivers/crypto/amlogic/
|
||||
F: Documentation/devicetree/bindings/crypto/amlogic*
|
||||
|
||||
ARM/Amlogic Meson SoC Sound Drivers
|
||||
M: Jerome Brunet <jbrunet@baylibre.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
|
@ -7372,6 +7380,25 @@ F: include/uapi/linux/if_hippi.h
|
|||
F: net/802/hippi.c
|
||||
F: drivers/net/hippi/
|
||||
|
||||
HISILICON SECURITY ENGINE V2 DRIVER (SEC2)
|
||||
M: Zaibo Xu <xuzaibo@huawei.com>
|
||||
L: linux-crypto@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/crypto/hisilicon/sec2/sec_crypto.c
|
||||
F: drivers/crypto/hisilicon/sec2/sec_main.c
|
||||
F: drivers/crypto/hisilicon/sec2/sec_crypto.h
|
||||
F: drivers/crypto/hisilicon/sec2/sec.h
|
||||
F: Documentation/ABI/testing/debugfs-hisi-sec
|
||||
|
||||
HISILICON HIGH PERFORMANCE RSA ENGINE DRIVER (HPRE)
|
||||
M: Zaibo Xu <xuzaibo@huawei.com>
|
||||
L: linux-crypto@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/crypto/hisilicon/hpre/hpre_crypto.c
|
||||
F: drivers/crypto/hisilicon/hpre/hpre_main.c
|
||||
F: drivers/crypto/hisilicon/hpre/hpre.h
|
||||
F: Documentation/ABI/testing/debugfs-hisi-hpre
|
||||
|
||||
HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
|
||||
M: Yisen Zhuang <yisen.zhuang@huawei.com>
|
||||
M: Salil Mehta <salil.mehta@huawei.com>
|
||||
|
@ -7380,6 +7407,11 @@ W: http://www.hisilicon.com
|
|||
S: Maintained
|
||||
F: drivers/net/ethernet/hisilicon/hns3/
|
||||
|
||||
HISILICON TRUE RANDOM NUMBER GENERATOR V2 SUPPORT
|
||||
M: Zaibo Xu <xuzaibo@huawei.com>
|
||||
S: Maintained
|
||||
F: drivers/char/hw_random/hisi-trng-v2.c
|
||||
|
||||
HISILICON LPC BUS DRIVER
|
||||
M: john.garry@huawei.com
|
||||
W: http://www.hisilicon.com
|
||||
|
@ -7425,7 +7457,6 @@ S: Maintained
|
|||
F: drivers/crypto/hisilicon/qm.c
|
||||
F: drivers/crypto/hisilicon/qm.h
|
||||
F: drivers/crypto/hisilicon/sgl.c
|
||||
F: drivers/crypto/hisilicon/sgl.h
|
||||
F: drivers/crypto/hisilicon/zip/
|
||||
F: Documentation/ABI/testing/debugfs-hisi-zip
|
||||
|
||||
|
|
|
@ -155,6 +155,12 @@
|
|||
pwms = <&pwm9 0 26316 0>; /* 38000 Hz */
|
||||
};
|
||||
|
||||
rom_rng: rng {
|
||||
compatible = "nokia,n900-rom-rng";
|
||||
clocks = <&rng_ick>;
|
||||
clock-names = "ick";
|
||||
};
|
||||
|
||||
/* controlled (enabled/disabled) directly by bcm2048 and wl1251 */
|
||||
vctcxo: vctcxo {
|
||||
compatible = "fixed-clock";
|
||||
|
|
|
@ -30,7 +30,7 @@ config CRYPTO_SHA1_ARM_NEON
|
|||
|
||||
config CRYPTO_SHA1_ARM_CE
|
||||
tristate "SHA1 digest algorithm (ARM v8 Crypto Extensions)"
|
||||
depends on KERNEL_MODE_NEON
|
||||
depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
|
||||
select CRYPTO_SHA1_ARM
|
||||
select CRYPTO_HASH
|
||||
help
|
||||
|
@ -39,7 +39,7 @@ config CRYPTO_SHA1_ARM_CE
|
|||
|
||||
config CRYPTO_SHA2_ARM_CE
|
||||
tristate "SHA-224/256 digest algorithm (ARM v8 Crypto Extensions)"
|
||||
depends on KERNEL_MODE_NEON
|
||||
depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
|
||||
select CRYPTO_SHA256_ARM
|
||||
select CRYPTO_HASH
|
||||
help
|
||||
|
@ -81,7 +81,7 @@ config CRYPTO_AES_ARM
|
|||
config CRYPTO_AES_ARM_BS
|
||||
tristate "Bit sliced AES using NEON instructions"
|
||||
depends on KERNEL_MODE_NEON
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_LIB_AES
|
||||
select CRYPTO_SIMD
|
||||
help
|
||||
|
@ -96,8 +96,8 @@ config CRYPTO_AES_ARM_BS
|
|||
|
||||
config CRYPTO_AES_ARM_CE
|
||||
tristate "Accelerated AES using ARMv8 Crypto Extensions"
|
||||
depends on KERNEL_MODE_NEON
|
||||
select CRYPTO_BLKCIPHER
|
||||
depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_LIB_AES
|
||||
select CRYPTO_SIMD
|
||||
help
|
||||
|
@ -106,7 +106,7 @@ config CRYPTO_AES_ARM_CE
|
|||
|
||||
config CRYPTO_GHASH_ARM_CE
|
||||
tristate "PMULL-accelerated GHASH using NEON/ARMv8 Crypto Extensions"
|
||||
depends on KERNEL_MODE_NEON
|
||||
depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
|
||||
select CRYPTO_HASH
|
||||
select CRYPTO_CRYPTD
|
||||
select CRYPTO_GF128MUL
|
||||
|
@ -118,23 +118,35 @@ config CRYPTO_GHASH_ARM_CE
|
|||
|
||||
config CRYPTO_CRCT10DIF_ARM_CE
|
||||
tristate "CRCT10DIF digest algorithm using PMULL instructions"
|
||||
depends on KERNEL_MODE_NEON && CRC_T10DIF
|
||||
depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
|
||||
depends on CRC_T10DIF
|
||||
select CRYPTO_HASH
|
||||
|
||||
config CRYPTO_CRC32_ARM_CE
|
||||
tristate "CRC32(C) digest algorithm using CRC and/or PMULL instructions"
|
||||
depends on KERNEL_MODE_NEON && CRC32
|
||||
depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
|
||||
depends on CRC32
|
||||
select CRYPTO_HASH
|
||||
|
||||
config CRYPTO_CHACHA20_NEON
|
||||
tristate "NEON accelerated ChaCha stream cipher algorithms"
|
||||
depends on KERNEL_MODE_NEON
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_CHACHA20
|
||||
tristate "NEON and scalar accelerated ChaCha stream cipher algorithms"
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_ARCH_HAVE_LIB_CHACHA
|
||||
|
||||
config CRYPTO_POLY1305_ARM
|
||||
tristate "Accelerated scalar and SIMD Poly1305 hash implementations"
|
||||
select CRYPTO_HASH
|
||||
select CRYPTO_ARCH_HAVE_LIB_POLY1305
|
||||
|
||||
config CRYPTO_NHPOLY1305_NEON
|
||||
tristate "NEON accelerated NHPoly1305 hash function (for Adiantum)"
|
||||
depends on KERNEL_MODE_NEON
|
||||
select CRYPTO_NHPOLY1305
|
||||
|
||||
config CRYPTO_CURVE25519_NEON
|
||||
tristate "NEON accelerated Curve25519 scalar multiplication library"
|
||||
depends on KERNEL_MODE_NEON
|
||||
select CRYPTO_LIB_CURVE25519_GENERIC
|
||||
select CRYPTO_ARCH_HAVE_LIB_CURVE25519
|
||||
|
||||
endif
|
||||
|
|
|
@ -10,34 +10,16 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
|
|||
obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
|
||||
obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
|
||||
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
|
||||
obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o
|
||||
obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o
|
||||
obj-$(CONFIG_CRYPTO_CURVE25519_NEON) += curve25519-neon.o
|
||||
|
||||
ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
|
||||
ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
|
||||
ce-obj-$(CONFIG_CRYPTO_SHA2_ARM_CE) += sha2-arm-ce.o
|
||||
ce-obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o
|
||||
ce-obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM_CE) += crct10dif-arm-ce.o
|
||||
crc-obj-$(CONFIG_CRYPTO_CRC32_ARM_CE) += crc32-arm-ce.o
|
||||
|
||||
ifneq ($(crc-obj-y)$(crc-obj-m),)
|
||||
ifeq ($(call as-instr,.arch armv8-a\n.arch_extension crc,y,n),y)
|
||||
ce-obj-y += $(crc-obj-y)
|
||||
ce-obj-m += $(crc-obj-m)
|
||||
else
|
||||
$(warning These CRC Extensions modules need binutils 2.23 or higher)
|
||||
$(warning $(crc-obj-y) $(crc-obj-m))
|
||||
endif
|
||||
endif
|
||||
|
||||
ifneq ($(ce-obj-y)$(ce-obj-m),)
|
||||
ifeq ($(call as-instr,.fpu crypto-neon-fp-armv8,y,n),y)
|
||||
obj-y += $(ce-obj-y)
|
||||
obj-m += $(ce-obj-m)
|
||||
else
|
||||
$(warning These ARMv8 Crypto Extensions modules need binutils 2.23 or higher)
|
||||
$(warning $(ce-obj-y) $(ce-obj-m))
|
||||
endif
|
||||
endif
|
||||
obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
|
||||
obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
|
||||
obj-$(CONFIG_CRYPTO_SHA2_ARM_CE) += sha2-arm-ce.o
|
||||
obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o
|
||||
obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM_CE) += crct10dif-arm-ce.o
|
||||
obj-$(CONFIG_CRYPTO_CRC32_ARM_CE) += crc32-arm-ce.o
|
||||
|
||||
aes-arm-y := aes-cipher-core.o aes-cipher-glue.o
|
||||
aes-arm-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
|
||||
|
@ -53,13 +35,19 @@ aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
|
|||
ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
|
||||
crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
|
||||
crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o
|
||||
chacha-neon-y := chacha-neon-core.o chacha-neon-glue.o
|
||||
chacha-neon-y := chacha-scalar-core.o chacha-glue.o
|
||||
chacha-neon-$(CONFIG_KERNEL_MODE_NEON) += chacha-neon-core.o
|
||||
poly1305-arm-y := poly1305-core.o poly1305-glue.o
|
||||
nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o
|
||||
curve25519-neon-y := curve25519-core.o curve25519-glue.o
|
||||
|
||||
ifdef REGENERATE_ARM_CRYPTO
|
||||
quiet_cmd_perl = PERL $@
|
||||
cmd_perl = $(PERL) $(<) > $(@)
|
||||
|
||||
$(src)/poly1305-core.S_shipped: $(src)/poly1305-armv4.pl
|
||||
$(call cmd,perl)
|
||||
|
||||
$(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl
|
||||
$(call cmd,perl)
|
||||
|
||||
|
@ -67,4 +55,9 @@ $(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl
|
|||
$(call cmd,perl)
|
||||
endif
|
||||
|
||||
clean-files += sha256-core.S sha512-core.S
|
||||
clean-files += poly1305-core.S sha256-core.S sha512-core.S
|
||||
|
||||
# massage the perlasm code a bit so we only get the NEON routine if we need it
|
||||
poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5
|
||||
poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7
|
||||
AFLAGS_poly1305-core.o += $(poly1305-aflags-y)
|
||||
|
|
|
@ -0,0 +1,343 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* ARM NEON accelerated ChaCha and XChaCha stream ciphers,
|
||||
* including ChaCha20 (RFC7539)
|
||||
*
|
||||
* Copyright (C) 2016-2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
|
||||
* Copyright (C) 2015 Martin Willi
|
||||
*/
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/chacha.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/hwcap.h>
|
||||
#include <asm/neon.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
|
||||
int nrounds);
|
||||
asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
|
||||
int nrounds);
|
||||
asmlinkage void hchacha_block_arm(const u32 *state, u32 *out, int nrounds);
|
||||
asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);
|
||||
|
||||
asmlinkage void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes,
|
||||
const u32 *state, int nrounds);
|
||||
|
||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_neon);
|
||||
|
||||
static inline bool neon_usable(void)
|
||||
{
|
||||
return static_branch_likely(&use_neon) && crypto_simd_usable();
|
||||
}
|
||||
|
||||
static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
|
||||
unsigned int bytes, int nrounds)
|
||||
{
|
||||
u8 buf[CHACHA_BLOCK_SIZE];
|
||||
|
||||
while (bytes >= CHACHA_BLOCK_SIZE * 4) {
|
||||
chacha_4block_xor_neon(state, dst, src, nrounds);
|
||||
bytes -= CHACHA_BLOCK_SIZE * 4;
|
||||
src += CHACHA_BLOCK_SIZE * 4;
|
||||
dst += CHACHA_BLOCK_SIZE * 4;
|
||||
state[12] += 4;
|
||||
}
|
||||
while (bytes >= CHACHA_BLOCK_SIZE) {
|
||||
chacha_block_xor_neon(state, dst, src, nrounds);
|
||||
bytes -= CHACHA_BLOCK_SIZE;
|
||||
src += CHACHA_BLOCK_SIZE;
|
||||
dst += CHACHA_BLOCK_SIZE;
|
||||
state[12]++;
|
||||
}
|
||||
if (bytes) {
|
||||
memcpy(buf, src, bytes);
|
||||
chacha_block_xor_neon(state, buf, buf, nrounds);
|
||||
memcpy(dst, buf, bytes);
|
||||
}
|
||||
}
|
||||
|
||||
void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
|
||||
{
|
||||
if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable()) {
|
||||
hchacha_block_arm(state, stream, nrounds);
|
||||
} else {
|
||||
kernel_neon_begin();
|
||||
hchacha_block_neon(state, stream, nrounds);
|
||||
kernel_neon_end();
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(hchacha_block_arch);
|
||||
|
||||
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
|
||||
{
|
||||
chacha_init_generic(state, key, iv);
|
||||
}
|
||||
EXPORT_SYMBOL(chacha_init_arch);
|
||||
|
||||
void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
|
||||
int nrounds)
|
||||
{
|
||||
if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable() ||
|
||||
bytes <= CHACHA_BLOCK_SIZE) {
|
||||
chacha_doarm(dst, src, bytes, state, nrounds);
|
||||
state[12] += DIV_ROUND_UP(bytes, CHACHA_BLOCK_SIZE);
|
||||
return;
|
||||
}
|
||||
|
||||
kernel_neon_begin();
|
||||
chacha_doneon(state, dst, src, bytes, nrounds);
|
||||
kernel_neon_end();
|
||||
}
|
||||
EXPORT_SYMBOL(chacha_crypt_arch);
|
||||
|
||||
static int chacha_stream_xor(struct skcipher_request *req,
|
||||
const struct chacha_ctx *ctx, const u8 *iv,
|
||||
bool neon)
|
||||
{
|
||||
struct skcipher_walk walk;
|
||||
u32 state[16];
|
||||
int err;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
chacha_init_generic(state, ctx->key, iv);
|
||||
|
||||
while (walk.nbytes > 0) {
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
|
||||
if (nbytes < walk.total)
|
||||
nbytes = round_down(nbytes, walk.stride);
|
||||
|
||||
if (!neon) {
|
||||
chacha_doarm(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
nbytes, state, ctx->nrounds);
|
||||
state[12] += DIV_ROUND_UP(nbytes, CHACHA_BLOCK_SIZE);
|
||||
} else {
|
||||
kernel_neon_begin();
|
||||
chacha_doneon(state, walk.dst.virt.addr,
|
||||
walk.src.virt.addr, nbytes, ctx->nrounds);
|
||||
kernel_neon_end();
|
||||
}
|
||||
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int do_chacha(struct skcipher_request *req, bool neon)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
return chacha_stream_xor(req, ctx, req->iv, neon);
|
||||
}
|
||||
|
||||
static int chacha_arm(struct skcipher_request *req)
|
||||
{
|
||||
return do_chacha(req, false);
|
||||
}
|
||||
|
||||
static int chacha_neon(struct skcipher_request *req)
|
||||
{
|
||||
return do_chacha(req, neon_usable());
|
||||
}
|
||||
|
||||
static int do_xchacha(struct skcipher_request *req, bool neon)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct chacha_ctx subctx;
|
||||
u32 state[16];
|
||||
u8 real_iv[16];
|
||||
|
||||
chacha_init_generic(state, ctx->key, req->iv);
|
||||
|
||||
if (!neon) {
|
||||
hchacha_block_arm(state, subctx.key, ctx->nrounds);
|
||||
} else {
|
||||
kernel_neon_begin();
|
||||
hchacha_block_neon(state, subctx.key, ctx->nrounds);
|
||||
kernel_neon_end();
|
||||
}
|
||||
subctx.nrounds = ctx->nrounds;
|
||||
|
||||
memcpy(&real_iv[0], req->iv + 24, 8);
|
||||
memcpy(&real_iv[8], req->iv + 16, 8);
|
||||
return chacha_stream_xor(req, &subctx, real_iv, neon);
|
||||
}
|
||||
|
||||
static int xchacha_arm(struct skcipher_request *req)
|
||||
{
|
||||
return do_xchacha(req, false);
|
||||
}
|
||||
|
||||
static int xchacha_neon(struct skcipher_request *req)
|
||||
{
|
||||
return do_xchacha(req, neon_usable());
|
||||
}
|
||||
|
||||
static struct skcipher_alg arm_algs[] = {
|
||||
{
|
||||
.base.cra_name = "chacha20",
|
||||
.base.cra_driver_name = "chacha20-arm",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct chacha_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.min_keysize = CHACHA_KEY_SIZE,
|
||||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = CHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.setkey = chacha20_setkey,
|
||||
.encrypt = chacha_arm,
|
||||
.decrypt = chacha_arm,
|
||||
}, {
|
||||
.base.cra_name = "xchacha20",
|
||||
.base.cra_driver_name = "xchacha20-arm",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct chacha_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.min_keysize = CHACHA_KEY_SIZE,
|
||||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = XCHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.setkey = chacha20_setkey,
|
||||
.encrypt = xchacha_arm,
|
||||
.decrypt = xchacha_arm,
|
||||
}, {
|
||||
.base.cra_name = "xchacha12",
|
||||
.base.cra_driver_name = "xchacha12-arm",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct chacha_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.min_keysize = CHACHA_KEY_SIZE,
|
||||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = XCHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.setkey = chacha12_setkey,
|
||||
.encrypt = xchacha_arm,
|
||||
.decrypt = xchacha_arm,
|
||||
},
|
||||
};
|
||||
|
||||
static struct skcipher_alg neon_algs[] = {
|
||||
{
|
||||
.base.cra_name = "chacha20",
|
||||
.base.cra_driver_name = "chacha20-neon",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct chacha_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.min_keysize = CHACHA_KEY_SIZE,
|
||||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = CHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.walksize = 4 * CHACHA_BLOCK_SIZE,
|
||||
.setkey = chacha20_setkey,
|
||||
.encrypt = chacha_neon,
|
||||
.decrypt = chacha_neon,
|
||||
}, {
|
||||
.base.cra_name = "xchacha20",
|
||||
.base.cra_driver_name = "xchacha20-neon",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct chacha_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.min_keysize = CHACHA_KEY_SIZE,
|
||||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = XCHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.walksize = 4 * CHACHA_BLOCK_SIZE,
|
||||
.setkey = chacha20_setkey,
|
||||
.encrypt = xchacha_neon,
|
||||
.decrypt = xchacha_neon,
|
||||
}, {
|
||||
.base.cra_name = "xchacha12",
|
||||
.base.cra_driver_name = "xchacha12-neon",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct chacha_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.min_keysize = CHACHA_KEY_SIZE,
|
||||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = XCHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.walksize = 4 * CHACHA_BLOCK_SIZE,
|
||||
.setkey = chacha12_setkey,
|
||||
.encrypt = xchacha_neon,
|
||||
.decrypt = xchacha_neon,
|
||||
}
|
||||
};
|
||||
|
||||
static int __init chacha_simd_mod_init(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = crypto_register_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) {
|
||||
int i;
|
||||
|
||||
switch (read_cpuid_part()) {
|
||||
case ARM_CPU_PART_CORTEX_A7:
|
||||
case ARM_CPU_PART_CORTEX_A5:
|
||||
/*
|
||||
* The Cortex-A7 and Cortex-A5 do not perform well with
|
||||
* the NEON implementation but do incredibly well with the
|
||||
* scalar one and use less power.
|
||||
*/
|
||||
for (i = 0; i < ARRAY_SIZE(neon_algs); i++)
|
||||
neon_algs[i].base.cra_priority = 0;
|
||||
break;
|
||||
default:
|
||||
static_branch_enable(&use_neon);
|
||||
}
|
||||
|
||||
err = crypto_register_skciphers(neon_algs, ARRAY_SIZE(neon_algs));
|
||||
if (err)
|
||||
crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static void __exit chacha_simd_mod_fini(void)
|
||||
{
|
||||
crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
|
||||
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON))
|
||||
crypto_unregister_skciphers(neon_algs, ARRAY_SIZE(neon_algs));
|
||||
}
|
||||
|
||||
module_init(chacha_simd_mod_init);
|
||||
module_exit(chacha_simd_mod_fini);
|
||||
|
||||
MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (scalar and NEON accelerated)");
|
||||
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_ALIAS_CRYPTO("chacha20");
|
||||
MODULE_ALIAS_CRYPTO("chacha20-arm");
|
||||
MODULE_ALIAS_CRYPTO("xchacha20");
|
||||
MODULE_ALIAS_CRYPTO("xchacha20-arm");
|
||||
MODULE_ALIAS_CRYPTO("xchacha12");
|
||||
MODULE_ALIAS_CRYPTO("xchacha12-arm");
|
||||
#ifdef CONFIG_KERNEL_MODE_NEON
|
||||
MODULE_ALIAS_CRYPTO("chacha20-neon");
|
||||
MODULE_ALIAS_CRYPTO("xchacha20-neon");
|
||||
MODULE_ALIAS_CRYPTO("xchacha12-neon");
|
||||
#endif
|
|
@ -1,202 +0,0 @@
|
|||
/*
|
||||
* ARM NEON accelerated ChaCha and XChaCha stream ciphers,
|
||||
* including ChaCha20 (RFC7539)
|
||||
*
|
||||
* Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* Based on:
|
||||
* ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
|
||||
*
|
||||
* Copyright (C) 2015 Martin Willi
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/chacha.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <asm/hwcap.h>
|
||||
#include <asm/neon.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
|
||||
int nrounds);
|
||||
asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
|
||||
int nrounds);
|
||||
asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);
|
||||
|
||||
static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
|
||||
unsigned int bytes, int nrounds)
|
||||
{
|
||||
u8 buf[CHACHA_BLOCK_SIZE];
|
||||
|
||||
while (bytes >= CHACHA_BLOCK_SIZE * 4) {
|
||||
chacha_4block_xor_neon(state, dst, src, nrounds);
|
||||
bytes -= CHACHA_BLOCK_SIZE * 4;
|
||||
src += CHACHA_BLOCK_SIZE * 4;
|
||||
dst += CHACHA_BLOCK_SIZE * 4;
|
||||
state[12] += 4;
|
||||
}
|
||||
while (bytes >= CHACHA_BLOCK_SIZE) {
|
||||
chacha_block_xor_neon(state, dst, src, nrounds);
|
||||
bytes -= CHACHA_BLOCK_SIZE;
|
||||
src += CHACHA_BLOCK_SIZE;
|
||||
dst += CHACHA_BLOCK_SIZE;
|
||||
state[12]++;
|
||||
}
|
||||
if (bytes) {
|
||||
memcpy(buf, src, bytes);
|
||||
chacha_block_xor_neon(state, buf, buf, nrounds);
|
||||
memcpy(dst, buf, bytes);
|
||||
}
|
||||
}
|
||||
|
||||
static int chacha_neon_stream_xor(struct skcipher_request *req,
|
||||
const struct chacha_ctx *ctx, const u8 *iv)
|
||||
{
|
||||
struct skcipher_walk walk;
|
||||
u32 state[16];
|
||||
int err;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
crypto_chacha_init(state, ctx, iv);
|
||||
|
||||
while (walk.nbytes > 0) {
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
|
||||
if (nbytes < walk.total)
|
||||
nbytes = round_down(nbytes, walk.stride);
|
||||
|
||||
kernel_neon_begin();
|
||||
chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr,
|
||||
nbytes, ctx->nrounds);
|
||||
kernel_neon_end();
|
||||
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int chacha_neon(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
|
||||
return crypto_chacha_crypt(req);
|
||||
|
||||
return chacha_neon_stream_xor(req, ctx, req->iv);
|
||||
}
|
||||
|
||||
static int xchacha_neon(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct chacha_ctx subctx;
|
||||
u32 state[16];
|
||||
u8 real_iv[16];
|
||||
|
||||
if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
|
||||
return crypto_xchacha_crypt(req);
|
||||
|
||||
crypto_chacha_init(state, ctx, req->iv);
|
||||
|
||||
kernel_neon_begin();
|
||||
hchacha_block_neon(state, subctx.key, ctx->nrounds);
|
||||
kernel_neon_end();
|
||||
subctx.nrounds = ctx->nrounds;
|
||||
|
||||
memcpy(&real_iv[0], req->iv + 24, 8);
|
||||
memcpy(&real_iv[8], req->iv + 16, 8);
|
||||
return chacha_neon_stream_xor(req, &subctx, real_iv);
|
||||
}
|
||||
|
||||
static struct skcipher_alg algs[] = {
|
||||
{
|
||||
.base.cra_name = "chacha20",
|
||||
.base.cra_driver_name = "chacha20-neon",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct chacha_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.min_keysize = CHACHA_KEY_SIZE,
|
||||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = CHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.walksize = 4 * CHACHA_BLOCK_SIZE,
|
||||
.setkey = crypto_chacha20_setkey,
|
||||
.encrypt = chacha_neon,
|
||||
.decrypt = chacha_neon,
|
||||
}, {
|
||||
.base.cra_name = "xchacha20",
|
||||
.base.cra_driver_name = "xchacha20-neon",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct chacha_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.min_keysize = CHACHA_KEY_SIZE,
|
||||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = XCHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.walksize = 4 * CHACHA_BLOCK_SIZE,
|
||||
.setkey = crypto_chacha20_setkey,
|
||||
.encrypt = xchacha_neon,
|
||||
.decrypt = xchacha_neon,
|
||||
}, {
|
||||
.base.cra_name = "xchacha12",
|
||||
.base.cra_driver_name = "xchacha12-neon",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct chacha_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.min_keysize = CHACHA_KEY_SIZE,
|
||||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = XCHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.walksize = 4 * CHACHA_BLOCK_SIZE,
|
||||
.setkey = crypto_chacha12_setkey,
|
||||
.encrypt = xchacha_neon,
|
||||
.decrypt = xchacha_neon,
|
||||
}
|
||||
};
|
||||
|
||||
static int __init chacha_simd_mod_init(void)
|
||||
{
|
||||
if (!(elf_hwcap & HWCAP_NEON))
|
||||
return -ENODEV;
|
||||
|
||||
return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
|
||||
}
|
||||
|
||||
static void __exit chacha_simd_mod_fini(void)
|
||||
{
|
||||
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
|
||||
}
|
||||
|
||||
module_init(chacha_simd_mod_init);
|
||||
module_exit(chacha_simd_mod_fini);
|
||||
|
||||
MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (NEON accelerated)");
|
||||
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_ALIAS_CRYPTO("chacha20");
|
||||
MODULE_ALIAS_CRYPTO("chacha20-neon");
|
||||
MODULE_ALIAS_CRYPTO("xchacha20");
|
||||
MODULE_ALIAS_CRYPTO("xchacha20-neon");
|
||||
MODULE_ALIAS_CRYPTO("xchacha12");
|
||||
MODULE_ALIAS_CRYPTO("xchacha12-neon");
|
|
@ -0,0 +1,460 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (C) 2018 Google, Inc.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/assembler.h>
|
||||
|
||||
/*
|
||||
* Design notes:
|
||||
*
|
||||
* 16 registers would be needed to hold the state matrix, but only 14 are
|
||||
* available because 'sp' and 'pc' cannot be used. So we spill the elements
|
||||
* (x8, x9) to the stack and swap them out with (x10, x11). This adds one
|
||||
* 'ldrd' and one 'strd' instruction per round.
|
||||
*
|
||||
* All rotates are performed using the implicit rotate operand accepted by the
|
||||
* 'add' and 'eor' instructions. This is faster than using explicit rotate
|
||||
* instructions. To make this work, we allow the values in the second and last
|
||||
* rows of the ChaCha state matrix (rows 'b' and 'd') to temporarily have the
|
||||
* wrong rotation amount. The rotation amount is then fixed up just in time
|
||||
* when the values are used. 'brot' is the number of bits the values in row 'b'
|
||||
* need to be rotated right to arrive at the correct values, and 'drot'
|
||||
* similarly for row 'd'. (brot, drot) start out as (0, 0) but we make it such
|
||||
* that they end up as (25, 24) after every round.
|
||||
*/
|
||||
|
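To make the brot/drot bookkeeping concrete, the following editorial C model (not part of the patch) shows one quarter-round with b and d kept in deferred-rotation form: each is stored "unrotated", and brot/drot record how far right it must be rotated before its true value is used, mirroring the implicit ror operands in the assembly below.

#include <stdint.h>

static inline uint32_t ror32(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << ((32 - n) & 31));
}

/*
 * One ChaCha quarter-round over (a, b, c, d).  On entry the true values of
 * b and d are ror32(*b, *brot) and ror32(*d, *drot); on exit they are
 * ror32(*b, 25) and ror32(*d, 24), matching the (25, 24) invariant above.
 */
static void quarterround_lazy(uint32_t *a, uint32_t *b, uint32_t *c,
			      uint32_t *d, unsigned int *brot,
			      unsigned int *drot)
{
	/* a += b; d ^= a; d = rol(d, 16)  ->  leave d unrotated, drot = 16 */
	*a += ror32(*b, *brot);
	*d = *a ^ ror32(*d, *drot);

	/* c += d; b ^= c; b = rol(b, 12)  ->  brot becomes 32 - 12 = 20 */
	*c += ror32(*d, 16);
	*b = *c ^ ror32(*b, *brot);

	/* a += b; d ^= a; d = rol(d, 8)   ->  drot becomes 32 - 8 = 24 */
	*a += ror32(*b, 20);
	*d = *a ^ ror32(*d, 16);

	/* c += d; b ^= c; b = rol(b, 7)   ->  brot becomes 32 - 7 = 25 */
	*c += ror32(*d, 24);
	*b = *c ^ ror32(*b, 20);

	*brot = 25;
	*drot = 24;
}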
||||
// ChaCha state registers
|
||||
X0 .req r0
|
||||
X1 .req r1
|
||||
X2 .req r2
|
||||
X3 .req r3
|
||||
X4 .req r4
|
||||
X5 .req r5
|
||||
X6 .req r6
|
||||
X7 .req r7
|
||||
X8_X10 .req r8 // shared by x8 and x10
|
||||
X9_X11 .req r9 // shared by x9 and x11
|
||||
X12 .req r10
|
||||
X13 .req r11
|
||||
X14 .req r12
|
||||
X15 .req r14
|
||||
|
||||
.macro __rev out, in, t0, t1, t2
|
||||
.if __LINUX_ARM_ARCH__ >= 6
|
||||
rev \out, \in
|
||||
.else
|
||||
lsl \t0, \in, #24
|
||||
and \t1, \in, #0xff00
|
||||
and \t2, \in, #0xff0000
|
||||
orr \out, \t0, \in, lsr #24
|
||||
orr \out, \out, \t1, lsl #8
|
||||
orr \out, \out, \t2, lsr #8
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro _le32_bswap x, t0, t1, t2
|
||||
#ifdef __ARMEB__
|
||||
__rev \x, \x, \t0, \t1, \t2
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro _le32_bswap_4x a, b, c, d, t0, t1, t2
|
||||
_le32_bswap \a, \t0, \t1, \t2
|
||||
_le32_bswap \b, \t0, \t1, \t2
|
||||
_le32_bswap \c, \t0, \t1, \t2
|
||||
_le32_bswap \d, \t0, \t1, \t2
|
||||
.endm
|
||||
|
||||
.macro __ldrd a, b, src, offset
|
||||
#if __LINUX_ARM_ARCH__ >= 6
|
||||
ldrd \a, \b, [\src, #\offset]
|
||||
#else
|
||||
ldr \a, [\src, #\offset]
|
||||
ldr \b, [\src, #\offset + 4]
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro __strd a, b, dst, offset
|
||||
#if __LINUX_ARM_ARCH__ >= 6
|
||||
strd \a, \b, [\dst, #\offset]
|
||||
#else
|
||||
str \a, [\dst, #\offset]
|
||||
str \b, [\dst, #\offset + 4]
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro _halfround a1, b1, c1, d1, a2, b2, c2, d2
|
||||
|
||||
// a += b; d ^= a; d = rol(d, 16);
|
||||
add \a1, \a1, \b1, ror #brot
|
||||
add \a2, \a2, \b2, ror #brot
|
||||
eor \d1, \a1, \d1, ror #drot
|
||||
eor \d2, \a2, \d2, ror #drot
|
||||
// drot == 32 - 16 == 16
|
||||
|
||||
// c += d; b ^= c; b = rol(b, 12);
|
||||
add \c1, \c1, \d1, ror #16
|
||||
add \c2, \c2, \d2, ror #16
|
||||
eor \b1, \c1, \b1, ror #brot
|
||||
eor \b2, \c2, \b2, ror #brot
|
||||
// brot == 32 - 12 == 20
|
||||
|
||||
// a += b; d ^= a; d = rol(d, 8);
|
||||
add \a1, \a1, \b1, ror #20
|
||||
add \a2, \a2, \b2, ror #20
|
||||
eor \d1, \a1, \d1, ror #16
|
||||
eor \d2, \a2, \d2, ror #16
|
||||
// drot == 32 - 8 == 24
|
||||
|
||||
// c += d; b ^= c; b = rol(b, 7);
|
||||
add \c1, \c1, \d1, ror #24
|
||||
add \c2, \c2, \d2, ror #24
|
||||
eor \b1, \c1, \b1, ror #20
|
||||
eor \b2, \c2, \b2, ror #20
|
||||
// brot == 32 - 7 == 25
|
||||
.endm
|
||||
|
||||
.macro _doubleround
|
||||
|
||||
// column round
|
||||
|
||||
// quarterrounds: (x0, x4, x8, x12) and (x1, x5, x9, x13)
|
||||
_halfround X0, X4, X8_X10, X12, X1, X5, X9_X11, X13
|
||||
|
||||
// save (x8, x9); restore (x10, x11)
|
||||
__strd X8_X10, X9_X11, sp, 0
|
||||
__ldrd X8_X10, X9_X11, sp, 8
|
||||
|
||||
// quarterrounds: (x2, x6, x10, x14) and (x3, x7, x11, x15)
|
||||
_halfround X2, X6, X8_X10, X14, X3, X7, X9_X11, X15
|
||||
|
||||
.set brot, 25
|
||||
.set drot, 24
|
||||
|
||||
// diagonal round
|
||||
|
||||
// quarterrounds: (x0, x5, x10, x15) and (x1, x6, x11, x12)
|
||||
_halfround X0, X5, X8_X10, X15, X1, X6, X9_X11, X12
|
||||
|
||||
// save (x10, x11); restore (x8, x9)
|
||||
__strd X8_X10, X9_X11, sp, 8
|
||||
__ldrd X8_X10, X9_X11, sp, 0
|
||||
|
||||
// quarterrounds: (x2, x7, x8, x13) and (x3, x4, x9, x14)
|
||||
_halfround X2, X7, X8_X10, X13, X3, X4, X9_X11, X14
|
||||
.endm
|
||||
|
||||
.macro _chacha_permute nrounds
|
||||
.set brot, 0
|
||||
.set drot, 0
|
||||
.rept \nrounds / 2
|
||||
_doubleround
|
||||
.endr
|
||||
.endm
|
||||
|
||||
.macro _chacha nrounds
|
||||
|
||||
.Lnext_block\@:
|
||||
// Stack: unused0-unused1 x10-x11 x0-x15 OUT IN LEN
|
||||
// Registers contain x0-x9,x12-x15.
|
||||
|
||||
// Do the core ChaCha permutation to update x0-x15.
|
||||
_chacha_permute \nrounds
|
||||
|
||||
add sp, #8
|
||||
// Stack: x10-x11 orig_x0-orig_x15 OUT IN LEN
|
||||
// Registers contain x0-x9,x12-x15.
|
||||
// x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
|
||||
|
||||
// Free up some registers (r8-r12,r14) by pushing (x8-x9,x12-x15).
|
||||
push {X8_X10, X9_X11, X12, X13, X14, X15}
|
||||
|
||||
// Load (OUT, IN, LEN).
|
||||
ldr r14, [sp, #96]
|
||||
ldr r12, [sp, #100]
|
||||
ldr r11, [sp, #104]
|
||||
|
||||
orr r10, r14, r12
|
||||
|
||||
// Use slow path if fewer than 64 bytes remain.
|
||||
cmp r11, #64
|
||||
blt .Lxor_slowpath\@
|
||||
|
||||
// Use slow path if IN and/or OUT isn't 4-byte aligned. Needed even on
|
||||
// ARMv6+, since ldmia and stmia (used below) still require alignment.
|
||||
tst r10, #3
|
||||
bne .Lxor_slowpath\@
|
||||
|
||||
// Fast path: XOR 64 bytes of aligned data.
|
||||
|
||||
// Stack: x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN
|
||||
// Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is OUT.
|
||||
// x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
|
||||
|
||||
// x0-x3
|
||||
__ldrd r8, r9, sp, 32
|
||||
__ldrd r10, r11, sp, 40
|
||||
add X0, X0, r8
|
||||
add X1, X1, r9
|
||||
add X2, X2, r10
|
||||
add X3, X3, r11
|
||||
_le32_bswap_4x X0, X1, X2, X3, r8, r9, r10
|
||||
ldmia r12!, {r8-r11}
|
||||
eor X0, X0, r8
|
||||
eor X1, X1, r9
|
||||
eor X2, X2, r10
|
||||
eor X3, X3, r11
|
||||
stmia r14!, {X0-X3}
|
||||
|
||||
// x4-x7
|
||||
__ldrd r8, r9, sp, 48
|
||||
__ldrd r10, r11, sp, 56
|
||||
add X4, r8, X4, ror #brot
|
||||
add X5, r9, X5, ror #brot
|
||||
ldmia r12!, {X0-X3}
|
||||
add X6, r10, X6, ror #brot
|
||||
add X7, r11, X7, ror #brot
|
||||
_le32_bswap_4x X4, X5, X6, X7, r8, r9, r10
|
||||
eor X4, X4, X0
|
||||
eor X5, X5, X1
|
||||
eor X6, X6, X2
|
||||
eor X7, X7, X3
|
||||
stmia r14!, {X4-X7}
|
||||
|
||||
// x8-x15
|
||||
pop {r0-r7} // (x8-x9,x12-x15,x10-x11)
|
||||
__ldrd r8, r9, sp, 32
|
||||
__ldrd r10, r11, sp, 40
|
||||
add r0, r0, r8 // x8
|
||||
add r1, r1, r9 // x9
|
||||
add r6, r6, r10 // x10
|
||||
add r7, r7, r11 // x11
|
||||
_le32_bswap_4x r0, r1, r6, r7, r8, r9, r10
|
||||
ldmia r12!, {r8-r11}
|
||||
eor r0, r0, r8 // x8
|
||||
eor r1, r1, r9 // x9
|
||||
eor r6, r6, r10 // x10
|
||||
eor r7, r7, r11 // x11
|
||||
stmia r14!, {r0,r1,r6,r7}
|
||||
ldmia r12!, {r0,r1,r6,r7}
|
||||
__ldrd r8, r9, sp, 48
|
||||
__ldrd r10, r11, sp, 56
|
||||
add r2, r8, r2, ror #drot // x12
|
||||
add r3, r9, r3, ror #drot // x13
|
||||
add r4, r10, r4, ror #drot // x14
|
||||
add r5, r11, r5, ror #drot // x15
|
||||
_le32_bswap_4x r2, r3, r4, r5, r9, r10, r11
|
||||
ldr r9, [sp, #72] // load LEN
|
||||
eor r2, r2, r0 // x12
|
||||
eor r3, r3, r1 // x13
|
||||
eor r4, r4, r6 // x14
|
||||
eor r5, r5, r7 // x15
|
||||
subs r9, #64 // decrement and check LEN
|
||||
stmia r14!, {r2-r5}
|
||||
|
||||
beq .Ldone\@
|
||||
|
||||
.Lprepare_for_next_block\@:
|
||||
|
||||
// Stack: x0-x15 OUT IN LEN
|
||||
|
||||
// Increment block counter (x12)
|
||||
add r8, #1
|
||||
|
||||
// Store updated (OUT, IN, LEN)
|
||||
str r14, [sp, #64]
|
||||
str r12, [sp, #68]
|
||||
str r9, [sp, #72]
|
||||
|
||||
mov r14, sp
|
||||
|
||||
// Store updated block counter (x12)
|
||||
str r8, [sp, #48]
|
||||
|
||||
sub sp, #16
|
||||
|
||||
// Reload state and do next block
|
||||
ldmia r14!, {r0-r11} // load x0-x11
|
||||
__strd r10, r11, sp, 8 // store x10-x11 before state
|
||||
ldmia r14, {r10-r12,r14} // load x12-x15
|
||||
b .Lnext_block\@
|
||||
|
||||
.Lxor_slowpath\@:
|
||||
// Slow path: < 64 bytes remaining, or unaligned input or output buffer.
|
||||
// We handle it by storing the 64 bytes of keystream to the stack, then
|
||||
// XOR-ing the needed portion with the data.
|
||||
|
||||
// Allocate keystream buffer
|
||||
sub sp, #64
|
||||
mov r14, sp
|
||||
|
||||
// Stack: ks0-ks15 x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN
|
||||
// Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is &ks0.
|
||||
// x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
|
||||
|
||||
// Save keystream for x0-x3
|
||||
__ldrd r8, r9, sp, 96
|
||||
__ldrd r10, r11, sp, 104
|
||||
add X0, X0, r8
|
||||
add X1, X1, r9
|
||||
add X2, X2, r10
|
||||
add X3, X3, r11
|
||||
_le32_bswap_4x X0, X1, X2, X3, r8, r9, r10
|
||||
stmia r14!, {X0-X3}
|
||||
|
||||
// Save keystream for x4-x7
|
||||
__ldrd r8, r9, sp, 112
|
||||
__ldrd r10, r11, sp, 120
|
||||
add X4, r8, X4, ror #brot
|
||||
add X5, r9, X5, ror #brot
|
||||
add X6, r10, X6, ror #brot
|
||||
add X7, r11, X7, ror #brot
|
||||
_le32_bswap_4x X4, X5, X6, X7, r8, r9, r10
|
||||
add r8, sp, #64
|
||||
stmia r14!, {X4-X7}
|
||||
|
||||
// Save keystream for x8-x15
|
||||
ldm r8, {r0-r7} // (x8-x9,x12-x15,x10-x11)
|
||||
__ldrd r8, r9, sp, 128
|
||||
__ldrd r10, r11, sp, 136
|
||||
add r0, r0, r8 // x8
|
||||
add r1, r1, r9 // x9
|
||||
add r6, r6, r10 // x10
|
||||
add r7, r7, r11 // x11
|
||||
_le32_bswap_4x r0, r1, r6, r7, r8, r9, r10
|
||||
stmia r14!, {r0,r1,r6,r7}
|
||||
__ldrd r8, r9, sp, 144
|
||||
__ldrd r10, r11, sp, 152
|
||||
add r2, r8, r2, ror #drot // x12
|
||||
add r3, r9, r3, ror #drot // x13
|
||||
add r4, r10, r4, ror #drot // x14
|
||||
add r5, r11, r5, ror #drot // x15
|
||||
_le32_bswap_4x r2, r3, r4, r5, r9, r10, r11
|
||||
stmia r14, {r2-r5}
|
||||
|
||||
// Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN
|
||||
// Registers: r8 is block counter, r12 is IN.
|
||||
|
||||
ldr r9, [sp, #168] // LEN
|
||||
ldr r14, [sp, #160] // OUT
|
||||
cmp r9, #64
|
||||
mov r0, sp
|
||||
movle r1, r9
|
||||
movgt r1, #64
|
||||
// r1 is number of bytes to XOR, in range [1, 64]
|
||||
|
||||
.if __LINUX_ARM_ARCH__ < 6
|
||||
orr r2, r12, r14
|
||||
tst r2, #3 // IN or OUT misaligned?
|
||||
bne .Lxor_next_byte\@
|
||||
.endif
|
||||
|
||||
// XOR a word at a time
|
||||
.rept 16
|
||||
subs r1, #4
|
||||
blt .Lxor_words_done\@
|
||||
ldr r2, [r12], #4
|
||||
ldr r3, [r0], #4
|
||||
eor r2, r2, r3
|
||||
str r2, [r14], #4
|
||||
.endr
|
||||
b .Lxor_slowpath_done\@
|
||||
.Lxor_words_done\@:
|
||||
ands r1, r1, #3
|
||||
beq .Lxor_slowpath_done\@
|
||||
|
||||
// XOR a byte at a time
|
||||
.Lxor_next_byte\@:
|
||||
ldrb r2, [r12], #1
|
||||
ldrb r3, [r0], #1
|
||||
eor r2, r2, r3
|
||||
strb r2, [r14], #1
|
||||
subs r1, #1
|
||||
bne .Lxor_next_byte\@
|
||||
|
||||
.Lxor_slowpath_done\@:
|
||||
subs r9, #64
|
||||
add sp, #96
|
||||
bgt .Lprepare_for_next_block\@
|
||||
|
||||
.Ldone\@:
|
||||
.endm // _chacha
|
||||
|
||||
/*
|
||||
* void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes,
|
||||
* const u32 *state, int nrounds);
|
||||
*/
|
||||
ENTRY(chacha_doarm)
|
||||
cmp r2, #0 // len == 0?
|
||||
reteq lr
|
||||
|
||||
ldr ip, [sp]
|
||||
cmp ip, #12
|
||||
|
||||
push {r0-r2,r4-r11,lr}
|
||||
|
||||
// Push state x0-x15 onto stack.
|
||||
// Also store an extra copy of x10-x11 just before the state.
|
||||
|
||||
add X12, r3, #48
|
||||
ldm X12, {X12,X13,X14,X15}
|
||||
push {X12,X13,X14,X15}
|
||||
sub sp, sp, #64
|
||||
|
||||
__ldrd X8_X10, X9_X11, r3, 40
|
||||
__strd X8_X10, X9_X11, sp, 8
|
||||
__strd X8_X10, X9_X11, sp, 56
|
||||
ldm r3, {X0-X9_X11}
|
||||
__strd X0, X1, sp, 16
|
||||
__strd X2, X3, sp, 24
|
||||
__strd X4, X5, sp, 32
|
||||
__strd X6, X7, sp, 40
|
||||
__strd X8_X10, X9_X11, sp, 48
|
||||
|
||||
beq 1f
|
||||
_chacha 20
|
||||
|
||||
0: add sp, #76
|
||||
pop {r4-r11, pc}
|
||||
|
||||
1: _chacha 12
|
||||
b 0b
|
||||
ENDPROC(chacha_doarm)
|
||||
|
||||
/*
|
||||
* void hchacha_block_arm(const u32 state[16], u32 out[8], int nrounds);
|
||||
*/
|
||||
ENTRY(hchacha_block_arm)
|
||||
push {r1,r4-r11,lr}
|
||||
|
||||
cmp r2, #12 // ChaCha12 ?
|
||||
|
||||
mov r14, r0
|
||||
ldmia r14!, {r0-r11} // load x0-x11
|
||||
push {r10-r11} // store x10-x11 to stack
|
||||
ldm r14, {r10-r12,r14} // load x12-x15
|
||||
sub sp, #8
|
||||
|
||||
beq 1f
|
||||
_chacha_permute 20
|
||||
|
||||
// Skip over (unused0-unused1, x10-x11)
|
||||
0: add sp, #16
|
||||
|
||||
// Fix up rotations of x12-x15
|
||||
ror X12, X12, #drot
|
||||
ror X13, X13, #drot
|
||||
pop {r4} // load 'out'
|
||||
ror X14, X14, #drot
|
||||
ror X15, X15, #drot
|
||||
|
||||
// Store (x0-x3,x12-x15) to 'out'
|
||||
stm r4, {X0,X1,X2,X3,X12,X13,X14,X15}
|
||||
|
||||
pop {r4-r11,pc}
|
||||
|
||||
1: _chacha_permute 12
|
||||
b 0b
|
||||
ENDPROC(hchacha_block_arm)
|
|
@ -72,7 +72,7 @@
#endif

.text
.arch armv7-a
.arch armv8-a
.fpu crypto-neon-fp-armv8

init_crc .req r0
|
|
(Diff for this file not shown because it is too large.)
|
@ -0,0 +1,127 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 OR MIT
|
||||
/*
|
||||
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
|
||||
*
|
||||
* Based on public domain code from Daniel J. Bernstein and Peter Schwabe. This
|
||||
* began from SUPERCOP's curve25519/neon2/scalarmult.s, but has subsequently been
|
||||
* manually reworked for use in kernel space.
|
||||
*/
|
||||
|
||||
#include <asm/hwcap.h>
|
||||
#include <asm/neon.h>
|
||||
#include <asm/simd.h>
|
||||
#include <crypto/internal/kpp.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <crypto/curve25519.h>
|
||||
|
||||
asmlinkage void curve25519_neon(u8 mypublic[CURVE25519_KEY_SIZE],
|
||||
const u8 secret[CURVE25519_KEY_SIZE],
|
||||
const u8 basepoint[CURVE25519_KEY_SIZE]);
|
||||
|
||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
|
||||
|
||||
void curve25519_arch(u8 out[CURVE25519_KEY_SIZE],
|
||||
const u8 scalar[CURVE25519_KEY_SIZE],
|
||||
const u8 point[CURVE25519_KEY_SIZE])
|
||||
{
|
||||
if (static_branch_likely(&have_neon) && crypto_simd_usable()) {
|
||||
kernel_neon_begin();
|
||||
curve25519_neon(out, scalar, point);
|
||||
kernel_neon_end();
|
||||
} else {
|
||||
curve25519_generic(out, scalar, point);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(curve25519_arch);
|
||||
|
||||
static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
|
||||
unsigned int len)
|
||||
{
|
||||
u8 *secret = kpp_tfm_ctx(tfm);
|
||||
|
||||
if (!len)
|
||||
curve25519_generate_secret(secret);
|
||||
else if (len == CURVE25519_KEY_SIZE &&
|
||||
crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE))
|
||||
memcpy(secret, buf, CURVE25519_KEY_SIZE);
|
||||
else
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int curve25519_compute_value(struct kpp_request *req)
|
||||
{
|
||||
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
|
||||
const u8 *secret = kpp_tfm_ctx(tfm);
|
||||
u8 public_key[CURVE25519_KEY_SIZE];
|
||||
u8 buf[CURVE25519_KEY_SIZE];
|
||||
int copied, nbytes;
|
||||
u8 const *bp;
|
||||
|
||||
if (req->src) {
|
||||
copied = sg_copy_to_buffer(req->src,
|
||||
sg_nents_for_len(req->src,
|
||||
CURVE25519_KEY_SIZE),
|
||||
public_key, CURVE25519_KEY_SIZE);
|
||||
if (copied != CURVE25519_KEY_SIZE)
|
||||
return -EINVAL;
|
||||
bp = public_key;
|
||||
} else {
|
||||
bp = curve25519_base_point;
|
||||
}
|
||||
|
||||
curve25519_arch(buf, secret, bp);
|
||||
|
||||
/* might want less than we've got */
|
||||
nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len);
|
||||
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
|
||||
nbytes),
|
||||
buf, nbytes);
|
||||
if (copied != nbytes)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int curve25519_max_size(struct crypto_kpp *tfm)
|
||||
{
|
||||
return CURVE25519_KEY_SIZE;
|
||||
}
|
||||
|
||||
static struct kpp_alg curve25519_alg = {
|
||||
.base.cra_name = "curve25519",
|
||||
.base.cra_driver_name = "curve25519-neon",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.base.cra_ctxsize = CURVE25519_KEY_SIZE,
|
||||
|
||||
.set_secret = curve25519_set_secret,
|
||||
.generate_public_key = curve25519_compute_value,
|
||||
.compute_shared_secret = curve25519_compute_value,
|
||||
.max_size = curve25519_max_size,
|
||||
};
|
||||
|
||||
static int __init mod_init(void)
|
||||
{
|
||||
if (elf_hwcap & HWCAP_NEON) {
|
||||
static_branch_enable(&have_neon);
|
||||
return crypto_register_kpp(&curve25519_alg);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __exit mod_exit(void)
|
||||
{
|
||||
if (elf_hwcap & HWCAP_NEON)
|
||||
crypto_unregister_kpp(&curve25519_alg);
|
||||
}
|
||||
|
||||
module_init(mod_init);
|
||||
module_exit(mod_exit);
|
||||
|
||||
MODULE_ALIAS_CRYPTO("curve25519");
|
||||
MODULE_ALIAS_CRYPTO("curve25519-neon");
|
||||
MODULE_LICENSE("GPL v2");
|
|
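Besides the kpp algorithm, the file above exports curve25519_arch() as a library call. A minimal usage sketch (kernel context assumed; only names declared in this file and in <crypto/curve25519.h> are used):

#include <crypto/curve25519.h>

static void curve25519_lib_example(const u8 peer_public[CURVE25519_KEY_SIZE])
{
	u8 secret[CURVE25519_KEY_SIZE];
	u8 public_key[CURVE25519_KEY_SIZE];
	u8 shared[CURVE25519_KEY_SIZE];

	curve25519_generate_secret(secret);	/* random, clamped scalar */

	/* public = secret * base point */
	curve25519_arch(public_key, secret, curve25519_base_point);

	/* ECDH: shared = secret * peer's public point */
	curve25519_arch(shared, secret, peer_public);
}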
@ -88,6 +88,7 @@
T3_H .req d17

.text
.arch armv8-a
.fpu crypto-neon-fp-armv8

.macro __pmull_p64, rd, rn, rm, b1, b2, b3, b4
|
|
(Diff for this file not shown because it is too large.)
(Diff for this file not shown because it is too large.)
|
@ -0,0 +1,276 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* OpenSSL/Cryptogams accelerated Poly1305 transform for ARM
|
||||
*
|
||||
* Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
|
||||
*/
|
||||
|
||||
#include <asm/hwcap.h>
|
||||
#include <asm/neon.h>
|
||||
#include <asm/simd.h>
|
||||
#include <asm/unaligned.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/poly1305.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <linux/cpufeature.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
void poly1305_init_arm(void *state, const u8 *key);
|
||||
void poly1305_blocks_arm(void *state, const u8 *src, u32 len, u32 hibit);
|
||||
void poly1305_emit_arm(void *state, __le32 *digest, const u32 *nonce);
|
||||
|
||||
void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
|
||||
{
|
||||
}
|
||||
|
||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
|
||||
|
||||
void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
|
||||
{
|
||||
poly1305_init_arm(&dctx->h, key);
|
||||
dctx->s[0] = get_unaligned_le32(key + 16);
|
||||
dctx->s[1] = get_unaligned_le32(key + 20);
|
||||
dctx->s[2] = get_unaligned_le32(key + 24);
|
||||
dctx->s[3] = get_unaligned_le32(key + 28);
|
||||
dctx->buflen = 0;
|
||||
}
|
||||
EXPORT_SYMBOL(poly1305_init_arch);
|
||||
|
||||
static int arm_poly1305_init(struct shash_desc *desc)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
dctx->buflen = 0;
|
||||
dctx->rset = 0;
|
||||
dctx->sset = false;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void arm_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
|
||||
u32 len, u32 hibit, bool do_neon)
|
||||
{
|
||||
if (unlikely(!dctx->sset)) {
|
||||
if (!dctx->rset) {
|
||||
poly1305_init_arm(&dctx->h, src);
|
||||
src += POLY1305_BLOCK_SIZE;
|
||||
len -= POLY1305_BLOCK_SIZE;
|
||||
dctx->rset = 1;
|
||||
}
|
||||
if (len >= POLY1305_BLOCK_SIZE) {
|
||||
dctx->s[0] = get_unaligned_le32(src + 0);
|
||||
dctx->s[1] = get_unaligned_le32(src + 4);
|
||||
dctx->s[2] = get_unaligned_le32(src + 8);
|
||||
dctx->s[3] = get_unaligned_le32(src + 12);
|
||||
src += POLY1305_BLOCK_SIZE;
|
||||
len -= POLY1305_BLOCK_SIZE;
|
||||
dctx->sset = true;
|
||||
}
|
||||
if (len < POLY1305_BLOCK_SIZE)
|
||||
return;
|
||||
}
|
||||
|
||||
len &= ~(POLY1305_BLOCK_SIZE - 1);
|
||||
|
||||
if (static_branch_likely(&have_neon) && likely(do_neon))
|
||||
poly1305_blocks_neon(&dctx->h, src, len, hibit);
|
||||
else
|
||||
poly1305_blocks_arm(&dctx->h, src, len, hibit);
|
||||
}
|
||||
|
||||
static void arm_poly1305_do_update(struct poly1305_desc_ctx *dctx,
|
||||
const u8 *src, u32 len, bool do_neon)
|
||||
{
|
||||
if (unlikely(dctx->buflen)) {
|
||||
u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen);
|
||||
|
||||
memcpy(dctx->buf + dctx->buflen, src, bytes);
|
||||
src += bytes;
|
||||
len -= bytes;
|
||||
dctx->buflen += bytes;
|
||||
|
||||
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
|
||||
arm_poly1305_blocks(dctx, dctx->buf,
|
||||
POLY1305_BLOCK_SIZE, 1, false);
|
||||
dctx->buflen = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (likely(len >= POLY1305_BLOCK_SIZE)) {
|
||||
arm_poly1305_blocks(dctx, src, len, 1, do_neon);
|
||||
src += round_down(len, POLY1305_BLOCK_SIZE);
|
||||
len %= POLY1305_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
if (unlikely(len)) {
|
||||
dctx->buflen = len;
|
||||
memcpy(dctx->buf, src, len);
|
||||
}
|
||||
}
|
||||
|
||||
static int arm_poly1305_update(struct shash_desc *desc,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
arm_poly1305_do_update(dctx, src, srclen, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __maybe_unused arm_poly1305_update_neon(struct shash_desc *desc,
|
||||
const u8 *src,
|
||||
unsigned int srclen)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
bool do_neon = crypto_simd_usable() && srclen > 128;
|
||||
|
||||
if (static_branch_likely(&have_neon) && do_neon)
|
||||
kernel_neon_begin();
|
||||
arm_poly1305_do_update(dctx, src, srclen, do_neon);
|
||||
if (static_branch_likely(&have_neon) && do_neon)
|
||||
kernel_neon_end();
|
||||
return 0;
|
||||
}
|
||||
|
||||
void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
bool do_neon = IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
|
||||
crypto_simd_usable();
|
||||
|
||||
if (unlikely(dctx->buflen)) {
|
||||
u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen);
|
||||
|
||||
memcpy(dctx->buf + dctx->buflen, src, bytes);
|
||||
src += bytes;
|
||||
nbytes -= bytes;
|
||||
dctx->buflen += bytes;
|
||||
|
||||
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
|
||||
poly1305_blocks_arm(&dctx->h, dctx->buf,
|
||||
POLY1305_BLOCK_SIZE, 1);
|
||||
dctx->buflen = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
|
||||
unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
|
||||
|
||||
if (static_branch_likely(&have_neon) && do_neon) {
|
||||
kernel_neon_begin();
|
||||
poly1305_blocks_neon(&dctx->h, src, len, 1);
|
||||
kernel_neon_end();
|
||||
} else {
|
||||
poly1305_blocks_arm(&dctx->h, src, len, 1);
|
||||
}
|
||||
src += len;
|
||||
nbytes %= POLY1305_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
if (unlikely(nbytes)) {
|
||||
dctx->buflen = nbytes;
|
||||
memcpy(dctx->buf, src, nbytes);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(poly1305_update_arch);
|
||||
|
||||
void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
|
||||
{
|
||||
__le32 digest[4];
|
||||
u64 f = 0;
|
||||
|
||||
if (unlikely(dctx->buflen)) {
|
||||
dctx->buf[dctx->buflen++] = 1;
|
||||
memset(dctx->buf + dctx->buflen, 0,
|
||||
POLY1305_BLOCK_SIZE - dctx->buflen);
|
||||
poly1305_blocks_arm(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
|
||||
}
|
||||
|
||||
poly1305_emit_arm(&dctx->h, digest, dctx->s);
|
||||
|
||||
/* mac = (h + s) % (2^128) */
|
||||
f = (f >> 32) + le32_to_cpu(digest[0]);
|
||||
put_unaligned_le32(f, dst);
|
||||
f = (f >> 32) + le32_to_cpu(digest[1]);
|
||||
put_unaligned_le32(f, dst + 4);
|
||||
f = (f >> 32) + le32_to_cpu(digest[2]);
|
||||
put_unaligned_le32(f, dst + 8);
|
||||
f = (f >> 32) + le32_to_cpu(digest[3]);
|
||||
put_unaligned_le32(f, dst + 12);
|
||||
|
||||
*dctx = (struct poly1305_desc_ctx){};
|
||||
}
|
||||
EXPORT_SYMBOL(poly1305_final_arch);
|
||||
|
||||
static int arm_poly1305_final(struct shash_desc *desc, u8 *dst)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
if (unlikely(!dctx->sset))
|
||||
return -ENOKEY;
|
||||
|
||||
poly1305_final_arch(dctx, dst);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct shash_alg arm_poly1305_algs[] = {{
|
||||
.init = arm_poly1305_init,
|
||||
.update = arm_poly1305_update,
|
||||
.final = arm_poly1305_final,
|
||||
.digestsize = POLY1305_DIGEST_SIZE,
|
||||
.descsize = sizeof(struct poly1305_desc_ctx),
|
||||
|
||||
.base.cra_name = "poly1305",
|
||||
.base.cra_driver_name = "poly1305-arm",
|
||||
.base.cra_priority = 150,
|
||||
.base.cra_blocksize = POLY1305_BLOCK_SIZE,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
#ifdef CONFIG_KERNEL_MODE_NEON
|
||||
}, {
|
||||
.init = arm_poly1305_init,
|
||||
.update = arm_poly1305_update_neon,
|
||||
.final = arm_poly1305_final,
|
||||
.digestsize = POLY1305_DIGEST_SIZE,
|
||||
.descsize = sizeof(struct poly1305_desc_ctx),
|
||||
|
||||
.base.cra_name = "poly1305",
|
||||
.base.cra_driver_name = "poly1305-neon",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_blocksize = POLY1305_BLOCK_SIZE,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
#endif
|
||||
}};
|
||||
|
||||
static int __init arm_poly1305_mod_init(void)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
|
||||
(elf_hwcap & HWCAP_NEON))
|
||||
static_branch_enable(&have_neon);
|
||||
else
|
||||
/* register only the first entry */
|
||||
return crypto_register_shash(&arm_poly1305_algs[0]);
|
||||
|
||||
return crypto_register_shashes(arm_poly1305_algs,
|
||||
ARRAY_SIZE(arm_poly1305_algs));
|
||||
}
|
||||
|
||||
static void __exit arm_poly1305_mod_exit(void)
|
||||
{
|
||||
if (!static_branch_likely(&have_neon)) {
|
||||
crypto_unregister_shash(&arm_poly1305_algs[0]);
|
||||
return;
|
||||
}
|
||||
crypto_unregister_shashes(arm_poly1305_algs,
|
||||
ARRAY_SIZE(arm_poly1305_algs));
|
||||
}
|
||||
|
||||
module_init(arm_poly1305_mod_init);
|
||||
module_exit(arm_poly1305_mod_exit);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_ALIAS_CRYPTO("poly1305");
|
||||
MODULE_ALIAS_CRYPTO("poly1305-arm");
|
||||
MODULE_ALIAS_CRYPTO("poly1305-neon");
|
|
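The poly1305_init_arch()/poly1305_update_arch()/poly1305_final_arch() routines above back the generic library wrappers; a minimal one-shot MAC sketch (kernel context assumed, wrapper names as in <crypto/poly1305.h>):

#include <crypto/poly1305.h>

static void poly1305_lib_example(const u8 key[POLY1305_KEY_SIZE],
				 const u8 *data, unsigned int len,
				 u8 mac[POLY1305_DIGEST_SIZE])
{
	struct poly1305_desc_ctx dctx;

	poly1305_init(&dctx, key);		/* dispatches to poly1305_init_arch() here */
	poly1305_update(&dctx, data, len);	/* scalar or NEON blocks under the hood */
	poly1305_final(&dctx, mac);		/* emits (h + s) mod 2^128 and wipes dctx */
}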
@ -10,6 +10,7 @@
#include <asm/assembler.h>

.text
.arch armv8-a
.fpu crypto-neon-fp-armv8

k0 .req q0
|
|
|
@ -10,6 +10,7 @@
#include <asm/assembler.h>

.text
.arch armv8-a
.fpu crypto-neon-fp-armv8

k0 .req q7
|
|
|
@ -269,21 +269,13 @@ static void __init am3517_evm_legacy_init(void)
|
|||
am35xx_emac_reset();
|
||||
}
|
||||
|
||||
static struct platform_device omap3_rom_rng_device = {
|
||||
.name = "omap3-rom-rng",
|
||||
.id = -1,
|
||||
.dev = {
|
||||
.platform_data = rx51_secure_rng_call,
|
||||
},
|
||||
};
|
||||
|
||||
static void __init nokia_n900_legacy_init(void)
|
||||
{
|
||||
hsmmc2_internal_input_clk();
|
||||
mmc_pdata[0].name = "external";
|
||||
mmc_pdata[1].name = "internal";
|
||||
|
||||
if (omap_type() == OMAP2_DEVICE_TYPE_SEC) {
|
||||
if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
|
||||
if (IS_ENABLED(CONFIG_ARM_ERRATA_430973)) {
|
||||
pr_info("RX-51: Enabling ARM errata 430973 workaround\n");
|
||||
/* set IBE to 1 */
|
||||
|
@ -292,9 +284,6 @@ static void __init nokia_n900_legacy_init(void)
|
|||
pr_warn("RX-51: Not enabling ARM errata 430973 workaround\n");
|
||||
pr_warn("Thumb binaries may crash randomly without this workaround\n");
|
||||
}
|
||||
|
||||
pr_info("RX-51: Registering OMAP3 HWRNG device\n");
|
||||
platform_device_register(&omap3_rom_rng_device);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -638,6 +627,7 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = {
|
|||
OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL),
|
||||
OF_DEV_AUXDATA("ti,am3517-emac", 0x5c000000, "davinci_emac.0",
|
||||
&am35xx_emac_pdata),
|
||||
OF_DEV_AUXDATA("nokia,n900-rom-rng", 0, NULL, rx51_secure_rng_call),
|
||||
/* McBSP modules with sidetone core */
|
||||
#if IS_ENABLED(CONFIG_SND_SOC_OMAP_MCBSP)
|
||||
OF_DEV_AUXDATA("ti,omap3-mcbsp", 0x49022000, "49022000.mcbsp", &mcbsp_pdata),
|
||||
|
|
|
@ -67,7 +67,7 @@ config ARM64
|
|||
select ARCH_USE_QUEUED_SPINLOCKS
|
||||
select ARCH_SUPPORTS_MEMORY_FAILURE
|
||||
select ARCH_SUPPORTS_ATOMIC_RMW
|
||||
select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG
|
||||
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
|
||||
select ARCH_SUPPORTS_NUMA_BALANCING
|
||||
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
|
||||
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
|
||||
|
|
|
@ -86,7 +86,7 @@ config CRYPTO_AES_ARM64_CE_CCM
|
|||
config CRYPTO_AES_ARM64_CE_BLK
|
||||
tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
|
||||
depends on KERNEL_MODE_NEON
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_AES_ARM64_CE
|
||||
select CRYPTO_AES_ARM64
|
||||
select CRYPTO_SIMD
|
||||
|
@ -94,7 +94,7 @@ config CRYPTO_AES_ARM64_CE_BLK
|
|||
config CRYPTO_AES_ARM64_NEON_BLK
|
||||
tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
|
||||
depends on KERNEL_MODE_NEON
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_AES_ARM64
|
||||
select CRYPTO_LIB_AES
|
||||
select CRYPTO_SIMD
|
||||
|
@ -102,8 +102,15 @@ config CRYPTO_AES_ARM64_NEON_BLK
|
|||
config CRYPTO_CHACHA20_NEON
|
||||
tristate "ChaCha20, XChaCha20, and XChaCha12 stream ciphers using NEON instructions"
|
||||
depends on KERNEL_MODE_NEON
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_CHACHA20
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_LIB_CHACHA_GENERIC
|
||||
select CRYPTO_ARCH_HAVE_LIB_CHACHA
|
||||
|
||||
config CRYPTO_POLY1305_NEON
|
||||
tristate "Poly1305 hash function using scalar or NEON instructions"
|
||||
depends on KERNEL_MODE_NEON
|
||||
select CRYPTO_HASH
|
||||
select CRYPTO_ARCH_HAVE_LIB_POLY1305
|
||||
|
||||
config CRYPTO_NHPOLY1305_NEON
|
||||
tristate "NHPoly1305 hash function using NEON instructions (for Adiantum)"
|
||||
|
@ -113,7 +120,7 @@ config CRYPTO_NHPOLY1305_NEON
|
|||
config CRYPTO_AES_ARM64_BS
|
||||
tristate "AES in ECB/CBC/CTR/XTS modes using bit-sliced NEON algorithm"
|
||||
depends on KERNEL_MODE_NEON
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_AES_ARM64_NEON_BLK
|
||||
select CRYPTO_AES_ARM64
|
||||
select CRYPTO_LIB_AES
|
||||
|
|
|
@ -50,6 +50,10 @@ sha512-arm64-y := sha512-glue.o sha512-core.o
|
|||
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
|
||||
chacha-neon-y := chacha-neon-core.o chacha-neon-glue.o
|
||||
|
||||
obj-$(CONFIG_CRYPTO_POLY1305_NEON) += poly1305-neon.o
|
||||
poly1305-neon-y := poly1305-core.o poly1305-glue.o
|
||||
AFLAGS_poly1305-core.o += -Dpoly1305_init=poly1305_init_arm64
|
||||
|
||||
obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o
|
||||
nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o
|
||||
|
||||
|
@ -68,11 +72,15 @@ ifdef REGENERATE_ARM64_CRYPTO
|
|||
quiet_cmd_perlasm = PERLASM $@
|
||||
cmd_perlasm = $(PERL) $(<) void $(@)
|
||||
|
||||
$(src)/poly1305-core.S_shipped: $(src)/poly1305-armv8.pl
|
||||
$(call cmd,perlasm)
|
||||
|
||||
$(src)/sha256-core.S_shipped: $(src)/sha512-armv8.pl
|
||||
$(call cmd,perlasm)
|
||||
|
||||
$(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl
|
||||
$(call cmd,perlasm)
|
||||
|
||||
endif
|
||||
|
||||
clean-files += sha256-core.S sha512-core.S
|
||||
clean-files += poly1305-core.S sha256-core.S sha512-core.S
|
||||
|
|
|
@ -384,7 +384,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
goto xts_tail;

kernel_neon_end();
skcipher_walk_done(&walk, nbytes);
err = skcipher_walk_done(&walk, nbytes);
}

if (err || likely(!tail))
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* ARM NEON accelerated ChaCha and XChaCha stream ciphers,
|
||||
* ARM NEON and scalar accelerated ChaCha and XChaCha stream ciphers,
|
||||
* including ChaCha20 (RFC7539)
|
||||
*
|
||||
* Copyright (C) 2016 - 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
|
||||
|
@ -20,9 +20,10 @@
|
|||
*/
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/chacha.h>
|
||||
#include <crypto/internal/chacha.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
|
@ -36,6 +37,8 @@ asmlinkage void chacha_4block_xor_neon(u32 *state, u8 *dst, const u8 *src,
|
|||
int nrounds, int bytes);
|
||||
asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);
|
||||
|
||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
|
||||
|
||||
static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
|
||||
int bytes, int nrounds)
|
||||
{
|
||||
|
@ -59,6 +62,37 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
|
|||
}
|
||||
}
|
||||
|
||||
void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
|
||||
{
|
||||
if (!static_branch_likely(&have_neon) || !crypto_simd_usable()) {
|
||||
hchacha_block_generic(state, stream, nrounds);
|
||||
} else {
|
||||
kernel_neon_begin();
|
||||
hchacha_block_neon(state, stream, nrounds);
|
||||
kernel_neon_end();
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(hchacha_block_arch);
|
||||
|
||||
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
|
||||
{
|
||||
chacha_init_generic(state, key, iv);
|
||||
}
|
||||
EXPORT_SYMBOL(chacha_init_arch);
|
||||
|
||||
void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
|
||||
int nrounds)
|
||||
{
|
||||
if (!static_branch_likely(&have_neon) || bytes <= CHACHA_BLOCK_SIZE ||
|
||||
!crypto_simd_usable())
|
||||
return chacha_crypt_generic(state, dst, src, bytes, nrounds);
|
||||
|
||||
kernel_neon_begin();
|
||||
chacha_doneon(state, dst, src, bytes, nrounds);
|
||||
kernel_neon_end();
|
||||
}
|
||||
EXPORT_SYMBOL(chacha_crypt_arch);
|
||||
|
||||
static int chacha_neon_stream_xor(struct skcipher_request *req,
|
||||
const struct chacha_ctx *ctx, const u8 *iv)
|
||||
{
|
||||
|
@ -68,7 +102,7 @@ static int chacha_neon_stream_xor(struct skcipher_request *req,
|
|||
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
crypto_chacha_init(state, ctx, iv);
|
||||
chacha_init_generic(state, ctx->key, iv);
|
||||
|
||||
while (walk.nbytes > 0) {
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
|
@ -76,10 +110,17 @@ static int chacha_neon_stream_xor(struct skcipher_request *req,
|
|||
if (nbytes < walk.total)
|
||||
nbytes = rounddown(nbytes, walk.stride);
|
||||
|
||||
kernel_neon_begin();
|
||||
chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr,
|
||||
nbytes, ctx->nrounds);
|
||||
kernel_neon_end();
|
||||
if (!static_branch_likely(&have_neon) ||
|
||||
!crypto_simd_usable()) {
|
||||
chacha_crypt_generic(state, walk.dst.virt.addr,
|
||||
walk.src.virt.addr, nbytes,
|
||||
ctx->nrounds);
|
||||
} else {
|
||||
kernel_neon_begin();
|
||||
chacha_doneon(state, walk.dst.virt.addr,
|
||||
walk.src.virt.addr, nbytes, ctx->nrounds);
|
||||
kernel_neon_end();
|
||||
}
|
||||
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
}
|
||||
|
||||
|
@ -91,9 +132,6 @@ static int chacha_neon(struct skcipher_request *req)
|
|||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
|
||||
return crypto_chacha_crypt(req);
|
||||
|
||||
return chacha_neon_stream_xor(req, ctx, req->iv);
|
||||
}
|
||||
|
||||
|
@ -105,14 +143,8 @@ static int xchacha_neon(struct skcipher_request *req)
|
|||
u32 state[16];
|
||||
u8 real_iv[16];
|
||||
|
||||
if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
|
||||
return crypto_xchacha_crypt(req);
|
||||
|
||||
crypto_chacha_init(state, ctx, req->iv);
|
||||
|
||||
kernel_neon_begin();
|
||||
hchacha_block_neon(state, subctx.key, ctx->nrounds);
|
||||
kernel_neon_end();
|
||||
chacha_init_generic(state, ctx->key, req->iv);
|
||||
hchacha_block_arch(state, subctx.key, ctx->nrounds);
|
||||
subctx.nrounds = ctx->nrounds;
|
||||
|
||||
memcpy(&real_iv[0], req->iv + 24, 8);
|
||||
|
@ -134,7 +166,7 @@ static struct skcipher_alg algs[] = {
|
|||
.ivsize = CHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.walksize = 5 * CHACHA_BLOCK_SIZE,
|
||||
.setkey = crypto_chacha20_setkey,
|
||||
.setkey = chacha20_setkey,
|
||||
.encrypt = chacha_neon,
|
||||
.decrypt = chacha_neon,
|
||||
}, {
|
||||
|
@ -150,7 +182,7 @@ static struct skcipher_alg algs[] = {
|
|||
.ivsize = XCHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.walksize = 5 * CHACHA_BLOCK_SIZE,
|
||||
.setkey = crypto_chacha20_setkey,
|
||||
.setkey = chacha20_setkey,
|
||||
.encrypt = xchacha_neon,
|
||||
.decrypt = xchacha_neon,
|
||||
}, {
|
||||
|
@ -166,7 +198,7 @@ static struct skcipher_alg algs[] = {
|
|||
.ivsize = XCHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.walksize = 5 * CHACHA_BLOCK_SIZE,
|
||||
.setkey = crypto_chacha12_setkey,
|
||||
.setkey = chacha12_setkey,
|
||||
.encrypt = xchacha_neon,
|
||||
.decrypt = xchacha_neon,
|
||||
}
|
||||
|
@ -175,14 +207,17 @@ static struct skcipher_alg algs[] = {
|
|||
static int __init chacha_simd_mod_init(void)
|
||||
{
|
||||
if (!cpu_have_named_feature(ASIMD))
|
||||
return -ENODEV;
|
||||
return 0;
|
||||
|
||||
static_branch_enable(&have_neon);
|
||||
|
||||
return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
|
||||
}
|
||||
|
||||
static void __exit chacha_simd_mod_fini(void)
|
||||
{
|
||||
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
|
||||
if (cpu_have_named_feature(ASIMD))
|
||||
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
|
||||
}
|
||||
|
||||
module_init(chacha_simd_mod_init);
|
||||
|
|
|
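The chacha_init_arch()/chacha_crypt_arch() exports above make up the ChaCha library interface on this architecture; a minimal in-place encryption sketch (kernel context assumed, key given as eight little-endian u32 words):

#include <crypto/chacha.h>

static void chacha20_lib_example(const u32 key[CHACHA_KEY_SIZE / 4],
				 const u8 iv[CHACHA_IV_SIZE],
				 u8 *buf, unsigned int len)
{
	u32 state[16];

	chacha_init_arch(state, key, iv);		/* constants + key + counter/nonce */
	chacha_crypt_arch(state, buf, buf, len, 20);	/* NEON when usable, generic fallback otherwise */
}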
@ -13,8 +13,8 @@
|
|||
T1 .req v2
|
||||
T2 .req v3
|
||||
MASK .req v4
|
||||
XL .req v5
|
||||
XM .req v6
|
||||
XM .req v5
|
||||
XL .req v6
|
||||
XH .req v7
|
||||
IN1 .req v7
|
||||
|
||||
|
@ -358,20 +358,37 @@ ENTRY(pmull_ghash_update_p8)
|
|||
__pmull_ghash p8
|
||||
ENDPROC(pmull_ghash_update_p8)
|
||||
|
||||
KS0 .req v12
|
||||
KS1 .req v13
|
||||
INP0 .req v14
|
||||
INP1 .req v15
|
||||
KS0 .req v8
|
||||
KS1 .req v9
|
||||
KS2 .req v10
|
||||
KS3 .req v11
|
||||
|
||||
.macro load_round_keys, rounds, rk
|
||||
cmp \rounds, #12
|
||||
blo 2222f /* 128 bits */
|
||||
beq 1111f /* 192 bits */
|
||||
ld1 {v17.4s-v18.4s}, [\rk], #32
|
||||
1111: ld1 {v19.4s-v20.4s}, [\rk], #32
|
||||
2222: ld1 {v21.4s-v24.4s}, [\rk], #64
|
||||
ld1 {v25.4s-v28.4s}, [\rk], #64
|
||||
ld1 {v29.4s-v31.4s}, [\rk]
|
||||
INP0 .req v21
|
||||
INP1 .req v22
|
||||
INP2 .req v23
|
||||
INP3 .req v24
|
||||
|
||||
K0 .req v25
|
||||
K1 .req v26
|
||||
K2 .req v27
|
||||
K3 .req v28
|
||||
K4 .req v12
|
||||
K5 .req v13
|
||||
K6 .req v4
|
||||
K7 .req v5
|
||||
K8 .req v14
|
||||
K9 .req v15
|
||||
KK .req v29
|
||||
KL .req v30
|
||||
KM .req v31
|
||||
|
||||
.macro load_round_keys, rounds, rk, tmp
|
||||
add \tmp, \rk, #64
|
||||
ld1 {K0.4s-K3.4s}, [\rk]
|
||||
ld1 {K4.4s-K5.4s}, [\tmp]
|
||||
add \tmp, \rk, \rounds, lsl #4
|
||||
sub \tmp, \tmp, #32
|
||||
ld1 {KK.4s-KM.4s}, [\tmp]
|
||||
.endm
|
||||
|
||||
.macro enc_round, state, key
|
||||
|
@ -379,197 +396,367 @@ ENDPROC(pmull_ghash_update_p8)
|
|||
aesmc \state\().16b, \state\().16b
|
||||
.endm
|
||||
|
||||
.macro enc_block, state, rounds
|
||||
cmp \rounds, #12
|
||||
b.lo 2222f /* 128 bits */
|
||||
b.eq 1111f /* 192 bits */
|
||||
enc_round \state, v17
|
||||
enc_round \state, v18
|
||||
1111: enc_round \state, v19
|
||||
enc_round \state, v20
|
||||
2222: .irp key, v21, v22, v23, v24, v25, v26, v27, v28, v29
|
||||
enc_round \state, \key
|
||||
.endr
|
||||
aese \state\().16b, v30.16b
|
||||
eor \state\().16b, \state\().16b, v31.16b
|
||||
.macro enc_qround, s0, s1, s2, s3, key
|
||||
enc_round \s0, \key
|
||||
enc_round \s1, \key
|
||||
enc_round \s2, \key
|
||||
enc_round \s3, \key
|
||||
.endm
|
||||
|
||||
.macro pmull_gcm_do_crypt, enc
|
||||
ld1 {SHASH.2d}, [x4], #16
|
||||
ld1 {HH.2d}, [x4]
|
||||
ld1 {XL.2d}, [x1]
|
||||
ldr x8, [x5, #8] // load lower counter
|
||||
.macro enc_block, state, rounds, rk, tmp
|
||||
add \tmp, \rk, #96
|
||||
ld1 {K6.4s-K7.4s}, [\tmp], #32
|
||||
.irp key, K0, K1, K2, K3, K4, K5
|
||||
enc_round \state, \key
|
||||
.endr
|
||||
|
||||
tbnz \rounds, #2, .Lnot128_\@
|
||||
.Lout256_\@:
|
||||
enc_round \state, K6
|
||||
enc_round \state, K7
|
||||
|
||||
.Lout192_\@:
|
||||
enc_round \state, KK
|
||||
aese \state\().16b, KL.16b
|
||||
eor \state\().16b, \state\().16b, KM.16b
|
||||
|
||||
.subsection 1
|
||||
.Lnot128_\@:
|
||||
ld1 {K8.4s-K9.4s}, [\tmp], #32
|
||||
enc_round \state, K6
|
||||
enc_round \state, K7
|
||||
ld1 {K6.4s-K7.4s}, [\tmp]
|
||||
enc_round \state, K8
|
||||
enc_round \state, K9
|
||||
tbz \rounds, #1, .Lout192_\@
|
||||
b .Lout256_\@
|
||||
.previous
|
||||
.endm
|
||||
|
||||
.align 6
|
||||
.macro pmull_gcm_do_crypt, enc
|
||||
stp x29, x30, [sp, #-32]!
|
||||
mov x29, sp
|
||||
str x19, [sp, #24]
|
||||
|
||||
load_round_keys x7, x6, x8
|
||||
|
||||
ld1 {SHASH.2d}, [x3], #16
|
||||
ld1 {HH.2d-HH4.2d}, [x3]
|
||||
|
||||
movi MASK.16b, #0xe1
|
||||
trn1 SHASH2.2d, SHASH.2d, HH.2d
|
||||
trn2 T1.2d, SHASH.2d, HH.2d
|
||||
CPU_LE( rev x8, x8 )
|
||||
shl MASK.2d, MASK.2d, #57
|
||||
eor SHASH2.16b, SHASH2.16b, T1.16b
|
||||
|
||||
trn1 HH34.2d, HH3.2d, HH4.2d
|
||||
trn2 T1.2d, HH3.2d, HH4.2d
|
||||
eor HH34.16b, HH34.16b, T1.16b
|
||||
|
||||
ld1 {XL.2d}, [x4]
|
||||
|
||||
cbz x0, 3f // tag only?
|
||||
|
||||
ldr w8, [x5, #12] // load lower counter
|
||||
CPU_LE( rev w8, w8 )
|
||||
|
||||
0: mov w9, #4 // max blocks per round
|
||||
add x10, x0, #0xf
|
||||
lsr x10, x10, #4 // remaining blocks
|
||||
|
||||
subs x0, x0, #64
|
||||
csel w9, w10, w9, mi
|
||||
add w8, w8, w9
|
||||
|
||||
bmi 1f
|
||||
ld1 {INP0.16b-INP3.16b}, [x2], #64
|
||||
.subsection 1
|
||||
/*
|
||||
* Populate the four input registers right to left with up to 63 bytes
|
||||
* of data, using overlapping loads to avoid branches.
|
||||
*
|
||||
* INP0 INP1 INP2 INP3
|
||||
* 1 byte | | | |x |
|
||||
* 16 bytes | | | |xxxxxxxx|
|
||||
* 17 bytes | | |xxxxxxxx|x |
|
||||
* 47 bytes | |xxxxxxxx|xxxxxxxx|xxxxxxx |
|
||||
* etc etc
|
||||
*
|
||||
* Note that this code may read up to 15 bytes before the start of
|
||||
* the input. It is up to the calling code to ensure this is safe if
|
||||
* this happens in the first iteration of the loop (i.e., when the
|
||||
* input size is < 16 bytes)
|
||||
*/
|
||||
1: mov x15, #16
|
||||
ands x19, x0, #0xf
|
||||
csel x19, x19, x15, ne
|
||||
adr_l x17, .Lpermute_table + 16
|
||||
|
||||
sub x11, x15, x19
|
||||
add x12, x17, x11
|
||||
sub x17, x17, x11
|
||||
ld1 {T1.16b}, [x12]
|
||||
sub x10, x1, x11
|
||||
sub x11, x2, x11
|
||||
|
||||
cmp x0, #-16
|
||||
csel x14, x15, xzr, gt
|
||||
cmp x0, #-32
|
||||
csel x15, x15, xzr, gt
|
||||
cmp x0, #-48
|
||||
csel x16, x19, xzr, gt
|
||||
csel x1, x1, x10, gt
|
||||
csel x2, x2, x11, gt
|
||||
|
||||
ld1 {INP0.16b}, [x2], x14
|
||||
ld1 {INP1.16b}, [x2], x15
|
||||
ld1 {INP2.16b}, [x2], x16
|
||||
ld1 {INP3.16b}, [x2]
|
||||
tbl INP3.16b, {INP3.16b}, T1.16b
|
||||
b 2f
|
||||
.previous
|
||||
|
||||
2: .if \enc == 0
|
||||
bl pmull_gcm_ghash_4x
|
||||
.endif
|
||||
|
||||
bl pmull_gcm_enc_4x
|
||||
|
||||
tbnz x0, #63, 6f
|
||||
st1 {INP0.16b-INP3.16b}, [x1], #64
|
||||
.if \enc == 1
|
||||
ldr x10, [sp]
|
||||
ld1 {KS0.16b-KS1.16b}, [x10]
|
||||
bl pmull_gcm_ghash_4x
|
||||
.endif
|
||||
bne 0b
|
||||
|
||||
cbnz x6, 4f
|
||||
3: ldp x19, x10, [sp, #24]
|
||||
cbz x10, 5f // output tag?
|
||||
|
||||
0: ld1 {INP0.16b-INP1.16b}, [x3], #32
|
||||
ld1 {INP3.16b}, [x10] // load lengths[]
|
||||
mov w9, #1
|
||||
bl pmull_gcm_ghash_4x
|
||||
|
||||
rev x9, x8
|
||||
add x11, x8, #1
|
||||
add x8, x8, #2
|
||||
mov w11, #(0x1 << 24) // BE '1U'
|
||||
ld1 {KS0.16b}, [x5]
|
||||
mov KS0.s[3], w11
|
||||
|
||||
.if \enc == 1
|
||||
eor INP0.16b, INP0.16b, KS0.16b // encrypt input
|
||||
eor INP1.16b, INP1.16b, KS1.16b
|
||||
.endif
|
||||
enc_block KS0, x7, x6, x12
|
||||
|
||||
ld1 {KS0.8b}, [x5] // load upper counter
|
||||
rev x11, x11
|
||||
sub w0, w0, #2
|
||||
mov KS1.8b, KS0.8b
|
||||
ins KS0.d[1], x9 // set lower counter
|
||||
ins KS1.d[1], x11
|
||||
|
||||
rev64 T1.16b, INP1.16b
|
||||
|
||||
cmp w7, #12
|
||||
b.ge 2f // AES-192/256?
|
||||
|
||||
1: enc_round KS0, v21
|
||||
ext IN1.16b, T1.16b, T1.16b, #8
|
||||
|
||||
enc_round KS1, v21
|
||||
pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1
|
||||
|
||||
enc_round KS0, v22
|
||||
eor T1.16b, T1.16b, IN1.16b
|
||||
|
||||
enc_round KS1, v22
|
||||
pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0
|
||||
|
||||
enc_round KS0, v23
|
||||
pmull XM2.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
|
||||
|
||||
enc_round KS1, v23
|
||||
rev64 T1.16b, INP0.16b
|
||||
ext T2.16b, XL.16b, XL.16b, #8
|
||||
|
||||
enc_round KS0, v24
|
||||
ext IN1.16b, T1.16b, T1.16b, #8
|
||||
eor T1.16b, T1.16b, T2.16b
|
||||
|
||||
enc_round KS1, v24
|
||||
eor XL.16b, XL.16b, IN1.16b
|
||||
|
||||
enc_round KS0, v25
|
||||
eor T1.16b, T1.16b, XL.16b
|
||||
|
||||
enc_round KS1, v25
|
||||
pmull2 XH.1q, HH.2d, XL.2d // a1 * b1
|
||||
|
||||
enc_round KS0, v26
|
||||
pmull XL.1q, HH.1d, XL.1d // a0 * b0
|
||||
|
||||
enc_round KS1, v26
|
||||
pmull2 XM.1q, SHASH2.2d, T1.2d // (a1 + a0)(b1 + b0)
|
||||
|
||||
enc_round KS0, v27
|
||||
eor XL.16b, XL.16b, XL2.16b
|
||||
eor XH.16b, XH.16b, XH2.16b
|
||||
|
||||
enc_round KS1, v27
|
||||
eor XM.16b, XM.16b, XM2.16b
|
||||
ext T1.16b, XL.16b, XH.16b, #8
|
||||
|
||||
enc_round KS0, v28
|
||||
eor T2.16b, XL.16b, XH.16b
|
||||
eor XM.16b, XM.16b, T1.16b
|
||||
|
||||
enc_round KS1, v28
|
||||
eor XM.16b, XM.16b, T2.16b
|
||||
|
||||
enc_round KS0, v29
|
||||
pmull T2.1q, XL.1d, MASK.1d
|
||||
|
||||
enc_round KS1, v29
|
||||
mov XH.d[0], XM.d[1]
|
||||
mov XM.d[1], XL.d[0]
|
||||
|
||||
aese KS0.16b, v30.16b
|
||||
eor XL.16b, XM.16b, T2.16b
|
||||
|
||||
aese KS1.16b, v30.16b
|
||||
ext T2.16b, XL.16b, XL.16b, #8
|
||||
|
||||
eor KS0.16b, KS0.16b, v31.16b
|
||||
pmull XL.1q, XL.1d, MASK.1d
|
||||
eor T2.16b, T2.16b, XH.16b
|
||||
|
||||
eor KS1.16b, KS1.16b, v31.16b
|
||||
eor XL.16b, XL.16b, T2.16b
|
||||
|
||||
.if \enc == 0
|
||||
eor INP0.16b, INP0.16b, KS0.16b
|
||||
eor INP1.16b, INP1.16b, KS1.16b
|
||||
.endif
|
||||
|
||||
st1 {INP0.16b-INP1.16b}, [x2], #32
|
||||
|
||||
cbnz w0, 0b
|
||||
|
||||
CPU_LE( rev x8, x8 )
|
||||
st1 {XL.2d}, [x1]
|
||||
str x8, [x5, #8] // store lower counter
|
||||
|
||||
.if \enc == 1
|
||||
st1 {KS0.16b-KS1.16b}, [x10]
|
||||
.endif
|
||||
ext XL.16b, XL.16b, XL.16b, #8
|
||||
rev64 XL.16b, XL.16b
|
||||
eor XL.16b, XL.16b, KS0.16b
|
||||
st1 {XL.16b}, [x10] // store tag
|
||||
|
||||
4: ldp x29, x30, [sp], #32
|
||||
ret
|
||||
|
||||
2: b.eq 3f // AES-192?
|
||||
enc_round KS0, v17
|
||||
enc_round KS1, v17
|
||||
enc_round KS0, v18
|
||||
enc_round KS1, v18
|
||||
3: enc_round KS0, v19
|
||||
enc_round KS1, v19
|
||||
enc_round KS0, v20
|
||||
enc_round KS1, v20
|
||||
b 1b
|
||||
5:
|
||||
CPU_LE( rev w8, w8 )
|
||||
str w8, [x5, #12] // store lower counter
|
||||
st1 {XL.2d}, [x4]
|
||||
b 4b
|
||||
|
||||
4: load_round_keys w7, x6
|
||||
b 0b
|
||||
6: ld1 {T1.16b-T2.16b}, [x17], #32 // permute vectors
|
||||
sub x17, x17, x19, lsl #1
|
||||
|
||||
cmp w9, #1
|
||||
beq 7f
|
||||
.subsection 1
|
||||
7: ld1 {INP2.16b}, [x1]
|
||||
tbx INP2.16b, {INP3.16b}, T1.16b
|
||||
mov INP3.16b, INP2.16b
|
||||
b 8f
|
||||
.previous
|
||||
|
||||
st1 {INP0.16b}, [x1], x14
|
||||
st1 {INP1.16b}, [x1], x15
|
||||
st1 {INP2.16b}, [x1], x16
|
||||
tbl INP3.16b, {INP3.16b}, T1.16b
|
||||
tbx INP3.16b, {INP2.16b}, T2.16b
|
||||
8: st1 {INP3.16b}, [x1]
|
||||
|
||||
.if \enc == 1
|
||||
ld1 {T1.16b}, [x17]
|
||||
tbl INP3.16b, {INP3.16b}, T1.16b // clear non-data bits
|
||||
bl pmull_gcm_ghash_4x
|
||||
.endif
|
||||
b 3b
|
||||
.endm
|
||||
|
||||
/*
|
||||
* void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[], const u8 src[],
|
||||
* struct ghash_key const *k, u8 ctr[],
|
||||
* int rounds, u8 ks[])
|
||||
* void pmull_gcm_encrypt(int blocks, u8 dst[], const u8 src[],
|
||||
* struct ghash_key const *k, u64 dg[], u8 ctr[],
|
||||
* int rounds, u8 tag)
|
||||
*/
|
||||
ENTRY(pmull_gcm_encrypt)
|
||||
pmull_gcm_do_crypt 1
|
||||
ENDPROC(pmull_gcm_encrypt)
|
||||
|
||||
/*
|
||||
* void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[], const u8 src[],
|
||||
* struct ghash_key const *k, u8 ctr[],
|
||||
* int rounds)
|
||||
* void pmull_gcm_decrypt(int blocks, u8 dst[], const u8 src[],
|
||||
* struct ghash_key const *k, u64 dg[], u8 ctr[],
|
||||
* int rounds, u8 tag)
|
||||
*/
|
||||
ENTRY(pmull_gcm_decrypt)
|
||||
pmull_gcm_do_crypt 0
|
||||
ENDPROC(pmull_gcm_decrypt)
|
||||
|
||||
/*
|
||||
* void pmull_gcm_encrypt_block(u8 dst[], u8 src[], u8 rk[], int rounds)
|
||||
*/
|
||||
ENTRY(pmull_gcm_encrypt_block)
|
||||
cbz x2, 0f
|
||||
load_round_keys w3, x2
|
||||
0: ld1 {v0.16b}, [x1]
|
||||
enc_block v0, w3
|
||||
st1 {v0.16b}, [x0]
|
||||
pmull_gcm_ghash_4x:
|
||||
movi MASK.16b, #0xe1
|
||||
shl MASK.2d, MASK.2d, #57
|
||||
|
||||
rev64 T1.16b, INP0.16b
|
||||
rev64 T2.16b, INP1.16b
|
||||
rev64 TT3.16b, INP2.16b
|
||||
rev64 TT4.16b, INP3.16b
|
||||
|
||||
ext XL.16b, XL.16b, XL.16b, #8
|
||||
|
||||
tbz w9, #2, 0f // <4 blocks?
|
||||
.subsection 1
|
||||
0: movi XH2.16b, #0
|
||||
movi XM2.16b, #0
|
||||
movi XL2.16b, #0
|
||||
|
||||
tbz w9, #0, 1f // 2 blocks?
|
||||
tbz w9, #1, 2f // 1 block?
|
||||
|
||||
eor T2.16b, T2.16b, XL.16b
|
||||
ext T1.16b, T2.16b, T2.16b, #8
|
||||
b .Lgh3
|
||||
|
||||
1: eor TT3.16b, TT3.16b, XL.16b
|
||||
ext T2.16b, TT3.16b, TT3.16b, #8
|
||||
b .Lgh2
|
||||
|
||||
2: eor TT4.16b, TT4.16b, XL.16b
|
||||
ext IN1.16b, TT4.16b, TT4.16b, #8
|
||||
b .Lgh1
|
||||
.previous
|
||||
|
||||
eor T1.16b, T1.16b, XL.16b
|
||||
ext IN1.16b, T1.16b, T1.16b, #8
|
||||
|
||||
pmull2 XH2.1q, HH4.2d, IN1.2d // a1 * b1
|
||||
eor T1.16b, T1.16b, IN1.16b
|
||||
pmull XL2.1q, HH4.1d, IN1.1d // a0 * b0
|
||||
pmull2 XM2.1q, HH34.2d, T1.2d // (a1 + a0)(b1 + b0)
|
||||
|
||||
ext T1.16b, T2.16b, T2.16b, #8
|
||||
.Lgh3: eor T2.16b, T2.16b, T1.16b
|
||||
pmull2 XH.1q, HH3.2d, T1.2d // a1 * b1
|
||||
pmull XL.1q, HH3.1d, T1.1d // a0 * b0
|
||||
pmull XM.1q, HH34.1d, T2.1d // (a1 + a0)(b1 + b0)
|
||||
|
||||
eor XH2.16b, XH2.16b, XH.16b
|
||||
eor XL2.16b, XL2.16b, XL.16b
|
||||
eor XM2.16b, XM2.16b, XM.16b
|
||||
|
||||
ext T2.16b, TT3.16b, TT3.16b, #8
|
||||
.Lgh2: eor TT3.16b, TT3.16b, T2.16b
|
||||
pmull2 XH.1q, HH.2d, T2.2d // a1 * b1
|
||||
pmull XL.1q, HH.1d, T2.1d // a0 * b0
|
||||
pmull2 XM.1q, SHASH2.2d, TT3.2d // (a1 + a0)(b1 + b0)
|
||||
|
||||
eor XH2.16b, XH2.16b, XH.16b
|
||||
eor XL2.16b, XL2.16b, XL.16b
|
||||
eor XM2.16b, XM2.16b, XM.16b
|
||||
|
||||
ext IN1.16b, TT4.16b, TT4.16b, #8
|
||||
.Lgh1: eor TT4.16b, TT4.16b, IN1.16b
|
||||
pmull XL.1q, SHASH.1d, IN1.1d // a0 * b0
|
||||
pmull2 XH.1q, SHASH.2d, IN1.2d // a1 * b1
|
||||
pmull XM.1q, SHASH2.1d, TT4.1d // (a1 + a0)(b1 + b0)
|
||||
|
||||
eor XH.16b, XH.16b, XH2.16b
|
||||
eor XL.16b, XL.16b, XL2.16b
|
||||
eor XM.16b, XM.16b, XM2.16b
|
||||
|
||||
eor T2.16b, XL.16b, XH.16b
|
||||
ext T1.16b, XL.16b, XH.16b, #8
|
||||
eor XM.16b, XM.16b, T2.16b
|
||||
|
||||
__pmull_reduce_p64
|
||||
|
||||
eor T2.16b, T2.16b, XH.16b
|
||||
eor XL.16b, XL.16b, T2.16b
|
||||
|
||||
ret
|
||||
ENDPROC(pmull_gcm_encrypt_block)
|
||||
ENDPROC(pmull_gcm_ghash_4x)
|
||||
|
||||
pmull_gcm_enc_4x:
|
||||
ld1 {KS0.16b}, [x5] // load upper counter
|
||||
sub w10, w8, #4
|
||||
sub w11, w8, #3
|
||||
sub w12, w8, #2
|
||||
sub w13, w8, #1
|
||||
rev w10, w10
|
||||
rev w11, w11
|
||||
rev w12, w12
|
||||
rev w13, w13
|
||||
mov KS1.16b, KS0.16b
|
||||
mov KS2.16b, KS0.16b
|
||||
mov KS3.16b, KS0.16b
|
||||
ins KS0.s[3], w10 // set lower counter
|
||||
ins KS1.s[3], w11
|
||||
ins KS2.s[3], w12
|
||||
ins KS3.s[3], w13
|
||||
|
||||
add x10, x6, #96 // round key pointer
|
||||
ld1 {K6.4s-K7.4s}, [x10], #32
|
||||
.irp key, K0, K1, K2, K3, K4, K5
|
||||
enc_qround KS0, KS1, KS2, KS3, \key
|
||||
.endr
|
||||
|
||||
tbnz x7, #2, .Lnot128
|
||||
.subsection 1
|
||||
.Lnot128:
|
||||
ld1 {K8.4s-K9.4s}, [x10], #32
|
||||
.irp key, K6, K7
|
||||
enc_qround KS0, KS1, KS2, KS3, \key
|
||||
.endr
|
||||
ld1 {K6.4s-K7.4s}, [x10]
|
||||
.irp key, K8, K9
|
||||
enc_qround KS0, KS1, KS2, KS3, \key
|
||||
.endr
|
||||
tbz x7, #1, .Lout192
|
||||
b .Lout256
|
||||
.previous
|
||||
|
||||
.Lout256:
|
||||
.irp key, K6, K7
|
||||
enc_qround KS0, KS1, KS2, KS3, \key
|
||||
.endr
|
||||
|
||||
.Lout192:
|
||||
enc_qround KS0, KS1, KS2, KS3, KK
|
||||
|
||||
aese KS0.16b, KL.16b
|
||||
aese KS1.16b, KL.16b
|
||||
aese KS2.16b, KL.16b
|
||||
aese KS3.16b, KL.16b
|
||||
|
||||
eor KS0.16b, KS0.16b, KM.16b
|
||||
eor KS1.16b, KS1.16b, KM.16b
|
||||
eor KS2.16b, KS2.16b, KM.16b
|
||||
eor KS3.16b, KS3.16b, KM.16b
|
||||
|
||||
eor INP0.16b, INP0.16b, KS0.16b
|
||||
eor INP1.16b, INP1.16b, KS1.16b
|
||||
eor INP2.16b, INP2.16b, KS2.16b
|
||||
eor INP3.16b, INP3.16b, KS3.16b
|
||||
|
||||
ret
|
||||
ENDPROC(pmull_gcm_enc_4x)
|
||||
|
||||
.section ".rodata", "a"
|
||||
.align 6
|
||||
.Lpermute_table:
|
||||
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
|
||||
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
|
||||
.byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
|
||||
.byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf
|
||||
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
|
||||
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
|
||||
.byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
|
||||
.byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf
|
||||
.previous
|
||||
|
|
|
@ -58,17 +58,15 @@ asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
|
|||
struct ghash_key const *k,
|
||||
const char *head);
|
||||
|
||||
asmlinkage void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[],
|
||||
const u8 src[], struct ghash_key const *k,
|
||||
asmlinkage void pmull_gcm_encrypt(int bytes, u8 dst[], const u8 src[],
|
||||
struct ghash_key const *k, u64 dg[],
|
||||
u8 ctr[], u32 const rk[], int rounds,
|
||||
u8 ks[]);
|
||||
u8 tag[]);
|
||||
|
||||
asmlinkage void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[],
|
||||
const u8 src[], struct ghash_key const *k,
|
||||
u8 ctr[], u32 const rk[], int rounds);
|
||||
|
||||
asmlinkage void pmull_gcm_encrypt_block(u8 dst[], u8 const src[],
|
||||
u32 const rk[], int rounds);
|
||||
asmlinkage void pmull_gcm_decrypt(int bytes, u8 dst[], const u8 src[],
|
||||
struct ghash_key const *k, u64 dg[],
|
||||
u8 ctr[], u32 const rk[], int rounds,
|
||||
u8 tag[]);
|
||||
|
||||
static int ghash_init(struct shash_desc *desc)
|
||||
{
|
||||
|
@ -85,7 +83,7 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
|
|||
struct ghash_key const *k,
|
||||
const char *head))
|
||||
{
|
||||
if (likely(crypto_simd_usable())) {
|
||||
if (likely(crypto_simd_usable() && simd_update)) {
|
||||
kernel_neon_begin();
|
||||
simd_update(blocks, dg, src, key, head);
|
||||
kernel_neon_end();
|
||||
|
@ -398,136 +396,112 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[])
|
|||
}
|
||||
}
|
||||
|
||||
static void gcm_final(struct aead_request *req, struct gcm_aes_ctx *ctx,
|
||||
u64 dg[], u8 tag[], int cryptlen)
|
||||
{
|
||||
u8 mac[AES_BLOCK_SIZE];
|
||||
u128 lengths;
|
||||
|
||||
lengths.a = cpu_to_be64(req->assoclen * 8);
|
||||
lengths.b = cpu_to_be64(cryptlen * 8);
|
||||
|
||||
ghash_do_update(1, dg, (void *)&lengths, &ctx->ghash_key, NULL,
|
||||
pmull_ghash_update_p64);
|
||||
|
||||
put_unaligned_be64(dg[1], mac);
|
||||
put_unaligned_be64(dg[0], mac + 8);
|
||||
|
||||
crypto_xor(tag, mac, AES_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
static int gcm_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct skcipher_walk walk;
|
||||
u8 iv[AES_BLOCK_SIZE];
|
||||
u8 ks[2 * AES_BLOCK_SIZE];
|
||||
u8 tag[AES_BLOCK_SIZE];
|
||||
u64 dg[2] = {};
|
||||
int nrounds = num_rounds(&ctx->aes_key);
|
||||
struct skcipher_walk walk;
|
||||
u8 buf[AES_BLOCK_SIZE];
|
||||
u8 iv[AES_BLOCK_SIZE];
|
||||
u64 dg[2] = {};
|
||||
u128 lengths;
|
||||
u8 *tag;
|
||||
int err;
|
||||
|
||||
lengths.a = cpu_to_be64(req->assoclen * 8);
|
||||
lengths.b = cpu_to_be64(req->cryptlen * 8);
|
||||
|
||||
if (req->assoclen)
|
||||
gcm_calculate_auth_mac(req, dg);
|
||||
|
||||
memcpy(iv, req->iv, GCM_IV_SIZE);
|
||||
put_unaligned_be32(1, iv + GCM_IV_SIZE);
|
||||
put_unaligned_be32(2, iv + GCM_IV_SIZE);
|
||||
|
||||
err = skcipher_walk_aead_encrypt(&walk, req, false);
|
||||
|
||||
if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
|
||||
u32 const *rk = NULL;
|
||||
|
||||
kernel_neon_begin();
|
||||
pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
|
||||
put_unaligned_be32(2, iv + GCM_IV_SIZE);
|
||||
pmull_gcm_encrypt_block(ks, iv, NULL, nrounds);
|
||||
put_unaligned_be32(3, iv + GCM_IV_SIZE);
|
||||
pmull_gcm_encrypt_block(ks + AES_BLOCK_SIZE, iv, NULL, nrounds);
|
||||
put_unaligned_be32(4, iv + GCM_IV_SIZE);
|
||||
|
||||
if (likely(crypto_simd_usable())) {
|
||||
do {
|
||||
int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
|
||||
const u8 *src = walk.src.virt.addr;
|
||||
u8 *dst = walk.dst.virt.addr;
|
||||
int nbytes = walk.nbytes;
|
||||
|
||||
if (rk)
|
||||
kernel_neon_begin();
|
||||
tag = (u8 *)&lengths;
|
||||
|
||||
pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
|
||||
walk.src.virt.addr, &ctx->ghash_key,
|
||||
iv, rk, nrounds, ks);
|
||||
if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE)) {
|
||||
src = dst = memcpy(buf + sizeof(buf) - nbytes,
|
||||
src, nbytes);
|
||||
} else if (nbytes < walk.total) {
|
||||
nbytes &= ~(AES_BLOCK_SIZE - 1);
|
||||
tag = NULL;
|
||||
}
|
||||
|
||||
kernel_neon_begin();
|
||||
pmull_gcm_encrypt(nbytes, dst, src, &ctx->ghash_key, dg,
|
||||
iv, ctx->aes_key.key_enc, nrounds,
|
||||
tag);
|
||||
kernel_neon_end();
|
||||
|
||||
err = skcipher_walk_done(&walk,
|
||||
walk.nbytes % (2 * AES_BLOCK_SIZE));
|
||||
if (unlikely(!nbytes))
|
||||
break;
|
||||
|
||||
rk = ctx->aes_key.key_enc;
|
||||
} while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
|
||||
if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
|
||||
memcpy(walk.dst.virt.addr,
|
||||
buf + sizeof(buf) - nbytes, nbytes);
|
||||
|
||||
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
} while (walk.nbytes);
|
||||
} else {
|
||||
aes_encrypt(&ctx->aes_key, tag, iv);
|
||||
put_unaligned_be32(2, iv + GCM_IV_SIZE);
|
||||
|
||||
while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
|
||||
const int blocks =
|
||||
walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
|
||||
while (walk.nbytes >= AES_BLOCK_SIZE) {
|
||||
int blocks = walk.nbytes / AES_BLOCK_SIZE;
|
||||
const u8 *src = walk.src.virt.addr;
|
||||
u8 *dst = walk.dst.virt.addr;
|
||||
u8 *src = walk.src.virt.addr;
|
||||
int remaining = blocks;
|
||||
|
||||
do {
|
||||
aes_encrypt(&ctx->aes_key, ks, iv);
|
||||
crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE);
|
||||
aes_encrypt(&ctx->aes_key, buf, iv);
|
||||
crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
|
||||
crypto_inc(iv, AES_BLOCK_SIZE);
|
||||
|
||||
dst += AES_BLOCK_SIZE;
|
||||
src += AES_BLOCK_SIZE;
|
||||
} while (--remaining > 0);
|
||||
|
||||
ghash_do_update(blocks, dg,
|
||||
walk.dst.virt.addr, &ctx->ghash_key,
|
||||
NULL, pmull_ghash_update_p64);
|
||||
ghash_do_update(blocks, dg, walk.dst.virt.addr,
|
||||
&ctx->ghash_key, NULL, NULL);
|
||||
|
||||
err = skcipher_walk_done(&walk,
|
||||
walk.nbytes % (2 * AES_BLOCK_SIZE));
|
||||
walk.nbytes % AES_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
/* handle the tail */
|
||||
if (walk.nbytes) {
|
||||
aes_encrypt(&ctx->aes_key, ks, iv);
|
||||
if (walk.nbytes > AES_BLOCK_SIZE) {
|
||||
crypto_inc(iv, AES_BLOCK_SIZE);
|
||||
aes_encrypt(&ctx->aes_key, ks + AES_BLOCK_SIZE, iv);
|
||||
}
|
||||
}
|
||||
}
|
||||
aes_encrypt(&ctx->aes_key, buf, iv);
|
||||
|
||||
/* handle the tail */
|
||||
if (walk.nbytes) {
|
||||
u8 buf[GHASH_BLOCK_SIZE];
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
u8 *dst = walk.dst.virt.addr;
|
||||
u8 *head = NULL;
|
||||
crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
buf, walk.nbytes);
|
||||
|
||||
crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, ks,
|
||||
walk.nbytes);
|
||||
|
||||
if (walk.nbytes > GHASH_BLOCK_SIZE) {
|
||||
head = dst;
|
||||
dst += GHASH_BLOCK_SIZE;
|
||||
nbytes %= GHASH_BLOCK_SIZE;
|
||||
memcpy(buf, walk.dst.virt.addr, walk.nbytes);
|
||||
memset(buf + walk.nbytes, 0, sizeof(buf) - walk.nbytes);
|
||||
}
|
||||
|
||||
memcpy(buf, dst, nbytes);
|
||||
memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
|
||||
ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head,
|
||||
pmull_ghash_update_p64);
|
||||
tag = (u8 *)&lengths;
|
||||
ghash_do_update(1, dg, tag, &ctx->ghash_key,
|
||||
walk.nbytes ? buf : NULL, NULL);
|
||||
|
||||
err = skcipher_walk_done(&walk, 0);
|
||||
if (walk.nbytes)
|
||||
err = skcipher_walk_done(&walk, 0);
|
||||
|
||||
put_unaligned_be64(dg[1], tag);
|
||||
put_unaligned_be64(dg[0], tag + 8);
|
||||
put_unaligned_be32(1, iv + GCM_IV_SIZE);
|
||||
aes_encrypt(&ctx->aes_key, iv, iv);
|
||||
crypto_xor(tag, iv, AES_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
gcm_final(req, ctx, dg, tag, req->cryptlen);
|
||||
|
||||
/* copy authtag to end of dst */
|
||||
scatterwalk_map_and_copy(tag, req->dst, req->assoclen + req->cryptlen,
|
||||
crypto_aead_authsize(aead), 1);
|
||||
|
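The tail handling above is the standard GCM tag construction: the last GHASH input is the 128-bit block encoding the bit lengths of the associated data and the ciphertext, and the tag is

	T = GHASH(H, A, C) xor E_K(J0)

where J0 is the IV with a 32-bit block counter of 1; the put_unaligned_be32(1, ...), aes_encrypt() and crypto_xor() calls at the end of the scalar path compute exactly that E_K(J0) term.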
@ -540,75 +514,65 @@ static int gcm_decrypt(struct aead_request *req)
|
|||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
|
||||
unsigned int authsize = crypto_aead_authsize(aead);
|
||||
struct skcipher_walk walk;
|
||||
u8 iv[2 * AES_BLOCK_SIZE];
|
||||
u8 tag[AES_BLOCK_SIZE];
|
||||
u8 buf[2 * GHASH_BLOCK_SIZE];
|
||||
u64 dg[2] = {};
|
||||
int nrounds = num_rounds(&ctx->aes_key);
|
||||
struct skcipher_walk walk;
|
||||
u8 buf[AES_BLOCK_SIZE];
|
||||
u8 iv[AES_BLOCK_SIZE];
|
||||
u64 dg[2] = {};
|
||||
u128 lengths;
|
||||
u8 *tag;
|
||||
int err;
|
||||
|
||||
lengths.a = cpu_to_be64(req->assoclen * 8);
|
||||
lengths.b = cpu_to_be64((req->cryptlen - authsize) * 8);
|
||||
|
||||
if (req->assoclen)
|
||||
gcm_calculate_auth_mac(req, dg);
|
||||
|
||||
memcpy(iv, req->iv, GCM_IV_SIZE);
|
||||
put_unaligned_be32(1, iv + GCM_IV_SIZE);
|
||||
put_unaligned_be32(2, iv + GCM_IV_SIZE);
|
||||
|
||||
err = skcipher_walk_aead_decrypt(&walk, req, false);
|
||||
|
||||
if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
|
||||
u32 const *rk = NULL;
|
||||
|
||||
kernel_neon_begin();
|
||||
pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
|
||||
put_unaligned_be32(2, iv + GCM_IV_SIZE);
|
||||
|
||||
if (likely(crypto_simd_usable())) {
|
||||
do {
|
||||
int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
|
||||
int rem = walk.total - blocks * AES_BLOCK_SIZE;
|
||||
const u8 *src = walk.src.virt.addr;
|
||||
u8 *dst = walk.dst.virt.addr;
|
||||
int nbytes = walk.nbytes;
|
||||
|
||||
if (rk)
|
||||
kernel_neon_begin();
|
||||
tag = (u8 *)&lengths;
|
||||
|
||||
pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr,
|
||||
walk.src.virt.addr, &ctx->ghash_key,
|
||||
iv, rk, nrounds);
|
||||
|
||||
/* check if this is the final iteration of the loop */
|
||||
if (rem < (2 * AES_BLOCK_SIZE)) {
|
||||
u8 *iv2 = iv + AES_BLOCK_SIZE;
|
||||
|
||||
if (rem > AES_BLOCK_SIZE) {
|
||||
memcpy(iv2, iv, AES_BLOCK_SIZE);
|
||||
crypto_inc(iv2, AES_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
pmull_gcm_encrypt_block(iv, iv, NULL, nrounds);
|
||||
|
||||
if (rem > AES_BLOCK_SIZE)
|
||||
pmull_gcm_encrypt_block(iv2, iv2, NULL,
|
||||
nrounds);
|
||||
if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE)) {
|
||||
src = dst = memcpy(buf + sizeof(buf) - nbytes,
|
||||
src, nbytes);
|
||||
} else if (nbytes < walk.total) {
|
||||
nbytes &= ~(AES_BLOCK_SIZE - 1);
|
||||
tag = NULL;
|
||||
}
|
||||
|
||||
kernel_neon_begin();
|
||||
pmull_gcm_decrypt(nbytes, dst, src, &ctx->ghash_key, dg,
|
||||
iv, ctx->aes_key.key_enc, nrounds,
|
||||
tag);
|
||||
kernel_neon_end();
|
||||
|
||||
err = skcipher_walk_done(&walk,
|
||||
walk.nbytes % (2 * AES_BLOCK_SIZE));
|
||||
if (unlikely(!nbytes))
|
||||
break;
|
||||
|
||||
rk = ctx->aes_key.key_enc;
|
||||
} while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
|
||||
if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
|
||||
memcpy(walk.dst.virt.addr,
|
||||
buf + sizeof(buf) - nbytes, nbytes);
|
||||
|
||||
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
} while (walk.nbytes);
|
||||
} else {
|
||||
aes_encrypt(&ctx->aes_key, tag, iv);
|
||||
put_unaligned_be32(2, iv + GCM_IV_SIZE);
|
||||
|
||||
while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
|
||||
int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
|
||||
while (walk.nbytes >= AES_BLOCK_SIZE) {
|
||||
int blocks = walk.nbytes / AES_BLOCK_SIZE;
|
||||
const u8 *src = walk.src.virt.addr;
|
||||
u8 *dst = walk.dst.virt.addr;
|
||||
u8 *src = walk.src.virt.addr;
|
||||
|
||||
ghash_do_update(blocks, dg, walk.src.virt.addr,
|
||||
&ctx->ghash_key, NULL,
|
||||
pmull_ghash_update_p64);
|
||||
&ctx->ghash_key, NULL, NULL);
|
||||
|
||||
do {
|
||||
aes_encrypt(&ctx->aes_key, buf, iv);
|
||||
|
@ -620,49 +584,38 @@ static int gcm_decrypt(struct aead_request *req)
|
|||
} while (--blocks > 0);
|
||||
|
||||
err = skcipher_walk_done(&walk,
|
||||
walk.nbytes % (2 * AES_BLOCK_SIZE));
|
||||
walk.nbytes % AES_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
/* handle the tail */
|
||||
if (walk.nbytes) {
|
||||
if (walk.nbytes > AES_BLOCK_SIZE) {
|
||||
u8 *iv2 = iv + AES_BLOCK_SIZE;
|
||||
|
||||
memcpy(iv2, iv, AES_BLOCK_SIZE);
|
||||
crypto_inc(iv2, AES_BLOCK_SIZE);
|
||||
|
||||
aes_encrypt(&ctx->aes_key, iv2, iv2);
|
||||
}
|
||||
aes_encrypt(&ctx->aes_key, iv, iv);
|
||||
}
|
||||
}
|
||||
|
||||
/* handle the tail */
|
||||
if (walk.nbytes) {
|
||||
const u8 *src = walk.src.virt.addr;
|
||||
const u8 *head = NULL;
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
|
||||
if (walk.nbytes > GHASH_BLOCK_SIZE) {
|
||||
head = src;
|
||||
src += GHASH_BLOCK_SIZE;
|
||||
nbytes %= GHASH_BLOCK_SIZE;
|
||||
memcpy(buf, walk.src.virt.addr, walk.nbytes);
|
||||
memset(buf + walk.nbytes, 0, sizeof(buf) - walk.nbytes);
|
||||
}
|
||||
|
||||
memcpy(buf, src, nbytes);
|
||||
memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
|
||||
ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head,
|
||||
pmull_ghash_update_p64);
|
||||
tag = (u8 *)&lengths;
|
||||
ghash_do_update(1, dg, tag, &ctx->ghash_key,
|
||||
walk.nbytes ? buf : NULL, NULL);
|
||||
|
||||
crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv,
|
||||
walk.nbytes);
|
||||
if (walk.nbytes) {
|
||||
aes_encrypt(&ctx->aes_key, buf, iv);
|
||||
|
||||
err = skcipher_walk_done(&walk, 0);
|
||||
crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
buf, walk.nbytes);
|
||||
|
||||
err = skcipher_walk_done(&walk, 0);
|
||||
}
|
||||
|
||||
put_unaligned_be64(dg[1], tag);
|
||||
put_unaligned_be64(dg[0], tag + 8);
|
||||
put_unaligned_be32(1, iv + GCM_IV_SIZE);
|
||||
aes_encrypt(&ctx->aes_key, iv, iv);
|
||||
crypto_xor(tag, iv, AES_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
gcm_final(req, ctx, dg, tag, req->cryptlen - authsize);
|
||||
|
||||
/* compare calculated auth tag with the stored one */
|
||||
scatterwalk_map_and_copy(buf, req->src,
|
||||
req->assoclen + req->cryptlen - authsize,
|
||||
|
@ -675,7 +628,7 @@ static int gcm_decrypt(struct aead_request *req)
|
|||
|
||||
static struct aead_alg gcm_aes_alg = {
|
||||
.ivsize = GCM_IV_SIZE,
|
||||
.chunksize = 2 * AES_BLOCK_SIZE,
|
||||
.chunksize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = AES_BLOCK_SIZE,
|
||||
.setkey = gcm_setkey,
|
||||
.setauthsize = gcm_setauthsize,
|
||||
|
|
|
@ -0,0 +1,913 @@
|
|||
#!/usr/bin/env perl
|
||||
# SPDX-License-Identifier: GPL-1.0+ OR BSD-3-Clause
|
||||
#
|
||||
# ====================================================================
|
||||
# Written by Andy Polyakov, @dot-asm, initially for the OpenSSL
|
||||
# project.
|
||||
# ====================================================================
|
||||
#
|
||||
# This module implements Poly1305 hash for ARMv8.
|
||||
#
|
||||
# June 2015
|
||||
#
|
||||
# Numbers are cycles per processed byte with poly1305_blocks alone.
|
||||
#
|
||||
# IALU/gcc-4.9 NEON
|
||||
#
|
||||
# Apple A7 1.86/+5% 0.72
|
||||
# Cortex-A53 2.69/+58% 1.47
|
||||
# Cortex-A57 2.70/+7% 1.14
|
||||
# Denver 1.64/+50% 1.18(*)
|
||||
# X-Gene 2.13/+68% 2.27
|
||||
# Mongoose 1.77/+75% 1.12
|
||||
# Kryo 2.70/+55% 1.13
|
||||
# ThunderX2 1.17/+95% 1.36
|
||||
#
|
||||
# (*) estimate based on resources availability is less than 1.0,
|
||||
# i.e. measured result is worse than expected, presumably binary
|
||||
# translator is not almighty;
|
||||
|
||||
$flavour=shift;
|
||||
$output=shift;
|
||||
|
||||
if ($flavour && $flavour ne "void") {
|
||||
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
|
||||
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
|
||||
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
|
||||
die "can't locate arm-xlate.pl";
|
||||
|
||||
open STDOUT,"| \"$^X\" $xlate $flavour $output";
|
||||
} else {
|
||||
open STDOUT,">$output";
|
||||
}
|
||||
|
||||
my ($ctx,$inp,$len,$padbit) = map("x$_",(0..3));
|
||||
my ($mac,$nonce)=($inp,$len);
|
||||
|
||||
my ($h0,$h1,$h2,$r0,$r1,$s1,$t0,$t1,$d0,$d1,$d2) = map("x$_",(4..14));
|
||||
|
||||
$code.=<<___;
|
||||
#ifndef __KERNEL__
|
||||
# include "arm_arch.h"
|
||||
.extern OPENSSL_armcap_P
|
||||
#endif
|
||||
|
||||
.text
|
||||
|
||||
// forward "declarations" are required for Apple
|
||||
.globl poly1305_blocks
|
||||
.globl poly1305_emit
|
||||
|
||||
.globl poly1305_init
|
||||
.type poly1305_init,%function
|
||||
.align 5
|
||||
poly1305_init:
|
||||
cmp $inp,xzr
|
||||
stp xzr,xzr,[$ctx] // zero hash value
|
||||
stp xzr,xzr,[$ctx,#16] // [along with is_base2_26]
|
||||
|
||||
csel x0,xzr,x0,eq
|
||||
b.eq .Lno_key
|
||||
|
||||
#ifndef __KERNEL__
|
||||
adrp x17,OPENSSL_armcap_P
|
||||
ldr w17,[x17,#:lo12:OPENSSL_armcap_P]
|
||||
#endif
|
||||
|
||||
ldp $r0,$r1,[$inp] // load key
|
||||
mov $s1,#0xfffffffc0fffffff
|
||||
movk $s1,#0x0fff,lsl#48
|
||||
#ifdef __AARCH64EB__
|
||||
rev $r0,$r0 // flip bytes
|
||||
rev $r1,$r1
|
||||
#endif
|
||||
and $r0,$r0,$s1 // &=0ffffffc0fffffff
|
||||
and $s1,$s1,#-4
|
||||
and $r1,$r1,$s1 // &=0ffffffc0ffffffc
|
||||
mov w#$s1,#-1
|
||||
stp $r0,$r1,[$ctx,#32] // save key value
|
||||
str w#$s1,[$ctx,#48] // impossible key power value
|
||||
|
||||
#ifndef __KERNEL__
|
||||
tst w17,#ARMV7_NEON
|
||||
|
||||
adr $d0,.Lpoly1305_blocks
|
||||
adr $r0,.Lpoly1305_blocks_neon
|
||||
adr $d1,.Lpoly1305_emit
|
||||
|
||||
csel $d0,$d0,$r0,eq
|
||||
|
||||
# ifdef __ILP32__
|
||||
stp w#$d0,w#$d1,[$len]
|
||||
# else
|
||||
stp $d0,$d1,[$len]
|
||||
# endif
|
||||
#endif
|
||||
mov x0,#1
|
||||
.Lno_key:
|
||||
ret
|
||||
.size poly1305_init,.-poly1305_init
|
||||
|
||||
.type poly1305_blocks,%function
|
||||
.align 5
|
||||
poly1305_blocks:
|
||||
.Lpoly1305_blocks:
|
||||
ands $len,$len,#-16
|
||||
b.eq .Lno_data
|
||||
|
||||
ldp $h0,$h1,[$ctx] // load hash value
|
||||
ldp $h2,x17,[$ctx,#16] // [along with is_base2_26]
|
||||
ldp $r0,$r1,[$ctx,#32] // load key value
|
||||
|
||||
#ifdef __AARCH64EB__
|
||||
lsr $d0,$h0,#32
|
||||
mov w#$d1,w#$h0
|
||||
lsr $d2,$h1,#32
|
||||
mov w15,w#$h1
|
||||
lsr x16,$h2,#32
|
||||
#else
|
||||
mov w#$d0,w#$h0
|
||||
lsr $d1,$h0,#32
|
||||
mov w#$d2,w#$h1
|
||||
lsr x15,$h1,#32
|
||||
mov w16,w#$h2
|
||||
#endif
|
||||
|
||||
add $d0,$d0,$d1,lsl#26 // base 2^26 -> base 2^64
|
||||
lsr $d1,$d2,#12
|
||||
adds $d0,$d0,$d2,lsl#52
|
||||
add $d1,$d1,x15,lsl#14
|
||||
adc $d1,$d1,xzr
|
||||
lsr $d2,x16,#24
|
||||
adds $d1,$d1,x16,lsl#40
|
||||
adc $d2,$d2,xzr
|
||||
|
||||
cmp x17,#0 // is_base2_26?
|
||||
add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
|
||||
csel $h0,$h0,$d0,eq // choose between radixes
|
||||
csel $h1,$h1,$d1,eq
|
||||
csel $h2,$h2,$d2,eq
|
||||
|
||||
.Loop:
|
||||
ldp $t0,$t1,[$inp],#16 // load input
|
||||
sub $len,$len,#16
|
||||
#ifdef __AARCH64EB__
|
||||
rev $t0,$t0
|
||||
rev $t1,$t1
|
||||
#endif
|
||||
adds $h0,$h0,$t0 // accumulate input
|
||||
adcs $h1,$h1,$t1
|
||||
|
||||
mul $d0,$h0,$r0 // h0*r0
|
||||
adc $h2,$h2,$padbit
|
||||
umulh $d1,$h0,$r0
|
||||
|
||||
mul $t0,$h1,$s1 // h1*5*r1
|
||||
umulh $t1,$h1,$s1
|
||||
|
||||
adds $d0,$d0,$t0
|
||||
mul $t0,$h0,$r1 // h0*r1
|
||||
adc $d1,$d1,$t1
|
||||
umulh $d2,$h0,$r1
|
||||
|
||||
adds $d1,$d1,$t0
|
||||
mul $t0,$h1,$r0 // h1*r0
|
||||
adc $d2,$d2,xzr
|
||||
umulh $t1,$h1,$r0
|
||||
|
||||
adds $d1,$d1,$t0
|
||||
mul $t0,$h2,$s1 // h2*5*r1
|
||||
adc $d2,$d2,$t1
|
||||
mul $t1,$h2,$r0 // h2*r0
|
||||
|
||||
adds $d1,$d1,$t0
|
||||
adc $d2,$d2,$t1
|
||||
|
||||
and $t0,$d2,#-4 // final reduction
|
||||
and $h2,$d2,#3
|
||||
add $t0,$t0,$d2,lsr#2
|
||||
adds $h0,$d0,$t0
|
||||
adcs $h1,$d1,xzr
|
||||
adc $h2,$h2,xzr
|
||||
|
||||
cbnz $len,.Loop
|
||||
|
||||
stp $h0,$h1,[$ctx] // store hash value
|
||||
stp $h2,xzr,[$ctx,#16] // [and clear is_base2_26]
|
||||
|
||||
.Lno_data:
|
||||
ret
|
||||
.size poly1305_blocks,.-poly1305_blocks
|
||||
|
||||
.type poly1305_emit,%function
|
||||
.align 5
|
||||
poly1305_emit:
|
||||
.Lpoly1305_emit:
|
||||
ldp $h0,$h1,[$ctx] // load hash base 2^64
|
||||
ldp $h2,$r0,[$ctx,#16] // [along with is_base2_26]
|
||||
ldp $t0,$t1,[$nonce] // load nonce
|
||||
|
||||
#ifdef __AARCH64EB__
|
||||
lsr $d0,$h0,#32
|
||||
mov w#$d1,w#$h0
|
||||
lsr $d2,$h1,#32
|
||||
mov w15,w#$h1
|
||||
lsr x16,$h2,#32
|
||||
#else
|
||||
mov w#$d0,w#$h0
|
||||
lsr $d1,$h0,#32
|
||||
mov w#$d2,w#$h1
|
||||
lsr x15,$h1,#32
|
||||
mov w16,w#$h2
|
||||
#endif
|
||||
|
||||
add $d0,$d0,$d1,lsl#26 // base 2^26 -> base 2^64
|
||||
lsr $d1,$d2,#12
|
||||
adds $d0,$d0,$d2,lsl#52
|
||||
add $d1,$d1,x15,lsl#14
|
||||
adc $d1,$d1,xzr
|
||||
lsr $d2,x16,#24
|
||||
adds $d1,$d1,x16,lsl#40
|
||||
adc $d2,$d2,xzr
|
||||
|
||||
cmp $r0,#0 // is_base2_26?
|
||||
csel $h0,$h0,$d0,eq // choose between radixes
|
||||
csel $h1,$h1,$d1,eq
|
||||
csel $h2,$h2,$d2,eq
|
||||
|
||||
adds $d0,$h0,#5 // compare to modulus
|
||||
adcs $d1,$h1,xzr
|
||||
adc $d2,$h2,xzr
|
||||
|
||||
tst $d2,#-4 // see if it's carried/borrowed
|
||||
|
||||
csel $h0,$h0,$d0,eq
|
||||
csel $h1,$h1,$d1,eq
|
||||
|
||||
#ifdef __AARCH64EB__
|
||||
ror $t0,$t0,#32 // flip nonce words
|
||||
ror $t1,$t1,#32
|
||||
#endif
|
||||
adds $h0,$h0,$t0 // accumulate nonce
|
||||
adc $h1,$h1,$t1
|
||||
#ifdef __AARCH64EB__
|
||||
rev $h0,$h0 // flip output bytes
|
||||
rev $h1,$h1
|
||||
#endif
|
||||
stp $h0,$h1,[$mac] // write result
|
||||
|
||||
ret
|
||||
.size poly1305_emit,.-poly1305_emit
|
||||
___
|
||||
my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("v$_.4s",(0..8));
|
||||
my ($IN01_0,$IN01_1,$IN01_2,$IN01_3,$IN01_4) = map("v$_.2s",(9..13));
|
||||
my ($IN23_0,$IN23_1,$IN23_2,$IN23_3,$IN23_4) = map("v$_.2s",(14..18));
|
||||
my ($ACC0,$ACC1,$ACC2,$ACC3,$ACC4) = map("v$_.2d",(19..23));
|
||||
my ($H0,$H1,$H2,$H3,$H4) = map("v$_.2s",(24..28));
|
||||
my ($T0,$T1,$MASK) = map("v$_",(29..31));
|
||||
|
||||
my ($in2,$zeros)=("x16","x17");
|
||||
my $is_base2_26 = $zeros; # borrow
|
||||
|
||||
$code.=<<___;
|
||||
.type poly1305_mult,%function
|
||||
.align 5
|
||||
poly1305_mult:
|
||||
mul $d0,$h0,$r0 // h0*r0
|
||||
umulh $d1,$h0,$r0
|
||||
|
||||
mul $t0,$h1,$s1 // h1*5*r1
|
||||
umulh $t1,$h1,$s1
|
||||
|
||||
adds $d0,$d0,$t0
|
||||
mul $t0,$h0,$r1 // h0*r1
|
||||
adc $d1,$d1,$t1
|
||||
umulh $d2,$h0,$r1
|
||||
|
||||
adds $d1,$d1,$t0
|
||||
mul $t0,$h1,$r0 // h1*r0
|
||||
adc $d2,$d2,xzr
|
||||
umulh $t1,$h1,$r0
|
||||
|
||||
adds $d1,$d1,$t0
|
||||
mul $t0,$h2,$s1 // h2*5*r1
|
||||
adc $d2,$d2,$t1
|
||||
mul $t1,$h2,$r0 // h2*r0
|
||||
|
||||
adds $d1,$d1,$t0
|
||||
adc $d2,$d2,$t1
|
||||
|
||||
and $t0,$d2,#-4 // final reduction
|
||||
and $h2,$d2,#3
|
||||
add $t0,$t0,$d2,lsr#2
|
||||
adds $h0,$d0,$t0
|
||||
adcs $h1,$d1,xzr
|
||||
adc $h2,$h2,xzr
|
||||
|
||||
ret
|
||||
.size poly1305_mult,.-poly1305_mult
|
||||
|
||||
.type poly1305_splat,%function
|
||||
.align 4
|
||||
poly1305_splat:
|
||||
and x12,$h0,#0x03ffffff // base 2^64 -> base 2^26
|
||||
ubfx x13,$h0,#26,#26
|
||||
extr x14,$h1,$h0,#52
|
||||
and x14,x14,#0x03ffffff
|
||||
ubfx x15,$h1,#14,#26
|
||||
extr x16,$h2,$h1,#40
|
||||
|
||||
str w12,[$ctx,#16*0] // r0
|
||||
add w12,w13,w13,lsl#2 // r1*5
|
||||
str w13,[$ctx,#16*1] // r1
|
||||
add w13,w14,w14,lsl#2 // r2*5
|
||||
str w12,[$ctx,#16*2] // s1
|
||||
str w14,[$ctx,#16*3] // r2
|
||||
add w14,w15,w15,lsl#2 // r3*5
|
||||
str w13,[$ctx,#16*4] // s2
|
||||
str w15,[$ctx,#16*5] // r3
|
||||
add w15,w16,w16,lsl#2 // r4*5
|
||||
str w14,[$ctx,#16*6] // s3
|
||||
str w16,[$ctx,#16*7] // r4
|
||||
str w15,[$ctx,#16*8] // s4
|
||||
|
||||
ret
|
||||
.size poly1305_splat,.-poly1305_splat
|
||||
|
||||
#ifdef __KERNEL__
|
||||
.globl poly1305_blocks_neon
|
||||
#endif
|
||||
.type poly1305_blocks_neon,%function
|
||||
.align 5
|
||||
poly1305_blocks_neon:
|
||||
.Lpoly1305_blocks_neon:
|
||||
ldr $is_base2_26,[$ctx,#24]
|
||||
cmp $len,#128
|
||||
b.lo .Lpoly1305_blocks
|
||||
|
||||
.inst 0xd503233f // paciasp
|
||||
stp x29,x30,[sp,#-80]!
|
||||
add x29,sp,#0
|
||||
|
||||
stp d8,d9,[sp,#16] // meet ABI requirements
|
||||
stp d10,d11,[sp,#32]
|
||||
stp d12,d13,[sp,#48]
|
||||
stp d14,d15,[sp,#64]
|
||||
|
||||
cbz $is_base2_26,.Lbase2_64_neon
|
||||
|
||||
ldp w10,w11,[$ctx] // load hash value base 2^26
|
||||
ldp w12,w13,[$ctx,#8]
|
||||
ldr w14,[$ctx,#16]
|
||||
|
||||
tst $len,#31
|
||||
b.eq .Leven_neon
|
||||
|
||||
ldp $r0,$r1,[$ctx,#32] // load key value
|
||||
|
||||
add $h0,x10,x11,lsl#26 // base 2^26 -> base 2^64
|
||||
lsr $h1,x12,#12
|
||||
adds $h0,$h0,x12,lsl#52
|
||||
add $h1,$h1,x13,lsl#14
|
||||
adc $h1,$h1,xzr
|
||||
lsr $h2,x14,#24
|
||||
adds $h1,$h1,x14,lsl#40
|
||||
adc $d2,$h2,xzr // can be partially reduced...
|
||||
|
||||
ldp $d0,$d1,[$inp],#16 // load input
|
||||
sub $len,$len,#16
|
||||
add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
|
||||
|
||||
#ifdef __AARCH64EB__
|
||||
rev $d0,$d0
|
||||
rev $d1,$d1
|
||||
#endif
|
||||
adds $h0,$h0,$d0 // accumulate input
|
||||
adcs $h1,$h1,$d1
|
||||
adc $h2,$h2,$padbit
|
||||
|
||||
bl poly1305_mult
|
||||
|
||||
and x10,$h0,#0x03ffffff // base 2^64 -> base 2^26
|
||||
ubfx x11,$h0,#26,#26
|
||||
extr x12,$h1,$h0,#52
|
||||
and x12,x12,#0x03ffffff
|
||||
ubfx x13,$h1,#14,#26
|
||||
extr x14,$h2,$h1,#40
|
||||
|
||||
b .Leven_neon
|
||||
|
||||
.align 4
|
||||
.Lbase2_64_neon:
|
||||
ldp $r0,$r1,[$ctx,#32] // load key value
|
||||
|
||||
ldp $h0,$h1,[$ctx] // load hash value base 2^64
|
||||
ldr $h2,[$ctx,#16]
|
||||
|
||||
tst $len,#31
|
||||
b.eq .Linit_neon
|
||||
|
||||
ldp $d0,$d1,[$inp],#16 // load input
|
||||
sub $len,$len,#16
|
||||
add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
|
||||
#ifdef __AARCH64EB__
|
||||
rev $d0,$d0
|
||||
rev $d1,$d1
|
||||
#endif
|
||||
adds $h0,$h0,$d0 // accumulate input
|
||||
adcs $h1,$h1,$d1
|
||||
adc $h2,$h2,$padbit
|
||||
|
||||
bl poly1305_mult
|
||||
|
||||
.Linit_neon:
|
||||
ldr w17,[$ctx,#48] // first table element
|
||||
and x10,$h0,#0x03ffffff // base 2^64 -> base 2^26
|
||||
ubfx x11,$h0,#26,#26
|
||||
extr x12,$h1,$h0,#52
|
||||
and x12,x12,#0x03ffffff
|
||||
ubfx x13,$h1,#14,#26
|
||||
extr x14,$h2,$h1,#40
|
||||
|
||||
cmp w17,#-1 // is value impossible?
|
||||
b.ne .Leven_neon
|
||||
|
||||
fmov ${H0},x10
|
||||
fmov ${H1},x11
|
||||
fmov ${H2},x12
|
||||
fmov ${H3},x13
|
||||
fmov ${H4},x14
|
||||
|
||||
////////////////////////////////// initialize r^n table
|
||||
mov $h0,$r0 // r^1
|
||||
add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
|
||||
mov $h1,$r1
|
||||
mov $h2,xzr
|
||||
add $ctx,$ctx,#48+12
|
||||
bl poly1305_splat
|
||||
|
||||
bl poly1305_mult // r^2
|
||||
sub $ctx,$ctx,#4
|
||||
bl poly1305_splat
|
||||
|
||||
bl poly1305_mult // r^3
|
||||
sub $ctx,$ctx,#4
|
||||
bl poly1305_splat
|
||||
|
||||
bl poly1305_mult // r^4
|
||||
sub $ctx,$ctx,#4
|
||||
bl poly1305_splat
|
||||
sub $ctx,$ctx,#48 // restore original $ctx
|
||||
b .Ldo_neon
|
||||
|
||||
.align 4
|
||||
.Leven_neon:
|
||||
fmov ${H0},x10
|
||||
fmov ${H1},x11
|
||||
fmov ${H2},x12
|
||||
fmov ${H3},x13
|
||||
fmov ${H4},x14
|
||||
|
||||
.Ldo_neon:
|
||||
ldp x8,x12,[$inp,#32] // inp[2:3]
|
||||
subs $len,$len,#64
|
||||
ldp x9,x13,[$inp,#48]
|
||||
add $in2,$inp,#96
|
||||
adr $zeros,.Lzeros
|
||||
|
||||
lsl $padbit,$padbit,#24
|
||||
add x15,$ctx,#48
|
||||
|
||||
#ifdef __AARCH64EB__
|
||||
rev x8,x8
|
||||
rev x12,x12
|
||||
rev x9,x9
|
||||
rev x13,x13
|
||||
#endif
|
||||
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
|
||||
and x5,x9,#0x03ffffff
|
||||
ubfx x6,x8,#26,#26
|
||||
ubfx x7,x9,#26,#26
|
||||
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
|
||||
extr x8,x12,x8,#52
|
||||
extr x9,x13,x9,#52
|
||||
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
|
||||
fmov $IN23_0,x4
|
||||
and x8,x8,#0x03ffffff
|
||||
and x9,x9,#0x03ffffff
|
||||
ubfx x10,x12,#14,#26
|
||||
ubfx x11,x13,#14,#26
|
||||
add x12,$padbit,x12,lsr#40
|
||||
add x13,$padbit,x13,lsr#40
|
||||
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
|
||||
fmov $IN23_1,x6
|
||||
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
|
||||
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
|
||||
fmov $IN23_2,x8
|
||||
fmov $IN23_3,x10
|
||||
fmov $IN23_4,x12
|
||||
|
||||
ldp x8,x12,[$inp],#16 // inp[0:1]
|
||||
ldp x9,x13,[$inp],#48
|
||||
|
||||
ld1 {$R0,$R1,$S1,$R2},[x15],#64
|
||||
ld1 {$S2,$R3,$S3,$R4},[x15],#64
|
||||
ld1 {$S4},[x15]
|
||||
|
||||
#ifdef __AARCH64EB__
|
||||
rev x8,x8
|
||||
rev x12,x12
|
||||
rev x9,x9
|
||||
rev x13,x13
|
||||
#endif
|
||||
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
|
||||
and x5,x9,#0x03ffffff
|
||||
ubfx x6,x8,#26,#26
|
||||
ubfx x7,x9,#26,#26
|
||||
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
|
||||
extr x8,x12,x8,#52
|
||||
extr x9,x13,x9,#52
|
||||
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
|
||||
fmov $IN01_0,x4
|
||||
and x8,x8,#0x03ffffff
|
||||
and x9,x9,#0x03ffffff
|
||||
ubfx x10,x12,#14,#26
|
||||
ubfx x11,x13,#14,#26
|
||||
add x12,$padbit,x12,lsr#40
|
||||
add x13,$padbit,x13,lsr#40
|
||||
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
|
||||
fmov $IN01_1,x6
|
||||
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
|
||||
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
|
||||
movi $MASK.2d,#-1
|
||||
fmov $IN01_2,x8
|
||||
fmov $IN01_3,x10
|
||||
fmov $IN01_4,x12
|
||||
ushr $MASK.2d,$MASK.2d,#38
|
||||
|
||||
b.ls .Lskip_loop
|
||||
|
||||
.align 4
|
||||
.Loop_neon:
|
||||
////////////////////////////////////////////////////////////////
|
||||
// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
|
||||
// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
|
||||
// \___________________/
|
||||
// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
|
||||
// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
|
||||
// \___________________/ \____________________/
|
||||
//
|
||||
// Note that we start with inp[2:3]*r^2. This is because it
|
||||
// doesn't depend on reduction in previous iteration.
|
||||
////////////////////////////////////////////////////////////////
|
||||
// d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
|
||||
// d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4
|
||||
// d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3
|
||||
// d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2
|
||||
// d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1
|
||||
|
||||
subs $len,$len,#64
|
||||
umull $ACC4,$IN23_0,${R4}[2]
|
||||
csel $in2,$zeros,$in2,lo
|
||||
umull $ACC3,$IN23_0,${R3}[2]
|
||||
umull $ACC2,$IN23_0,${R2}[2]
|
||||
ldp x8,x12,[$in2],#16 // inp[2:3] (or zero)
|
||||
umull $ACC1,$IN23_0,${R1}[2]
|
||||
ldp x9,x13,[$in2],#48
|
||||
umull $ACC0,$IN23_0,${R0}[2]
|
||||
#ifdef __AARCH64EB__
|
||||
rev x8,x8
|
||||
rev x12,x12
|
||||
rev x9,x9
|
||||
rev x13,x13
|
||||
#endif
|
||||
|
||||
umlal $ACC4,$IN23_1,${R3}[2]
|
||||
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
|
||||
umlal $ACC3,$IN23_1,${R2}[2]
|
||||
and x5,x9,#0x03ffffff
|
||||
umlal $ACC2,$IN23_1,${R1}[2]
|
||||
ubfx x6,x8,#26,#26
|
||||
umlal $ACC1,$IN23_1,${R0}[2]
|
||||
ubfx x7,x9,#26,#26
|
||||
umlal $ACC0,$IN23_1,${S4}[2]
|
||||
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
|
||||
|
||||
umlal $ACC4,$IN23_2,${R2}[2]
|
||||
extr x8,x12,x8,#52
|
||||
umlal $ACC3,$IN23_2,${R1}[2]
|
||||
extr x9,x13,x9,#52
|
||||
umlal $ACC2,$IN23_2,${R0}[2]
|
||||
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
|
||||
umlal $ACC1,$IN23_2,${S4}[2]
|
||||
fmov $IN23_0,x4
|
||||
umlal $ACC0,$IN23_2,${S3}[2]
|
||||
and x8,x8,#0x03ffffff
|
||||
|
||||
umlal $ACC4,$IN23_3,${R1}[2]
|
||||
and x9,x9,#0x03ffffff
|
||||
umlal $ACC3,$IN23_3,${R0}[2]
|
||||
ubfx x10,x12,#14,#26
|
||||
umlal $ACC2,$IN23_3,${S4}[2]
|
||||
ubfx x11,x13,#14,#26
|
||||
umlal $ACC1,$IN23_3,${S3}[2]
|
||||
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
|
||||
umlal $ACC0,$IN23_3,${S2}[2]
|
||||
fmov $IN23_1,x6
|
||||
|
||||
add $IN01_2,$IN01_2,$H2
|
||||
add x12,$padbit,x12,lsr#40
|
||||
umlal $ACC4,$IN23_4,${R0}[2]
|
||||
add x13,$padbit,x13,lsr#40
|
||||
umlal $ACC3,$IN23_4,${S4}[2]
|
||||
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
|
||||
umlal $ACC2,$IN23_4,${S3}[2]
|
||||
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
|
||||
umlal $ACC1,$IN23_4,${S2}[2]
|
||||
fmov $IN23_2,x8
|
||||
umlal $ACC0,$IN23_4,${S1}[2]
|
||||
fmov $IN23_3,x10
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// (hash+inp[0:1])*r^4 and accumulate
|
||||
|
||||
add $IN01_0,$IN01_0,$H0
|
||||
fmov $IN23_4,x12
|
||||
umlal $ACC3,$IN01_2,${R1}[0]
|
||||
ldp x8,x12,[$inp],#16 // inp[0:1]
|
||||
umlal $ACC0,$IN01_2,${S3}[0]
|
||||
ldp x9,x13,[$inp],#48
|
||||
umlal $ACC4,$IN01_2,${R2}[0]
|
||||
umlal $ACC1,$IN01_2,${S4}[0]
|
||||
umlal $ACC2,$IN01_2,${R0}[0]
|
||||
#ifdef __AARCH64EB__
|
||||
rev x8,x8
|
||||
rev x12,x12
|
||||
rev x9,x9
|
||||
rev x13,x13
|
||||
#endif
|
||||
|
||||
add $IN01_1,$IN01_1,$H1
|
||||
umlal $ACC3,$IN01_0,${R3}[0]
|
||||
umlal $ACC4,$IN01_0,${R4}[0]
|
||||
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
|
||||
umlal $ACC2,$IN01_0,${R2}[0]
|
||||
and x5,x9,#0x03ffffff
|
||||
umlal $ACC0,$IN01_0,${R0}[0]
|
||||
ubfx x6,x8,#26,#26
|
||||
umlal $ACC1,$IN01_0,${R1}[0]
|
||||
ubfx x7,x9,#26,#26
|
||||
|
||||
add $IN01_3,$IN01_3,$H3
|
||||
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
|
||||
umlal $ACC3,$IN01_1,${R2}[0]
|
||||
extr x8,x12,x8,#52
|
||||
umlal $ACC4,$IN01_1,${R3}[0]
|
||||
extr x9,x13,x9,#52
|
||||
umlal $ACC0,$IN01_1,${S4}[0]
|
||||
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
|
||||
umlal $ACC2,$IN01_1,${R1}[0]
|
||||
fmov $IN01_0,x4
|
||||
umlal $ACC1,$IN01_1,${R0}[0]
|
||||
and x8,x8,#0x03ffffff
|
||||
|
||||
add $IN01_4,$IN01_4,$H4
|
||||
and x9,x9,#0x03ffffff
|
||||
umlal $ACC3,$IN01_3,${R0}[0]
|
||||
ubfx x10,x12,#14,#26
|
||||
umlal $ACC0,$IN01_3,${S2}[0]
|
||||
ubfx x11,x13,#14,#26
|
||||
umlal $ACC4,$IN01_3,${R1}[0]
|
||||
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
|
||||
umlal $ACC1,$IN01_3,${S3}[0]
|
||||
fmov $IN01_1,x6
|
||||
umlal $ACC2,$IN01_3,${S4}[0]
|
||||
add x12,$padbit,x12,lsr#40
|
||||
|
||||
umlal $ACC3,$IN01_4,${S4}[0]
|
||||
add x13,$padbit,x13,lsr#40
|
||||
umlal $ACC0,$IN01_4,${S1}[0]
|
||||
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
|
||||
umlal $ACC4,$IN01_4,${R0}[0]
|
||||
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
|
||||
umlal $ACC1,$IN01_4,${S2}[0]
|
||||
fmov $IN01_2,x8
|
||||
umlal $ACC2,$IN01_4,${S3}[0]
|
||||
fmov $IN01_3,x10
|
||||
fmov $IN01_4,x12
|
||||
|
||||
/////////////////////////////////////////////////////////////////
|
||||
// lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
|
||||
// and P. Schwabe
|
||||
//
|
||||
// [see discussion in poly1305-armv4 module]
|
||||
|
||||
ushr $T0.2d,$ACC3,#26
|
||||
xtn $H3,$ACC3
|
||||
ushr $T1.2d,$ACC0,#26
|
||||
and $ACC0,$ACC0,$MASK.2d
|
||||
add $ACC4,$ACC4,$T0.2d // h3 -> h4
|
||||
bic $H3,#0xfc,lsl#24 // &=0x03ffffff
|
||||
add $ACC1,$ACC1,$T1.2d // h0 -> h1
|
||||
|
||||
ushr $T0.2d,$ACC4,#26
|
||||
xtn $H4,$ACC4
|
||||
ushr $T1.2d,$ACC1,#26
|
||||
xtn $H1,$ACC1
|
||||
bic $H4,#0xfc,lsl#24
|
||||
add $ACC2,$ACC2,$T1.2d // h1 -> h2
|
||||
|
||||
add $ACC0,$ACC0,$T0.2d
|
||||
shl $T0.2d,$T0.2d,#2
|
||||
shrn $T1.2s,$ACC2,#26
|
||||
xtn $H2,$ACC2
|
||||
add $ACC0,$ACC0,$T0.2d // h4 -> h0
|
||||
bic $H1,#0xfc,lsl#24
|
||||
add $H3,$H3,$T1.2s // h2 -> h3
|
||||
bic $H2,#0xfc,lsl#24
|
||||
|
||||
shrn $T0.2s,$ACC0,#26
|
||||
xtn $H0,$ACC0
|
||||
ushr $T1.2s,$H3,#26
|
||||
bic $H3,#0xfc,lsl#24
|
||||
bic $H0,#0xfc,lsl#24
|
||||
add $H1,$H1,$T0.2s // h0 -> h1
|
||||
add $H4,$H4,$T1.2s // h3 -> h4
|
||||
|
||||
b.hi .Loop_neon
|
||||
|
||||
.Lskip_loop:
|
||||
dup $IN23_2,${IN23_2}[0]
|
||||
add $IN01_2,$IN01_2,$H2
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
|
||||
|
||||
adds $len,$len,#32
|
||||
b.ne .Long_tail
|
||||
|
||||
dup $IN23_2,${IN01_2}[0]
|
||||
add $IN23_0,$IN01_0,$H0
|
||||
add $IN23_3,$IN01_3,$H3
|
||||
add $IN23_1,$IN01_1,$H1
|
||||
add $IN23_4,$IN01_4,$H4
|
||||
|
||||
.Long_tail:
|
||||
dup $IN23_0,${IN23_0}[0]
|
||||
umull2 $ACC0,$IN23_2,${S3}
|
||||
umull2 $ACC3,$IN23_2,${R1}
|
||||
umull2 $ACC4,$IN23_2,${R2}
|
||||
umull2 $ACC2,$IN23_2,${R0}
|
||||
umull2 $ACC1,$IN23_2,${S4}
|
||||
|
||||
dup $IN23_1,${IN23_1}[0]
|
||||
umlal2 $ACC0,$IN23_0,${R0}
|
||||
umlal2 $ACC2,$IN23_0,${R2}
|
||||
umlal2 $ACC3,$IN23_0,${R3}
|
||||
umlal2 $ACC4,$IN23_0,${R4}
|
||||
umlal2 $ACC1,$IN23_0,${R1}
|
||||
|
||||
dup $IN23_3,${IN23_3}[0]
|
||||
umlal2 $ACC0,$IN23_1,${S4}
|
||||
umlal2 $ACC3,$IN23_1,${R2}
|
||||
umlal2 $ACC2,$IN23_1,${R1}
|
||||
umlal2 $ACC4,$IN23_1,${R3}
|
||||
umlal2 $ACC1,$IN23_1,${R0}
|
||||
|
||||
dup $IN23_4,${IN23_4}[0]
|
||||
umlal2 $ACC3,$IN23_3,${R0}
|
||||
umlal2 $ACC4,$IN23_3,${R1}
|
||||
umlal2 $ACC0,$IN23_3,${S2}
|
||||
umlal2 $ACC1,$IN23_3,${S3}
|
||||
umlal2 $ACC2,$IN23_3,${S4}
|
||||
|
||||
umlal2 $ACC3,$IN23_4,${S4}
|
||||
umlal2 $ACC0,$IN23_4,${S1}
|
||||
umlal2 $ACC4,$IN23_4,${R0}
|
||||
umlal2 $ACC1,$IN23_4,${S2}
|
||||
umlal2 $ACC2,$IN23_4,${S3}
|
||||
|
||||
b.eq .Lshort_tail
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// (hash+inp[0:1])*r^4:r^3 and accumulate
|
||||
|
||||
add $IN01_0,$IN01_0,$H0
|
||||
umlal $ACC3,$IN01_2,${R1}
|
||||
umlal $ACC0,$IN01_2,${S3}
|
||||
umlal $ACC4,$IN01_2,${R2}
|
||||
umlal $ACC1,$IN01_2,${S4}
|
||||
umlal $ACC2,$IN01_2,${R0}
|
||||
|
||||
add $IN01_1,$IN01_1,$H1
|
||||
umlal $ACC3,$IN01_0,${R3}
|
||||
umlal $ACC0,$IN01_0,${R0}
|
||||
umlal $ACC4,$IN01_0,${R4}
|
||||
umlal $ACC1,$IN01_0,${R1}
|
||||
umlal $ACC2,$IN01_0,${R2}
|
||||
|
||||
add $IN01_3,$IN01_3,$H3
|
||||
umlal $ACC3,$IN01_1,${R2}
|
||||
umlal $ACC0,$IN01_1,${S4}
|
||||
umlal $ACC4,$IN01_1,${R3}
|
||||
umlal $ACC1,$IN01_1,${R0}
|
||||
umlal $ACC2,$IN01_1,${R1}
|
||||
|
||||
add $IN01_4,$IN01_4,$H4
|
||||
umlal $ACC3,$IN01_3,${R0}
|
||||
umlal $ACC0,$IN01_3,${S2}
|
||||
umlal $ACC4,$IN01_3,${R1}
|
||||
umlal $ACC1,$IN01_3,${S3}
|
||||
umlal $ACC2,$IN01_3,${S4}
|
||||
|
||||
umlal $ACC3,$IN01_4,${S4}
|
||||
umlal $ACC0,$IN01_4,${S1}
|
||||
umlal $ACC4,$IN01_4,${R0}
|
||||
umlal $ACC1,$IN01_4,${S2}
|
||||
umlal $ACC2,$IN01_4,${S3}
|
||||
|
||||
.Lshort_tail:
|
||||
////////////////////////////////////////////////////////////////
|
||||
// horizontal add
|
||||
|
||||
addp $ACC3,$ACC3,$ACC3
|
||||
ldp d8,d9,[sp,#16] // meet ABI requirements
|
||||
addp $ACC0,$ACC0,$ACC0
|
||||
ldp d10,d11,[sp,#32]
|
||||
addp $ACC4,$ACC4,$ACC4
|
||||
ldp d12,d13,[sp,#48]
|
||||
addp $ACC1,$ACC1,$ACC1
|
||||
ldp d14,d15,[sp,#64]
|
||||
addp $ACC2,$ACC2,$ACC2
|
||||
ldr x30,[sp,#8]
|
||||
.inst 0xd50323bf // autiasp
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// lazy reduction, but without narrowing
|
||||
|
||||
ushr $T0.2d,$ACC3,#26
|
||||
and $ACC3,$ACC3,$MASK.2d
|
||||
ushr $T1.2d,$ACC0,#26
|
||||
and $ACC0,$ACC0,$MASK.2d
|
||||
|
||||
add $ACC4,$ACC4,$T0.2d // h3 -> h4
|
||||
add $ACC1,$ACC1,$T1.2d // h0 -> h1
|
||||
|
||||
ushr $T0.2d,$ACC4,#26
|
||||
and $ACC4,$ACC4,$MASK.2d
|
||||
ushr $T1.2d,$ACC1,#26
|
||||
and $ACC1,$ACC1,$MASK.2d
|
||||
add $ACC2,$ACC2,$T1.2d // h1 -> h2
|
||||
|
||||
add $ACC0,$ACC0,$T0.2d
|
||||
shl $T0.2d,$T0.2d,#2
|
||||
ushr $T1.2d,$ACC2,#26
|
||||
and $ACC2,$ACC2,$MASK.2d
|
||||
add $ACC0,$ACC0,$T0.2d // h4 -> h0
|
||||
add $ACC3,$ACC3,$T1.2d // h2 -> h3
|
||||
|
||||
ushr $T0.2d,$ACC0,#26
|
||||
and $ACC0,$ACC0,$MASK.2d
|
||||
ushr $T1.2d,$ACC3,#26
|
||||
and $ACC3,$ACC3,$MASK.2d
|
||||
add $ACC1,$ACC1,$T0.2d // h0 -> h1
|
||||
add $ACC4,$ACC4,$T1.2d // h3 -> h4
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// write the result, can be partially reduced
|
||||
|
||||
st4 {$ACC0,$ACC1,$ACC2,$ACC3}[0],[$ctx],#16
|
||||
mov x4,#1
|
||||
st1 {$ACC4}[0],[$ctx]
|
||||
str x4,[$ctx,#8] // set is_base2_26
|
||||
|
||||
ldr x29,[sp],#80
|
||||
ret
|
||||
.size poly1305_blocks_neon,.-poly1305_blocks_neon
|
||||
|
||||
.align 5
|
||||
.Lzeros:
|
||||
.long 0,0,0,0,0,0,0,0
|
||||
.asciz "Poly1305 for ARMv8, CRYPTOGAMS by \@dot-asm"
|
||||
.align 2
|
||||
#if !defined(__KERNEL__) && !defined(_WIN64)
|
||||
.comm OPENSSL_armcap_P,4,4
|
||||
.hidden OPENSSL_armcap_P
|
||||
#endif
|
||||
___
|
||||
|
||||
foreach (split("\n",$code)) {
|
||||
s/\b(shrn\s+v[0-9]+)\.[24]d/$1.2s/ or
|
||||
s/\b(fmov\s+)v([0-9]+)[^,]*,\s*x([0-9]+)/$1d$2,x$3/ or
|
||||
(m/\bdup\b/ and (s/\.[24]s/.2d/g or 1)) or
|
||||
(m/\b(eor|and)/ and (s/\.[248][sdh]/.16b/g or 1)) or
|
||||
(m/\bum(ul|la)l\b/ and (s/\.4s/.2s/g or 1)) or
|
||||
(m/\bum(ul|la)l2\b/ and (s/\.2s/.4s/g or 1)) or
|
||||
(m/\bst[1-4]\s+{[^}]+}\[/ and (s/\.[24]d/.s/g or 1));
|
||||
|
||||
s/\.[124]([sd])\[/.$1\[/;
|
||||
s/w#x([0-9]+)/w$1/g;
|
||||
|
||||
print $_,"\n";
|
||||
}
|
||||
close STDOUT;
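The scalar poly1305_blocks routine generated above keeps the accumulator in three 64-bit limbs and multiplies by the clamped key using s1 = r1 + (r1 >> 2) to fold the 2^128 column back modulo 2^130 - 5. As a reading aid, here is a rough C model of one iteration of that loop (illustrative only, assuming a compiler with unsigned __int128; the kernel builds the assembly, not this):

#include <stdint.h>

typedef unsigned __int128 u128;

/* One 16-byte block: h = (h + m + padbit*2^128) * r  mod  2^130 - 5 */
static void poly1305_block_base2_64(uint64_t h[3], const uint64_t m[2],
                                    uint64_t r0, uint64_t r1, uint64_t padbit)
{
        uint64_t s1 = r1 + (r1 >> 2);   /* the usual 5/4 trick: r1 is clamped so its low two bits are zero */
        u128 d0, d1, t;
        uint64_t d2, c;

        /* accumulate the message block (plus the high pad bit) */
        t = (u128)h[0] + m[0];
        h[0] = (uint64_t)t;
        t = (u128)h[1] + m[1] + (uint64_t)(t >> 64);
        h[1] = (uint64_t)t;
        h[2] += padbit + (uint64_t)(t >> 64);

        /* schoolbook multiply by r = r0 + r1*2^64, reducing the 2^128 column via s1 */
        d0 = (u128)h[0] * r0 + (u128)h[1] * s1;
        d1 = (u128)h[0] * r1 + (u128)h[1] * r0 + (u128)h[2] * s1 + (d0 >> 64);
        d2 = h[2] * r0 + (uint64_t)(d1 >> 64);

        /* final reduction: fold everything above bit 130 back in, multiplied by 5 */
        c = (d2 & ~3ULL) + (d2 >> 2);    /* = 5 * (d2 >> 2) */
        t = (u128)(uint64_t)d0 + c;
        h[0] = (uint64_t)t;
        t = (u128)(uint64_t)d1 + (uint64_t)(t >> 64);
        h[1] = (uint64_t)t;
        h[2] = (d2 & 3) + (uint64_t)(t >> 64);
}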
@@ -0,0 +1,835 @@
|
|||
#ifndef __KERNEL__
|
||||
# include "arm_arch.h"
|
||||
.extern OPENSSL_armcap_P
|
||||
#endif
|
||||
|
||||
.text
|
||||
|
||||
// forward "declarations" are required for Apple
|
||||
.globl poly1305_blocks
|
||||
.globl poly1305_emit
|
||||
|
||||
.globl poly1305_init
|
||||
.type poly1305_init,%function
|
||||
.align 5
|
||||
poly1305_init:
|
||||
cmp x1,xzr
|
||||
stp xzr,xzr,[x0] // zero hash value
|
||||
stp xzr,xzr,[x0,#16] // [along with is_base2_26]
|
||||
|
||||
csel x0,xzr,x0,eq
|
||||
b.eq .Lno_key
|
||||
|
||||
#ifndef __KERNEL__
|
||||
adrp x17,OPENSSL_armcap_P
|
||||
ldr w17,[x17,#:lo12:OPENSSL_armcap_P]
|
||||
#endif
|
||||
|
||||
ldp x7,x8,[x1] // load key
|
||||
mov x9,#0xfffffffc0fffffff
|
||||
movk x9,#0x0fff,lsl#48
|
||||
#ifdef __AARCH64EB__
|
||||
rev x7,x7 // flip bytes
|
||||
rev x8,x8
|
||||
#endif
|
||||
and x7,x7,x9 // &=0ffffffc0fffffff
|
||||
and x9,x9,#-4
|
||||
and x8,x8,x9 // &=0ffffffc0ffffffc
|
||||
mov w9,#-1
|
||||
stp x7,x8,[x0,#32] // save key value
|
||||
str w9,[x0,#48] // impossible key power value
|
||||
|
||||
#ifndef __KERNEL__
|
||||
tst w17,#ARMV7_NEON
|
||||
|
||||
adr x12,.Lpoly1305_blocks
|
||||
adr x7,.Lpoly1305_blocks_neon
|
||||
adr x13,.Lpoly1305_emit
|
||||
|
||||
csel x12,x12,x7,eq
|
||||
|
||||
# ifdef __ILP32__
|
||||
stp w12,w13,[x2]
|
||||
# else
|
||||
stp x12,x13,[x2]
|
||||
# endif
|
||||
#endif
|
||||
mov x0,#1
|
||||
.Lno_key:
|
||||
ret
|
||||
.size poly1305_init,.-poly1305_init
|
||||
|
||||
.type poly1305_blocks,%function
|
||||
.align 5
|
||||
poly1305_blocks:
|
||||
.Lpoly1305_blocks:
|
||||
ands x2,x2,#-16
|
||||
b.eq .Lno_data
|
||||
|
||||
ldp x4,x5,[x0] // load hash value
|
||||
ldp x6,x17,[x0,#16] // [along with is_base2_26]
|
||||
ldp x7,x8,[x0,#32] // load key value
|
||||
|
||||
#ifdef __AARCH64EB__
|
||||
lsr x12,x4,#32
|
||||
mov w13,w4
|
||||
lsr x14,x5,#32
|
||||
mov w15,w5
|
||||
lsr x16,x6,#32
|
||||
#else
|
||||
mov w12,w4
|
||||
lsr x13,x4,#32
|
||||
mov w14,w5
|
||||
lsr x15,x5,#32
|
||||
mov w16,w6
|
||||
#endif
|
||||
|
||||
add x12,x12,x13,lsl#26 // base 2^26 -> base 2^64
|
||||
lsr x13,x14,#12
|
||||
adds x12,x12,x14,lsl#52
|
||||
add x13,x13,x15,lsl#14
|
||||
adc x13,x13,xzr
|
||||
lsr x14,x16,#24
|
||||
adds x13,x13,x16,lsl#40
|
||||
adc x14,x14,xzr
|
||||
|
||||
cmp x17,#0 // is_base2_26?
|
||||
add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
|
||||
csel x4,x4,x12,eq // choose between radixes
|
||||
csel x5,x5,x13,eq
|
||||
csel x6,x6,x14,eq
|
||||
|
||||
.Loop:
|
||||
ldp x10,x11,[x1],#16 // load input
|
||||
sub x2,x2,#16
|
||||
#ifdef __AARCH64EB__
|
||||
rev x10,x10
|
||||
rev x11,x11
|
||||
#endif
|
||||
adds x4,x4,x10 // accumulate input
|
||||
adcs x5,x5,x11
|
||||
|
||||
mul x12,x4,x7 // h0*r0
|
||||
adc x6,x6,x3
|
||||
umulh x13,x4,x7
|
||||
|
||||
mul x10,x5,x9 // h1*5*r1
|
||||
umulh x11,x5,x9
|
||||
|
||||
adds x12,x12,x10
|
||||
mul x10,x4,x8 // h0*r1
|
||||
adc x13,x13,x11
|
||||
umulh x14,x4,x8
|
||||
|
||||
adds x13,x13,x10
|
||||
mul x10,x5,x7 // h1*r0
|
||||
adc x14,x14,xzr
|
||||
umulh x11,x5,x7
|
||||
|
||||
adds x13,x13,x10
|
||||
mul x10,x6,x9 // h2*5*r1
|
||||
adc x14,x14,x11
|
||||
mul x11,x6,x7 // h2*r0
|
||||
|
||||
adds x13,x13,x10
|
||||
adc x14,x14,x11
|
||||
|
||||
and x10,x14,#-4 // final reduction
|
||||
and x6,x14,#3
|
||||
add x10,x10,x14,lsr#2
|
||||
adds x4,x12,x10
|
||||
adcs x5,x13,xzr
|
||||
adc x6,x6,xzr
|
||||
|
||||
cbnz x2,.Loop
|
||||
|
||||
stp x4,x5,[x0] // store hash value
|
||||
stp x6,xzr,[x0,#16] // [and clear is_base2_26]
|
||||
|
||||
.Lno_data:
|
||||
ret
|
||||
.size poly1305_blocks,.-poly1305_blocks
|
||||
|
||||
.type poly1305_emit,%function
|
||||
.align 5
|
||||
poly1305_emit:
|
||||
.Lpoly1305_emit:
|
||||
ldp x4,x5,[x0] // load hash base 2^64
|
||||
ldp x6,x7,[x0,#16] // [along with is_base2_26]
|
||||
ldp x10,x11,[x2] // load nonce
|
||||
|
||||
#ifdef __AARCH64EB__
|
||||
lsr x12,x4,#32
|
||||
mov w13,w4
|
||||
lsr x14,x5,#32
|
||||
mov w15,w5
|
||||
lsr x16,x6,#32
|
||||
#else
|
||||
mov w12,w4
|
||||
lsr x13,x4,#32
|
||||
mov w14,w5
|
||||
lsr x15,x5,#32
|
||||
mov w16,w6
|
||||
#endif
|
||||
|
||||
add x12,x12,x13,lsl#26 // base 2^26 -> base 2^64
|
||||
lsr x13,x14,#12
|
||||
adds x12,x12,x14,lsl#52
|
||||
add x13,x13,x15,lsl#14
|
||||
adc x13,x13,xzr
|
||||
lsr x14,x16,#24
|
||||
adds x13,x13,x16,lsl#40
|
||||
adc x14,x14,xzr
|
||||
|
||||
cmp x7,#0 // is_base2_26?
|
||||
csel x4,x4,x12,eq // choose between radixes
|
||||
csel x5,x5,x13,eq
|
||||
csel x6,x6,x14,eq
|
||||
|
||||
adds x12,x4,#5 // compare to modulus
|
||||
adcs x13,x5,xzr
|
||||
adc x14,x6,xzr
|
||||
|
||||
tst x14,#-4 // see if it's carried/borrowed
|
||||
|
||||
csel x4,x4,x12,eq
|
||||
csel x5,x5,x13,eq
|
||||
|
||||
#ifdef __AARCH64EB__
|
||||
ror x10,x10,#32 // flip nonce words
|
||||
ror x11,x11,#32
|
||||
#endif
|
||||
adds x4,x4,x10 // accumulate nonce
|
||||
adc x5,x5,x11
|
||||
#ifdef __AARCH64EB__
|
||||
rev x4,x4 // flip output bytes
|
||||
rev x5,x5
|
||||
#endif
|
||||
stp x4,x5,[x1] // write result
|
||||
|
||||
ret
|
||||
.size poly1305_emit,.-poly1305_emit
|
||||
.type poly1305_mult,%function
|
||||
.align 5
|
||||
poly1305_mult:
|
||||
mul x12,x4,x7 // h0*r0
|
||||
umulh x13,x4,x7
|
||||
|
||||
mul x10,x5,x9 // h1*5*r1
|
||||
umulh x11,x5,x9
|
||||
|
||||
adds x12,x12,x10
|
||||
mul x10,x4,x8 // h0*r1
|
||||
adc x13,x13,x11
|
||||
umulh x14,x4,x8
|
||||
|
||||
adds x13,x13,x10
|
||||
mul x10,x5,x7 // h1*r0
|
||||
adc x14,x14,xzr
|
||||
umulh x11,x5,x7
|
||||
|
||||
adds x13,x13,x10
|
||||
mul x10,x6,x9 // h2*5*r1
|
||||
adc x14,x14,x11
|
||||
mul x11,x6,x7 // h2*r0
|
||||
|
||||
adds x13,x13,x10
|
||||
adc x14,x14,x11
|
||||
|
||||
and x10,x14,#-4 // final reduction
|
||||
and x6,x14,#3
|
||||
add x10,x10,x14,lsr#2
|
||||
adds x4,x12,x10
|
||||
adcs x5,x13,xzr
|
||||
adc x6,x6,xzr
|
||||
|
||||
ret
|
||||
.size poly1305_mult,.-poly1305_mult
|
||||
|
||||
.type poly1305_splat,%function
|
||||
.align 4
|
||||
poly1305_splat:
|
||||
and x12,x4,#0x03ffffff // base 2^64 -> base 2^26
|
||||
ubfx x13,x4,#26,#26
|
||||
extr x14,x5,x4,#52
|
||||
and x14,x14,#0x03ffffff
|
||||
ubfx x15,x5,#14,#26
|
||||
extr x16,x6,x5,#40
|
||||
|
||||
str w12,[x0,#16*0] // r0
|
||||
add w12,w13,w13,lsl#2 // r1*5
|
||||
str w13,[x0,#16*1] // r1
|
||||
add w13,w14,w14,lsl#2 // r2*5
|
||||
str w12,[x0,#16*2] // s1
|
||||
str w14,[x0,#16*3] // r2
|
||||
add w14,w15,w15,lsl#2 // r3*5
|
||||
str w13,[x0,#16*4] // s2
|
||||
str w15,[x0,#16*5] // r3
|
||||
add w15,w16,w16,lsl#2 // r4*5
|
||||
str w14,[x0,#16*6] // s3
|
||||
str w16,[x0,#16*7] // r4
|
||||
str w15,[x0,#16*8] // s4
|
||||
|
||||
ret
|
||||
.size poly1305_splat,.-poly1305_splat
|
||||
|
||||
#ifdef __KERNEL__
|
||||
.globl poly1305_blocks_neon
|
||||
#endif
|
||||
.type poly1305_blocks_neon,%function
|
||||
.align 5
|
||||
poly1305_blocks_neon:
|
||||
.Lpoly1305_blocks_neon:
|
||||
ldr x17,[x0,#24]
|
||||
cmp x2,#128
|
||||
b.lo .Lpoly1305_blocks
|
||||
|
||||
.inst 0xd503233f // paciasp
|
||||
stp x29,x30,[sp,#-80]!
|
||||
add x29,sp,#0
|
||||
|
||||
stp d8,d9,[sp,#16] // meet ABI requirements
|
||||
stp d10,d11,[sp,#32]
|
||||
stp d12,d13,[sp,#48]
|
||||
stp d14,d15,[sp,#64]
|
||||
|
||||
cbz x17,.Lbase2_64_neon
|
||||
|
||||
ldp w10,w11,[x0] // load hash value base 2^26
|
||||
ldp w12,w13,[x0,#8]
|
||||
ldr w14,[x0,#16]
|
||||
|
||||
tst x2,#31
|
||||
b.eq .Leven_neon
|
||||
|
||||
ldp x7,x8,[x0,#32] // load key value
|
||||
|
||||
add x4,x10,x11,lsl#26 // base 2^26 -> base 2^64
|
||||
lsr x5,x12,#12
|
||||
adds x4,x4,x12,lsl#52
|
||||
add x5,x5,x13,lsl#14
|
||||
adc x5,x5,xzr
|
||||
lsr x6,x14,#24
|
||||
adds x5,x5,x14,lsl#40
|
||||
adc x14,x6,xzr // can be partially reduced...
|
||||
|
||||
ldp x12,x13,[x1],#16 // load input
|
||||
sub x2,x2,#16
|
||||
add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
|
||||
|
||||
#ifdef __AARCH64EB__
|
||||
rev x12,x12
|
||||
rev x13,x13
|
||||
#endif
|
||||
adds x4,x4,x12 // accumulate input
|
||||
adcs x5,x5,x13
|
||||
adc x6,x6,x3
|
||||
|
||||
bl poly1305_mult
|
||||
|
||||
and x10,x4,#0x03ffffff // base 2^64 -> base 2^26
|
||||
ubfx x11,x4,#26,#26
|
||||
extr x12,x5,x4,#52
|
||||
and x12,x12,#0x03ffffff
|
||||
ubfx x13,x5,#14,#26
|
||||
extr x14,x6,x5,#40
|
||||
|
||||
b .Leven_neon
|
||||
|
||||
.align 4
|
||||
.Lbase2_64_neon:
|
||||
ldp x7,x8,[x0,#32] // load key value
|
||||
|
||||
ldp x4,x5,[x0] // load hash value base 2^64
|
||||
ldr x6,[x0,#16]
|
||||
|
||||
tst x2,#31
|
||||
b.eq .Linit_neon
|
||||
|
||||
ldp x12,x13,[x1],#16 // load input
|
||||
sub x2,x2,#16
|
||||
add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
|
||||
#ifdef __AARCH64EB__
|
||||
rev x12,x12
|
||||
rev x13,x13
|
||||
#endif
|
||||
adds x4,x4,x12 // accumulate input
|
||||
adcs x5,x5,x13
|
||||
adc x6,x6,x3
|
||||
|
||||
bl poly1305_mult
|
||||
|
||||
.Linit_neon:
|
||||
ldr w17,[x0,#48] // first table element
|
||||
and x10,x4,#0x03ffffff // base 2^64 -> base 2^26
|
||||
ubfx x11,x4,#26,#26
|
||||
extr x12,x5,x4,#52
|
||||
and x12,x12,#0x03ffffff
|
||||
ubfx x13,x5,#14,#26
|
||||
extr x14,x6,x5,#40
|
||||
|
||||
cmp w17,#-1 // is value impossible?
|
||||
b.ne .Leven_neon
|
||||
|
||||
fmov d24,x10
|
||||
fmov d25,x11
|
||||
fmov d26,x12
|
||||
fmov d27,x13
|
||||
fmov d28,x14
|
||||
|
||||
////////////////////////////////// initialize r^n table
|
||||
mov x4,x7 // r^1
|
||||
add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
|
||||
mov x5,x8
|
||||
mov x6,xzr
|
||||
add x0,x0,#48+12
|
||||
bl poly1305_splat
|
||||
|
||||
bl poly1305_mult // r^2
|
||||
sub x0,x0,#4
|
||||
bl poly1305_splat
|
||||
|
||||
bl poly1305_mult // r^3
|
||||
sub x0,x0,#4
|
||||
bl poly1305_splat
|
||||
|
||||
bl poly1305_mult // r^4
|
||||
sub x0,x0,#4
|
||||
bl poly1305_splat
|
||||
sub x0,x0,#48 // restore original x0
|
||||
b .Ldo_neon
|
||||
|
||||
.align 4
|
||||
.Leven_neon:
|
||||
fmov d24,x10
|
||||
fmov d25,x11
|
||||
fmov d26,x12
|
||||
fmov d27,x13
|
||||
fmov d28,x14
|
||||
|
||||
.Ldo_neon:
|
||||
ldp x8,x12,[x1,#32] // inp[2:3]
|
||||
subs x2,x2,#64
|
||||
ldp x9,x13,[x1,#48]
|
||||
add x16,x1,#96
|
||||
adr x17,.Lzeros
|
||||
|
||||
lsl x3,x3,#24
|
||||
add x15,x0,#48
|
||||
|
||||
#ifdef __AARCH64EB__
|
||||
rev x8,x8
|
||||
rev x12,x12
|
||||
rev x9,x9
|
||||
rev x13,x13
|
||||
#endif
|
||||
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
|
||||
and x5,x9,#0x03ffffff
|
||||
ubfx x6,x8,#26,#26
|
||||
ubfx x7,x9,#26,#26
|
||||
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
|
||||
extr x8,x12,x8,#52
|
||||
extr x9,x13,x9,#52
|
||||
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
|
||||
fmov d14,x4
|
||||
and x8,x8,#0x03ffffff
|
||||
and x9,x9,#0x03ffffff
|
||||
ubfx x10,x12,#14,#26
|
||||
ubfx x11,x13,#14,#26
|
||||
add x12,x3,x12,lsr#40
|
||||
add x13,x3,x13,lsr#40
|
||||
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
|
||||
fmov d15,x6
|
||||
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
|
||||
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
|
||||
fmov d16,x8
|
||||
fmov d17,x10
|
||||
fmov d18,x12
|
||||
|
||||
ldp x8,x12,[x1],#16 // inp[0:1]
|
||||
ldp x9,x13,[x1],#48
|
||||
|
||||
ld1 {v0.4s,v1.4s,v2.4s,v3.4s},[x15],#64
|
||||
ld1 {v4.4s,v5.4s,v6.4s,v7.4s},[x15],#64
|
||||
ld1 {v8.4s},[x15]
|
||||
|
||||
#ifdef __AARCH64EB__
|
||||
rev x8,x8
|
||||
rev x12,x12
|
||||
rev x9,x9
|
||||
rev x13,x13
|
||||
#endif
|
||||
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
|
||||
and x5,x9,#0x03ffffff
|
||||
ubfx x6,x8,#26,#26
|
||||
ubfx x7,x9,#26,#26
|
||||
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
|
||||
extr x8,x12,x8,#52
|
||||
extr x9,x13,x9,#52
|
||||
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
|
||||
fmov d9,x4
|
||||
and x8,x8,#0x03ffffff
|
||||
and x9,x9,#0x03ffffff
|
||||
ubfx x10,x12,#14,#26
|
||||
ubfx x11,x13,#14,#26
|
||||
add x12,x3,x12,lsr#40
|
||||
add x13,x3,x13,lsr#40
|
||||
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
|
||||
fmov d10,x6
|
||||
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
|
||||
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
|
||||
movi v31.2d,#-1
|
||||
fmov d11,x8
|
||||
fmov d12,x10
|
||||
fmov d13,x12
|
||||
ushr v31.2d,v31.2d,#38
|
||||
|
||||
b.ls .Lskip_loop
|
||||
|
||||
.align 4
|
||||
.Loop_neon:
|
||||
////////////////////////////////////////////////////////////////
|
||||
// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
|
||||
// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
|
||||
// ___________________/
|
||||
// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
|
||||
// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
|
||||
// ___________________/ ____________________/
|
||||
//
|
||||
// Note that we start with inp[2:3]*r^2. This is because it
|
||||
// doesn't depend on reduction in previous iteration.
|
||||
////////////////////////////////////////////////////////////////
|
||||
// d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
|
||||
// d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4
|
||||
// d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3
|
||||
// d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2
|
||||
// d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1
|
||||
|
||||
subs x2,x2,#64
|
||||
umull v23.2d,v14.2s,v7.s[2]
|
||||
csel x16,x17,x16,lo
|
||||
umull v22.2d,v14.2s,v5.s[2]
|
||||
umull v21.2d,v14.2s,v3.s[2]
|
||||
ldp x8,x12,[x16],#16 // inp[2:3] (or zero)
|
||||
umull v20.2d,v14.2s,v1.s[2]
|
||||
ldp x9,x13,[x16],#48
|
||||
umull v19.2d,v14.2s,v0.s[2]
|
||||
#ifdef __AARCH64EB__
|
||||
rev x8,x8
|
||||
rev x12,x12
|
||||
rev x9,x9
|
||||
rev x13,x13
|
||||
#endif
|
||||
|
||||
umlal v23.2d,v15.2s,v5.s[2]
|
||||
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
|
||||
umlal v22.2d,v15.2s,v3.s[2]
|
||||
and x5,x9,#0x03ffffff
|
||||
umlal v21.2d,v15.2s,v1.s[2]
|
||||
ubfx x6,x8,#26,#26
|
||||
umlal v20.2d,v15.2s,v0.s[2]
|
||||
ubfx x7,x9,#26,#26
|
||||
umlal v19.2d,v15.2s,v8.s[2]
|
||||
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
|
||||
|
||||
umlal v23.2d,v16.2s,v3.s[2]
|
||||
extr x8,x12,x8,#52
|
||||
umlal v22.2d,v16.2s,v1.s[2]
|
||||
extr x9,x13,x9,#52
|
||||
umlal v21.2d,v16.2s,v0.s[2]
|
||||
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
|
||||
umlal v20.2d,v16.2s,v8.s[2]
|
||||
fmov d14,x4
|
||||
umlal v19.2d,v16.2s,v6.s[2]
|
||||
and x8,x8,#0x03ffffff
|
||||
|
||||
umlal v23.2d,v17.2s,v1.s[2]
|
||||
and x9,x9,#0x03ffffff
|
||||
umlal v22.2d,v17.2s,v0.s[2]
|
||||
ubfx x10,x12,#14,#26
|
||||
umlal v21.2d,v17.2s,v8.s[2]
|
||||
ubfx x11,x13,#14,#26
|
||||
umlal v20.2d,v17.2s,v6.s[2]
|
||||
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
|
||||
umlal v19.2d,v17.2s,v4.s[2]
|
||||
fmov d15,x6
|
||||
|
||||
add v11.2s,v11.2s,v26.2s
|
||||
add x12,x3,x12,lsr#40
|
||||
umlal v23.2d,v18.2s,v0.s[2]
|
||||
add x13,x3,x13,lsr#40
|
||||
umlal v22.2d,v18.2s,v8.s[2]
|
||||
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
|
||||
umlal v21.2d,v18.2s,v6.s[2]
|
||||
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
|
||||
umlal v20.2d,v18.2s,v4.s[2]
|
||||
fmov d16,x8
|
||||
umlal v19.2d,v18.2s,v2.s[2]
|
||||
fmov d17,x10
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// (hash+inp[0:1])*r^4 and accumulate
|
||||
|
||||
add v9.2s,v9.2s,v24.2s
|
||||
fmov d18,x12
|
||||
umlal v22.2d,v11.2s,v1.s[0]
|
||||
ldp x8,x12,[x1],#16 // inp[0:1]
|
||||
umlal v19.2d,v11.2s,v6.s[0]
|
||||
ldp x9,x13,[x1],#48
|
||||
umlal v23.2d,v11.2s,v3.s[0]
|
||||
umlal v20.2d,v11.2s,v8.s[0]
|
||||
umlal v21.2d,v11.2s,v0.s[0]
|
||||
#ifdef __AARCH64EB__
|
||||
rev x8,x8
|
||||
rev x12,x12
|
||||
rev x9,x9
|
||||
rev x13,x13
|
||||
#endif
|
||||
|
||||
add v10.2s,v10.2s,v25.2s
|
||||
umlal v22.2d,v9.2s,v5.s[0]
|
||||
umlal v23.2d,v9.2s,v7.s[0]
|
||||
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
|
||||
umlal v21.2d,v9.2s,v3.s[0]
|
||||
and x5,x9,#0x03ffffff
|
||||
umlal v19.2d,v9.2s,v0.s[0]
|
||||
ubfx x6,x8,#26,#26
|
||||
umlal v20.2d,v9.2s,v1.s[0]
|
||||
ubfx x7,x9,#26,#26
|
||||
|
||||
add v12.2s,v12.2s,v27.2s
|
||||
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
|
||||
umlal v22.2d,v10.2s,v3.s[0]
|
||||
extr x8,x12,x8,#52
|
||||
umlal v23.2d,v10.2s,v5.s[0]
|
||||
extr x9,x13,x9,#52
|
||||
umlal v19.2d,v10.2s,v8.s[0]
|
||||
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
|
||||
umlal v21.2d,v10.2s,v1.s[0]
|
||||
fmov d9,x4
|
||||
umlal v20.2d,v10.2s,v0.s[0]
|
||||
and x8,x8,#0x03ffffff
|
||||
|
||||
add v13.2s,v13.2s,v28.2s
|
||||
and x9,x9,#0x03ffffff
|
||||
umlal v22.2d,v12.2s,v0.s[0]
|
||||
ubfx x10,x12,#14,#26
|
||||
umlal v19.2d,v12.2s,v4.s[0]
|
||||
ubfx x11,x13,#14,#26
|
||||
umlal v23.2d,v12.2s,v1.s[0]
|
||||
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
|
||||
umlal v20.2d,v12.2s,v6.s[0]
|
||||
fmov d10,x6
|
||||
umlal v21.2d,v12.2s,v8.s[0]
|
||||
add x12,x3,x12,lsr#40
|
||||
|
||||
umlal v22.2d,v13.2s,v8.s[0]
|
||||
add x13,x3,x13,lsr#40
|
||||
umlal v19.2d,v13.2s,v2.s[0]
|
||||
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
|
||||
umlal v23.2d,v13.2s,v0.s[0]
|
||||
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
|
||||
umlal v20.2d,v13.2s,v4.s[0]
|
||||
fmov d11,x8
|
||||
umlal v21.2d,v13.2s,v6.s[0]
|
||||
fmov d12,x10
|
||||
fmov d13,x12
|
||||
|
||||
/////////////////////////////////////////////////////////////////
|
||||
// lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
|
||||
// and P. Schwabe
|
||||
//
|
||||
// [see discussion in poly1305-armv4 module]
|
||||
|
||||
ushr v29.2d,v22.2d,#26
|
||||
xtn v27.2s,v22.2d
|
||||
ushr v30.2d,v19.2d,#26
|
||||
and v19.16b,v19.16b,v31.16b
|
||||
add v23.2d,v23.2d,v29.2d // h3 -> h4
|
||||
bic v27.2s,#0xfc,lsl#24 // &=0x03ffffff
|
||||
add v20.2d,v20.2d,v30.2d // h0 -> h1
|
||||
|
||||
ushr v29.2d,v23.2d,#26
|
||||
xtn v28.2s,v23.2d
|
||||
ushr v30.2d,v20.2d,#26
|
||||
xtn v25.2s,v20.2d
|
||||
bic v28.2s,#0xfc,lsl#24
|
||||
add v21.2d,v21.2d,v30.2d // h1 -> h2
|
||||
|
||||
add v19.2d,v19.2d,v29.2d
|
||||
shl v29.2d,v29.2d,#2
|
||||
shrn v30.2s,v21.2d,#26
|
||||
xtn v26.2s,v21.2d
|
||||
add v19.2d,v19.2d,v29.2d // h4 -> h0
|
||||
bic v25.2s,#0xfc,lsl#24
|
||||
add v27.2s,v27.2s,v30.2s // h2 -> h3
|
||||
bic v26.2s,#0xfc,lsl#24
|
||||
|
||||
shrn v29.2s,v19.2d,#26
|
||||
xtn v24.2s,v19.2d
|
||||
ushr v30.2s,v27.2s,#26
|
||||
bic v27.2s,#0xfc,lsl#24
|
||||
bic v24.2s,#0xfc,lsl#24
|
||||
add v25.2s,v25.2s,v29.2s // h0 -> h1
|
||||
add v28.2s,v28.2s,v30.2s // h3 -> h4
|
||||
|
||||
b.hi .Loop_neon
|
||||
|
||||
.Lskip_loop:
|
||||
dup v16.2d,v16.d[0]
|
||||
add v11.2s,v11.2s,v26.2s
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
|
||||
|
||||
adds x2,x2,#32
|
||||
b.ne .Long_tail
|
||||
|
||||
dup v16.2d,v11.d[0]
|
||||
add v14.2s,v9.2s,v24.2s
|
||||
add v17.2s,v12.2s,v27.2s
|
||||
add v15.2s,v10.2s,v25.2s
|
||||
add v18.2s,v13.2s,v28.2s
|
||||
|
||||
.Long_tail:
|
||||
dup v14.2d,v14.d[0]
|
||||
umull2 v19.2d,v16.4s,v6.4s
|
||||
umull2 v22.2d,v16.4s,v1.4s
|
||||
umull2 v23.2d,v16.4s,v3.4s
|
||||
umull2 v21.2d,v16.4s,v0.4s
|
||||
umull2 v20.2d,v16.4s,v8.4s
|
||||
|
||||
dup v15.2d,v15.d[0]
|
||||
umlal2 v19.2d,v14.4s,v0.4s
|
||||
umlal2 v21.2d,v14.4s,v3.4s
|
||||
umlal2 v22.2d,v14.4s,v5.4s
|
||||
umlal2 v23.2d,v14.4s,v7.4s
|
||||
umlal2 v20.2d,v14.4s,v1.4s
|
||||
|
||||
dup v17.2d,v17.d[0]
|
||||
umlal2 v19.2d,v15.4s,v8.4s
|
||||
umlal2 v22.2d,v15.4s,v3.4s
|
||||
umlal2 v21.2d,v15.4s,v1.4s
|
||||
umlal2 v23.2d,v15.4s,v5.4s
|
||||
umlal2 v20.2d,v15.4s,v0.4s
|
||||
|
||||
dup v18.2d,v18.d[0]
|
||||
umlal2 v22.2d,v17.4s,v0.4s
|
||||
umlal2 v23.2d,v17.4s,v1.4s
|
||||
umlal2 v19.2d,v17.4s,v4.4s
|
||||
umlal2 v20.2d,v17.4s,v6.4s
|
||||
umlal2 v21.2d,v17.4s,v8.4s
|
||||
|
||||
umlal2 v22.2d,v18.4s,v8.4s
|
||||
umlal2 v19.2d,v18.4s,v2.4s
|
||||
umlal2 v23.2d,v18.4s,v0.4s
|
||||
umlal2 v20.2d,v18.4s,v4.4s
|
||||
umlal2 v21.2d,v18.4s,v6.4s
|
||||
|
||||
b.eq .Lshort_tail
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// (hash+inp[0:1])*r^4:r^3 and accumulate
|
||||
|
||||
add v9.2s,v9.2s,v24.2s
|
||||
umlal v22.2d,v11.2s,v1.2s
|
||||
umlal v19.2d,v11.2s,v6.2s
|
||||
umlal v23.2d,v11.2s,v3.2s
|
||||
umlal v20.2d,v11.2s,v8.2s
|
||||
umlal v21.2d,v11.2s,v0.2s
|
||||
|
||||
add v10.2s,v10.2s,v25.2s
|
||||
umlal v22.2d,v9.2s,v5.2s
|
||||
umlal v19.2d,v9.2s,v0.2s
|
||||
umlal v23.2d,v9.2s,v7.2s
|
||||
umlal v20.2d,v9.2s,v1.2s
|
||||
umlal v21.2d,v9.2s,v3.2s
|
||||
|
||||
add v12.2s,v12.2s,v27.2s
|
||||
umlal v22.2d,v10.2s,v3.2s
|
||||
umlal v19.2d,v10.2s,v8.2s
|
||||
umlal v23.2d,v10.2s,v5.2s
|
||||
umlal v20.2d,v10.2s,v0.2s
|
||||
umlal v21.2d,v10.2s,v1.2s
|
||||
|
||||
add v13.2s,v13.2s,v28.2s
|
||||
umlal v22.2d,v12.2s,v0.2s
|
||||
umlal v19.2d,v12.2s,v4.2s
|
||||
umlal v23.2d,v12.2s,v1.2s
|
||||
umlal v20.2d,v12.2s,v6.2s
|
||||
umlal v21.2d,v12.2s,v8.2s
|
||||
|
||||
umlal v22.2d,v13.2s,v8.2s
|
||||
umlal v19.2d,v13.2s,v2.2s
|
||||
umlal v23.2d,v13.2s,v0.2s
|
||||
umlal v20.2d,v13.2s,v4.2s
|
||||
umlal v21.2d,v13.2s,v6.2s
|
||||
|
||||
.Lshort_tail:
|
||||
////////////////////////////////////////////////////////////////
|
||||
// horizontal add
|
||||
|
||||
addp v22.2d,v22.2d,v22.2d
|
||||
ldp d8,d9,[sp,#16] // meet ABI requirements
|
||||
addp v19.2d,v19.2d,v19.2d
|
||||
ldp d10,d11,[sp,#32]
|
||||
addp v23.2d,v23.2d,v23.2d
|
||||
ldp d12,d13,[sp,#48]
|
||||
addp v20.2d,v20.2d,v20.2d
|
||||
ldp d14,d15,[sp,#64]
|
||||
addp v21.2d,v21.2d,v21.2d
|
||||
ldr x30,[sp,#8]
|
||||
.inst 0xd50323bf // autiasp
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// lazy reduction, but without narrowing
|
||||
|
||||
ushr v29.2d,v22.2d,#26
|
||||
and v22.16b,v22.16b,v31.16b
|
||||
ushr v30.2d,v19.2d,#26
|
||||
and v19.16b,v19.16b,v31.16b
|
||||
|
||||
add v23.2d,v23.2d,v29.2d // h3 -> h4
|
||||
add v20.2d,v20.2d,v30.2d // h0 -> h1
|
||||
|
||||
ushr v29.2d,v23.2d,#26
|
||||
and v23.16b,v23.16b,v31.16b
|
||||
ushr v30.2d,v20.2d,#26
|
||||
and v20.16b,v20.16b,v31.16b
|
||||
add v21.2d,v21.2d,v30.2d // h1 -> h2
|
||||
|
||||
add v19.2d,v19.2d,v29.2d
|
||||
shl v29.2d,v29.2d,#2
|
||||
ushr v30.2d,v21.2d,#26
|
||||
and v21.16b,v21.16b,v31.16b
|
||||
add v19.2d,v19.2d,v29.2d // h4 -> h0
|
||||
add v22.2d,v22.2d,v30.2d // h2 -> h3
|
||||
|
||||
ushr v29.2d,v19.2d,#26
|
||||
and v19.16b,v19.16b,v31.16b
|
||||
ushr v30.2d,v22.2d,#26
|
||||
and v22.16b,v22.16b,v31.16b
|
||||
add v20.2d,v20.2d,v29.2d // h0 -> h1
|
||||
add v23.2d,v23.2d,v30.2d // h3 -> h4
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// write the result, can be partially reduced
|
||||
|
||||
st4 {v19.s,v20.s,v21.s,v22.s}[0],[x0],#16
|
||||
mov x4,#1
|
||||
st1 {v23.s}[0],[x0]
|
||||
str x4,[x0,#8] // set is_base2_26
|
||||
|
||||
ldr x29,[sp],#80
|
||||
ret
|
||||
.size poly1305_blocks_neon,.-poly1305_blocks_neon
|
||||
|
||||
.align 5
|
||||
.Lzeros:
|
||||
.long 0,0,0,0,0,0,0,0
|
||||
.asciz "Poly1305 for ARMv8, CRYPTOGAMS by @dot-asm"
|
||||
.align 2
|
||||
#if !defined(__KERNEL__) && !defined(_WIN64)
|
||||
.comm OPENSSL_armcap_P,4,4
|
||||
.hidden OPENSSL_armcap_P
|
||||
#endif
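Both poly1305_blocks and poly1305_emit above open by collapsing the five 26-bit NEON limbs back into three 64-bit limbs (the "base 2^26 -> base 2^64" sequences), and poly1305_splat performs the reverse split. A compact C model of the two conversions (illustrative only; the assembly interleaves the same bit manipulation with other work, and the value may be only partially reduced):

#include <stdint.h>

/* base 2^64 (three limbs) -> base 2^26 (five limbs), as in poly1305_splat */
static void base2_64_to_2_26(uint32_t h26[5], const uint64_t h64[3])
{
        h26[0] = h64[0] & 0x03ffffff;
        h26[1] = (h64[0] >> 26) & 0x03ffffff;
        h26[2] = ((h64[0] >> 52) | (h64[1] << 12)) & 0x03ffffff;
        h26[3] = (h64[1] >> 14) & 0x03ffffff;
        h26[4] = (uint32_t)((h64[1] >> 40) | (h64[2] << 24));
}

/* base 2^26 -> base 2^64; limbs 2 and 4 straddle the 64-bit boundaries */
static void base2_26_to_2_64(uint64_t h64[3], const uint32_t h26[5])
{
        unsigned __int128 t;

        t = (unsigned __int128)h26[0] + ((uint64_t)h26[1] << 26) +
            ((unsigned __int128)h26[2] << 52);
        h64[0] = (uint64_t)t;
        t = (t >> 64) + ((uint64_t)h26[3] << 14) +
            ((unsigned __int128)h26[4] << 40);
        h64[1] = (uint64_t)t;
        h64[2] = (uint64_t)(t >> 64);
}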
@@ -0,0 +1,237 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* OpenSSL/Cryptogams accelerated Poly1305 transform for arm64
|
||||
*
|
||||
* Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
|
||||
*/
|
||||
|
||||
#include <asm/hwcap.h>
|
||||
#include <asm/neon.h>
|
||||
#include <asm/simd.h>
|
||||
#include <asm/unaligned.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/poly1305.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <linux/cpufeature.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
asmlinkage void poly1305_init_arm64(void *state, const u8 *key);
|
||||
asmlinkage void poly1305_blocks(void *state, const u8 *src, u32 len, u32 hibit);
|
||||
asmlinkage void poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit);
|
||||
asmlinkage void poly1305_emit(void *state, __le32 *digest, const u32 *nonce);
|
||||
|
||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
|
||||
|
||||
void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
|
||||
{
|
||||
poly1305_init_arm64(&dctx->h, key);
|
||||
dctx->s[0] = get_unaligned_le32(key + 16);
|
||||
dctx->s[1] = get_unaligned_le32(key + 20);
|
||||
dctx->s[2] = get_unaligned_le32(key + 24);
|
||||
dctx->s[3] = get_unaligned_le32(key + 28);
|
||||
dctx->buflen = 0;
|
||||
}
|
||||
EXPORT_SYMBOL(poly1305_init_arch);
|
||||
|
||||
static int neon_poly1305_init(struct shash_desc *desc)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
dctx->buflen = 0;
|
||||
dctx->rset = 0;
|
||||
dctx->sset = false;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void neon_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
|
||||
u32 len, u32 hibit, bool do_neon)
|
||||
{
|
||||
if (unlikely(!dctx->sset)) {
|
||||
if (!dctx->rset) {
|
||||
poly1305_init_arch(dctx, src);
|
||||
src += POLY1305_BLOCK_SIZE;
|
||||
len -= POLY1305_BLOCK_SIZE;
|
||||
dctx->rset = 1;
|
||||
}
|
||||
if (len >= POLY1305_BLOCK_SIZE) {
|
||||
dctx->s[0] = get_unaligned_le32(src + 0);
|
||||
dctx->s[1] = get_unaligned_le32(src + 4);
|
||||
dctx->s[2] = get_unaligned_le32(src + 8);
|
||||
dctx->s[3] = get_unaligned_le32(src + 12);
|
||||
src += POLY1305_BLOCK_SIZE;
|
||||
len -= POLY1305_BLOCK_SIZE;
|
||||
dctx->sset = true;
|
||||
}
|
||||
if (len < POLY1305_BLOCK_SIZE)
|
||||
return;
|
||||
}
|
||||
|
||||
len &= ~(POLY1305_BLOCK_SIZE - 1);
|
||||
|
||||
if (static_branch_likely(&have_neon) && likely(do_neon))
|
||||
poly1305_blocks_neon(&dctx->h, src, len, hibit);
|
||||
else
|
||||
poly1305_blocks(&dctx->h, src, len, hibit);
|
||||
}
|
||||
|
||||
static void neon_poly1305_do_update(struct poly1305_desc_ctx *dctx,
|
||||
const u8 *src, u32 len, bool do_neon)
|
||||
{
|
||||
if (unlikely(dctx->buflen)) {
|
||||
u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen);
|
||||
|
||||
memcpy(dctx->buf + dctx->buflen, src, bytes);
|
||||
src += bytes;
|
||||
len -= bytes;
|
||||
dctx->buflen += bytes;
|
||||
|
||||
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
|
||||
neon_poly1305_blocks(dctx, dctx->buf,
|
||||
POLY1305_BLOCK_SIZE, 1, false);
|
||||
dctx->buflen = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (likely(len >= POLY1305_BLOCK_SIZE)) {
|
||||
neon_poly1305_blocks(dctx, src, len, 1, do_neon);
|
||||
src += round_down(len, POLY1305_BLOCK_SIZE);
|
||||
len %= POLY1305_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
if (unlikely(len)) {
|
||||
dctx->buflen = len;
|
||||
memcpy(dctx->buf, src, len);
|
||||
}
|
||||
}
|
||||
|
||||
static int neon_poly1305_update(struct shash_desc *desc,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
bool do_neon = crypto_simd_usable() && srclen > 128;
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
if (static_branch_likely(&have_neon) && do_neon)
|
||||
kernel_neon_begin();
|
||||
neon_poly1305_do_update(dctx, src, srclen, do_neon);
|
||||
if (static_branch_likely(&have_neon) && do_neon)
|
||||
kernel_neon_end();
|
||||
return 0;
|
||||
}
|
||||
|
||||
void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
if (unlikely(dctx->buflen)) {
|
||||
u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen);
|
||||
|
||||
memcpy(dctx->buf + dctx->buflen, src, bytes);
|
||||
src += bytes;
|
||||
nbytes -= bytes;
|
||||
dctx->buflen += bytes;
|
||||
|
||||
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
|
||||
poly1305_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 1);
|
||||
dctx->buflen = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
|
||||
unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
|
||||
|
||||
if (static_branch_likely(&have_neon) && crypto_simd_usable()) {
|
||||
kernel_neon_begin();
|
||||
poly1305_blocks_neon(&dctx->h, src, len, 1);
|
||||
kernel_neon_end();
|
||||
} else {
|
||||
poly1305_blocks(&dctx->h, src, len, 1);
|
||||
}
|
||||
src += len;
|
||||
nbytes %= POLY1305_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
if (unlikely(nbytes)) {
|
||||
dctx->buflen = nbytes;
|
||||
memcpy(dctx->buf, src, nbytes);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(poly1305_update_arch);
|
||||
|
||||
void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
|
||||
{
|
||||
__le32 digest[4];
|
||||
u64 f = 0;
|
||||
|
||||
if (unlikely(dctx->buflen)) {
|
||||
dctx->buf[dctx->buflen++] = 1;
|
||||
memset(dctx->buf + dctx->buflen, 0,
|
||||
POLY1305_BLOCK_SIZE - dctx->buflen);
|
||||
poly1305_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
|
||||
}
|
||||
|
||||
poly1305_emit(&dctx->h, digest, dctx->s);
|
||||
|
||||
/* mac = (h + s) % (2^128) */
|
||||
f = (f >> 32) + le32_to_cpu(digest[0]);
|
||||
put_unaligned_le32(f, dst);
|
||||
f = (f >> 32) + le32_to_cpu(digest[1]);
|
||||
put_unaligned_le32(f, dst + 4);
|
||||
f = (f >> 32) + le32_to_cpu(digest[2]);
|
||||
put_unaligned_le32(f, dst + 8);
|
||||
f = (f >> 32) + le32_to_cpu(digest[3]);
|
||||
put_unaligned_le32(f, dst + 12);
|
||||
|
||||
*dctx = (struct poly1305_desc_ctx){};
|
||||
}
|
||||
EXPORT_SYMBOL(poly1305_final_arch);
|
||||
|
||||
static int neon_poly1305_final(struct shash_desc *desc, u8 *dst)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
if (unlikely(!dctx->sset))
|
||||
return -ENOKEY;
|
||||
|
||||
poly1305_final_arch(dctx, dst);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct shash_alg neon_poly1305_alg = {
|
||||
.init = neon_poly1305_init,
|
||||
.update = neon_poly1305_update,
|
||||
.final = neon_poly1305_final,
|
||||
.digestsize = POLY1305_DIGEST_SIZE,
|
||||
.descsize = sizeof(struct poly1305_desc_ctx),
|
||||
|
||||
.base.cra_name = "poly1305",
|
||||
.base.cra_driver_name = "poly1305-neon",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_blocksize = POLY1305_BLOCK_SIZE,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init neon_poly1305_mod_init(void)
|
||||
{
|
||||
if (!cpu_have_named_feature(ASIMD))
|
||||
return 0;
|
||||
|
||||
static_branch_enable(&have_neon);
|
||||
|
||||
return crypto_register_shash(&neon_poly1305_alg);
|
||||
}
|
||||
|
||||
static void __exit neon_poly1305_mod_exit(void)
|
||||
{
|
||||
if (cpu_have_named_feature(ASIMD))
|
||||
crypto_unregister_shash(&neon_poly1305_alg);
|
||||
}
|
||||
|
||||
module_init(neon_poly1305_mod_init);
|
||||
module_exit(neon_poly1305_mod_exit);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_ALIAS_CRYPTO("poly1305");
|
||||
MODULE_ALIAS_CRYPTO("poly1305-neon");
|
@@ -326,7 +326,7 @@ libs-$(CONFIG_MIPS_FP_SUPPORT) += arch/mips/math-emu/
# See arch/mips/Kbuild for content of core part of the kernel
core-y += arch/mips/

drivers-$(CONFIG_MIPS_CRC_SUPPORT) += arch/mips/crypto/
drivers-y += arch/mips/crypto/
drivers-$(CONFIG_OPROFILE) += arch/mips/oprofile/

# suspend and hibernation support
@@ -4,3 +4,21 @@
|
|||
#
|
||||
|
||||
obj-$(CONFIG_CRYPTO_CRC32_MIPS) += crc32-mips.o
|
||||
|
||||
obj-$(CONFIG_CRYPTO_CHACHA_MIPS) += chacha-mips.o
|
||||
chacha-mips-y := chacha-core.o chacha-glue.o
|
||||
AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots
|
||||
|
||||
obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o
|
||||
poly1305-mips-y := poly1305-core.o poly1305-glue.o
|
||||
|
||||
perlasm-flavour-$(CONFIG_CPU_MIPS32) := o32
|
||||
perlasm-flavour-$(CONFIG_CPU_MIPS64) := 64
|
||||
|
||||
quiet_cmd_perlasm = PERLASM $@
|
||||
cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@)
|
||||
|
||||
$(obj)/poly1305-core.S: $(src)/poly1305-mips.pl FORCE
|
||||
$(call if_changed,perlasm)
|
||||
|
||||
targets += poly1305-core.S
|
||||
|
|
|
@@ -0,0 +1,497 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
|
||||
/*
|
||||
* Copyright (C) 2016-2018 René van Dorst <opensource@vdorst.com>. All Rights Reserved.
|
||||
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
|
||||
*/
|
||||
|
||||
#define MASK_U32 0x3c
|
||||
#define CHACHA20_BLOCK_SIZE 64
|
||||
#define STACK_SIZE 32
|
||||
|
||||
#define X0 $t0
|
||||
#define X1 $t1
|
||||
#define X2 $t2
|
||||
#define X3 $t3
|
||||
#define X4 $t4
|
||||
#define X5 $t5
|
||||
#define X6 $t6
|
||||
#define X7 $t7
|
||||
#define X8 $t8
|
||||
#define X9 $t9
|
||||
#define X10 $v1
|
||||
#define X11 $s6
|
||||
#define X12 $s5
|
||||
#define X13 $s4
|
||||
#define X14 $s3
|
||||
#define X15 $s2
|
||||
/* Use regs which are overwritten on exit for Tx so we don't leak clear data. */
|
||||
#define T0 $s1
|
||||
#define T1 $s0
|
||||
#define T(n) T ## n
|
||||
#define X(n) X ## n
|
||||
|
||||
/* Input arguments */
|
||||
#define STATE $a0
|
||||
#define OUT $a1
|
||||
#define IN $a2
|
||||
#define BYTES $a3
|
||||
|
||||
/* Output argument */
|
||||
/* NONCE[0] is kept in a register and not in memory.
|
||||
 * We don't want to touch the original value in memory.
|
||||
* Must be incremented every loop iteration.
|
||||
*/
|
||||
#define NONCE_0 $v0
|
||||
|
||||
/* SAVED_X and SAVED_CA are set in the jump table.
|
||||
 * Use regs which are overwritten on exit so we don't leak clear data.
|
||||
 * They are used to handle the last bytes, which are not a multiple of 4.
|
||||
*/
|
||||
#define SAVED_X X15
|
||||
#define SAVED_CA $s7
|
||||
|
||||
#define IS_UNALIGNED $s7
|
||||
|
||||
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
|
||||
#define MSB 0
|
||||
#define LSB 3
|
||||
#define ROTx rotl
|
||||
#define ROTR(n) rotr n, 24
|
||||
#define CPU_TO_LE32(n) \
|
||||
wsbh n; \
|
||||
rotr n, 16;
|
||||
#else
|
||||
#define MSB 3
|
||||
#define LSB 0
|
||||
#define ROTx rotr
|
||||
#define CPU_TO_LE32(n)
|
||||
#define ROTR(n)
|
||||
#endif
|
||||
|
||||
#define FOR_EACH_WORD(x) \
|
||||
x( 0); \
|
||||
x( 1); \
|
||||
x( 2); \
|
||||
x( 3); \
|
||||
x( 4); \
|
||||
x( 5); \
|
||||
x( 6); \
|
||||
x( 7); \
|
||||
x( 8); \
|
||||
x( 9); \
|
||||
x(10); \
|
||||
x(11); \
|
||||
x(12); \
|
||||
x(13); \
|
||||
x(14); \
|
||||
x(15);
|
||||
|
||||
#define FOR_EACH_WORD_REV(x) \
|
||||
x(15); \
|
||||
x(14); \
|
||||
x(13); \
|
||||
x(12); \
|
||||
x(11); \
|
||||
x(10); \
|
||||
x( 9); \
|
||||
x( 8); \
|
||||
x( 7); \
|
||||
x( 6); \
|
||||
x( 5); \
|
||||
x( 4); \
|
||||
x( 3); \
|
||||
x( 2); \
|
||||
x( 1); \
|
||||
x( 0);
|
||||
|
||||
#define PLUS_ONE_0 1
|
||||
#define PLUS_ONE_1 2
|
||||
#define PLUS_ONE_2 3
|
||||
#define PLUS_ONE_3 4
|
||||
#define PLUS_ONE_4 5
|
||||
#define PLUS_ONE_5 6
|
||||
#define PLUS_ONE_6 7
|
||||
#define PLUS_ONE_7 8
|
||||
#define PLUS_ONE_8 9
|
||||
#define PLUS_ONE_9 10
|
||||
#define PLUS_ONE_10 11
|
||||
#define PLUS_ONE_11 12
|
||||
#define PLUS_ONE_12 13
|
||||
#define PLUS_ONE_13 14
|
||||
#define PLUS_ONE_14 15
|
||||
#define PLUS_ONE_15 16
|
||||
#define PLUS_ONE(x) PLUS_ONE_ ## x
|
||||
#define _CONCAT3(a,b,c) a ## b ## c
|
||||
#define CONCAT3(a,b,c) _CONCAT3(a,b,c)
|
||||
|
||||
#define STORE_UNALIGNED(x) \
|
||||
CONCAT3(.Lchacha_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \
|
||||
.if (x != 12); \
|
||||
lw T0, (x*4)(STATE); \
|
||||
.endif; \
|
||||
lwl T1, (x*4)+MSB ## (IN); \
|
||||
lwr T1, (x*4)+LSB ## (IN); \
|
||||
.if (x == 12); \
|
||||
addu X ## x, NONCE_0; \
|
||||
.else; \
|
||||
addu X ## x, T0; \
|
||||
.endif; \
|
||||
CPU_TO_LE32(X ## x); \
|
||||
xor X ## x, T1; \
|
||||
swl X ## x, (x*4)+MSB ## (OUT); \
|
||||
swr X ## x, (x*4)+LSB ## (OUT);
|
||||
|
||||
#define STORE_ALIGNED(x) \
|
||||
CONCAT3(.Lchacha_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \
|
||||
.if (x != 12); \
|
||||
lw T0, (x*4)(STATE); \
|
||||
.endif; \
|
||||
lw T1, (x*4) ## (IN); \
|
||||
.if (x == 12); \
|
||||
addu X ## x, NONCE_0; \
|
||||
.else; \
|
||||
addu X ## x, T0; \
|
||||
.endif; \
|
||||
CPU_TO_LE32(X ## x); \
|
||||
xor X ## x, T1; \
|
||||
sw X ## x, (x*4) ## (OUT);
|
||||
|
||||
/* Jump table macro.
|
||||
 * Used for setup and handling the last bytes, which are not a multiple of 4.
|
||||
* X15 is free to store Xn
|
||||
* Every jumptable entry must be equal in size.
|
||||
*/
|
||||
#define JMPTBL_ALIGNED(x) \
|
||||
.Lchacha_mips_jmptbl_aligned_ ## x: ; \
|
||||
.set noreorder; \
|
||||
b .Lchacha_mips_xor_aligned_ ## x ## _b; \
|
||||
.if (x == 12); \
|
||||
addu SAVED_X, X ## x, NONCE_0; \
|
||||
.else; \
|
||||
addu SAVED_X, X ## x, SAVED_CA; \
|
||||
.endif; \
|
||||
.set reorder
|
||||
|
||||
#define JMPTBL_UNALIGNED(x) \
|
||||
.Lchacha_mips_jmptbl_unaligned_ ## x: ; \
|
||||
.set noreorder; \
|
||||
b .Lchacha_mips_xor_unaligned_ ## x ## _b; \
|
||||
.if (x == 12); \
|
||||
addu SAVED_X, X ## x, NONCE_0; \
|
||||
.else; \
|
||||
addu SAVED_X, X ## x, SAVED_CA; \
|
||||
.endif; \
|
||||
.set reorder
|
||||
|
||||
#define AXR(A, B, C, D, K, L, M, N, V, W, Y, Z, S) \
|
||||
addu X(A), X(K); \
|
||||
addu X(B), X(L); \
|
||||
addu X(C), X(M); \
|
||||
addu X(D), X(N); \
|
||||
xor X(V), X(A); \
|
||||
xor X(W), X(B); \
|
||||
xor X(Y), X(C); \
|
||||
xor X(Z), X(D); \
|
||||
rotl X(V), S; \
|
||||
rotl X(W), S; \
|
||||
rotl X(Y), S; \
|
||||
rotl X(Z), S;
|
||||
|
||||
.text
|
||||
.set reorder
|
||||
.set noat
|
||||
.globl chacha_crypt_arch
|
||||
.ent chacha_crypt_arch
|
||||
chacha_crypt_arch:
|
||||
.frame $sp, STACK_SIZE, $ra
|
||||
|
||||
/* Load number of rounds */
|
||||
lw $at, 16($sp)
|
||||
|
||||
addiu $sp, -STACK_SIZE
|
||||
|
||||
	/* Return if bytes == 0. */
|
||||
beqz BYTES, .Lchacha_mips_end
|
||||
|
||||
lw NONCE_0, 48(STATE)
|
||||
|
||||
/* Save s0-s7 */
|
||||
sw $s0, 0($sp)
|
||||
sw $s1, 4($sp)
|
||||
sw $s2, 8($sp)
|
||||
sw $s3, 12($sp)
|
||||
sw $s4, 16($sp)
|
||||
sw $s5, 20($sp)
|
||||
sw $s6, 24($sp)
|
||||
sw $s7, 28($sp)
|
||||
|
||||
	/* Test whether IN or OUT is unaligned.
|
||||
* IS_UNALIGNED = ( IN | OUT ) & 0x00000003
|
||||
*/
|
||||
or IS_UNALIGNED, IN, OUT
|
||||
andi IS_UNALIGNED, 0x3
|
||||
|
||||
b .Lchacha_rounds_start
|
||||
|
||||
.align 4
|
||||
.Loop_chacha_rounds:
|
||||
addiu IN, CHACHA20_BLOCK_SIZE
|
||||
addiu OUT, CHACHA20_BLOCK_SIZE
|
||||
addiu NONCE_0, 1
|
||||
|
||||
.Lchacha_rounds_start:
|
||||
lw X0, 0(STATE)
|
||||
lw X1, 4(STATE)
|
||||
lw X2, 8(STATE)
|
||||
lw X3, 12(STATE)
|
||||
|
||||
lw X4, 16(STATE)
|
||||
lw X5, 20(STATE)
|
||||
lw X6, 24(STATE)
|
||||
lw X7, 28(STATE)
|
||||
lw X8, 32(STATE)
|
||||
lw X9, 36(STATE)
|
||||
lw X10, 40(STATE)
|
||||
lw X11, 44(STATE)
|
||||
|
||||
move X12, NONCE_0
|
||||
lw X13, 52(STATE)
|
||||
lw X14, 56(STATE)
|
||||
lw X15, 60(STATE)
|
||||
|
||||
.Loop_chacha_xor_rounds:
|
||||
addiu $at, -2
|
||||
AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16);
|
||||
AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12);
|
||||
AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 8);
|
||||
AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 7);
|
||||
AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 16);
|
||||
AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12);
|
||||
AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8);
|
||||
AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7);
|
||||
bnez $at, .Loop_chacha_xor_rounds
|
||||
|
||||
addiu BYTES, -(CHACHA20_BLOCK_SIZE)
|
||||
|
||||
	/* Is data src/dst unaligned? If so, take the unaligned path. */
|
||||
bnez IS_UNALIGNED, .Loop_chacha_unaligned
|
||||
|
||||
	/* Set the number of rounds here to fill the delay slot. */
|
||||
lw $at, (STACK_SIZE+16)($sp)
|
||||
|
||||
	/* BYTES < 0: no full block left. */
|
||||
bltz BYTES, .Lchacha_mips_no_full_block_aligned
|
||||
|
||||
FOR_EACH_WORD_REV(STORE_ALIGNED)
|
||||
|
||||
/* BYTES > 0? Loop again. */
|
||||
bgtz BYTES, .Loop_chacha_rounds
|
||||
|
||||
/* Place this here to fill delay slot */
|
||||
addiu NONCE_0, 1
|
||||
|
||||
/* BYTES < 0? Handle last bytes */
|
||||
bltz BYTES, .Lchacha_mips_xor_bytes
|
||||
|
||||
.Lchacha_mips_xor_done:
|
||||
/* Restore used registers */
|
||||
lw $s0, 0($sp)
|
||||
lw $s1, 4($sp)
|
||||
lw $s2, 8($sp)
|
||||
lw $s3, 12($sp)
|
||||
lw $s4, 16($sp)
|
||||
lw $s5, 20($sp)
|
||||
lw $s6, 24($sp)
|
||||
lw $s7, 28($sp)
|
||||
|
||||
/* Write NONCE_0 back to right location in state */
|
||||
sw NONCE_0, 48(STATE)
|
||||
|
||||
.Lchacha_mips_end:
|
||||
addiu $sp, STACK_SIZE
|
||||
jr $ra
|
||||
|
||||
.Lchacha_mips_no_full_block_aligned:
|
||||
/* Restore the offset on BYTES */
|
||||
addiu BYTES, CHACHA20_BLOCK_SIZE
|
||||
|
||||
/* Get number of full WORDS */
|
||||
andi $at, BYTES, MASK_U32
|
||||
|
||||
/* Load upper half of jump table addr */
|
||||
lui T0, %hi(.Lchacha_mips_jmptbl_aligned_0)
|
||||
|
||||
/* Calculate lower half jump table offset */
|
||||
ins T0, $at, 1, 6
|
||||
|
||||
/* Add offset to STATE */
|
||||
addu T1, STATE, $at
|
||||
|
||||
/* Add lower half jump table addr */
|
||||
addiu T0, %lo(.Lchacha_mips_jmptbl_aligned_0)
|
||||
|
||||
/* Read value from STATE */
|
||||
lw SAVED_CA, 0(T1)
|
||||
|
||||
	/* Store the remaining byte counter as a negative value */
|
||||
subu BYTES, $at, BYTES
|
||||
|
||||
jr T0
|
||||
|
||||
/* Jump table */
|
||||
FOR_EACH_WORD(JMPTBL_ALIGNED)
|
||||
|
||||
|
||||
.Loop_chacha_unaligned:
|
||||
	/* Set the number of rounds here to fill the delay slot. */
|
||||
lw $at, (STACK_SIZE+16)($sp)
|
||||
|
||||
	/* BYTES < 0: no full block left. */
|
||||
bltz BYTES, .Lchacha_mips_no_full_block_unaligned
|
||||
|
||||
FOR_EACH_WORD_REV(STORE_UNALIGNED)
|
||||
|
||||
/* BYTES > 0? Loop again. */
|
||||
bgtz BYTES, .Loop_chacha_rounds
|
||||
|
||||
/* Write NONCE_0 back to right location in state */
|
||||
sw NONCE_0, 48(STATE)
|
||||
|
||||
.set noreorder
|
||||
/* Fall through to byte handling */
|
||||
bgez BYTES, .Lchacha_mips_xor_done
|
||||
.Lchacha_mips_xor_unaligned_0_b:
|
||||
.Lchacha_mips_xor_aligned_0_b:
|
||||
/* Place this here to fill delay slot */
|
||||
addiu NONCE_0, 1
|
||||
.set reorder
|
||||
|
||||
.Lchacha_mips_xor_bytes:
|
||||
addu IN, $at
|
||||
addu OUT, $at
|
||||
/* First byte */
|
||||
lbu T1, 0(IN)
|
||||
addiu $at, BYTES, 1
|
||||
CPU_TO_LE32(SAVED_X)
|
||||
ROTR(SAVED_X)
|
||||
xor T1, SAVED_X
|
||||
sb T1, 0(OUT)
|
||||
beqz $at, .Lchacha_mips_xor_done
|
||||
/* Second byte */
|
||||
lbu T1, 1(IN)
|
||||
addiu $at, BYTES, 2
|
||||
ROTx SAVED_X, 8
|
||||
xor T1, SAVED_X
|
||||
sb T1, 1(OUT)
|
||||
beqz $at, .Lchacha_mips_xor_done
|
||||
/* Third byte */
|
||||
lbu T1, 2(IN)
|
||||
ROTx SAVED_X, 8
|
||||
xor T1, SAVED_X
|
||||
sb T1, 2(OUT)
|
||||
b .Lchacha_mips_xor_done
|
||||
|
||||
.Lchacha_mips_no_full_block_unaligned:
|
||||
/* Restore the offset on BYTES */
|
||||
addiu BYTES, CHACHA20_BLOCK_SIZE
|
||||
|
||||
/* Get number of full WORDS */
|
||||
andi $at, BYTES, MASK_U32
|
||||
|
||||
/* Load upper half of jump table addr */
|
||||
lui T0, %hi(.Lchacha_mips_jmptbl_unaligned_0)
|
||||
|
||||
/* Calculate lower half jump table offset */
|
||||
ins T0, $at, 1, 6
|
||||
|
||||
/* Add offset to STATE */
|
||||
addu T1, STATE, $at
|
||||
|
||||
/* Add lower half jump table addr */
|
||||
addiu T0, %lo(.Lchacha_mips_jmptbl_unaligned_0)
|
||||
|
||||
/* Read value from STATE */
|
||||
lw SAVED_CA, 0(T1)
|
||||
|
||||
	/* Store the remaining byte counter as a negative value */
|
||||
subu BYTES, $at, BYTES
|
||||
|
||||
jr T0
|
||||
|
||||
/* Jump table */
|
||||
FOR_EACH_WORD(JMPTBL_UNALIGNED)
|
||||
.end chacha_crypt_arch
|
||||
.set at
|
||||
|
||||
/* Input arguments
|
||||
* STATE $a0
|
||||
* OUT $a1
|
||||
* NROUND $a2
|
||||
*/
|
||||
|
||||
#undef X12
|
||||
#undef X13
|
||||
#undef X14
|
||||
#undef X15
|
||||
|
||||
#define X12 $a3
|
||||
#define X13 $at
|
||||
#define X14 $v0
|
||||
#define X15 STATE
|
||||
|
||||
.set noat
|
||||
.globl hchacha_block_arch
|
||||
.ent hchacha_block_arch
|
||||
hchacha_block_arch:
|
||||
.frame $sp, STACK_SIZE, $ra
|
||||
|
||||
addiu $sp, -STACK_SIZE
|
||||
|
||||
/* Save X11(s6) */
|
||||
sw X11, 0($sp)
|
||||
|
||||
lw X0, 0(STATE)
|
||||
lw X1, 4(STATE)
|
||||
lw X2, 8(STATE)
|
||||
lw X3, 12(STATE)
|
||||
lw X4, 16(STATE)
|
||||
lw X5, 20(STATE)
|
||||
lw X6, 24(STATE)
|
||||
lw X7, 28(STATE)
|
||||
lw X8, 32(STATE)
|
||||
lw X9, 36(STATE)
|
||||
lw X10, 40(STATE)
|
||||
lw X11, 44(STATE)
|
||||
lw X12, 48(STATE)
|
||||
lw X13, 52(STATE)
|
||||
lw X14, 56(STATE)
|
||||
lw X15, 60(STATE)
|
||||
|
||||
.Loop_hchacha_xor_rounds:
|
||||
addiu $a2, -2
|
||||
AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16);
|
||||
AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12);
|
||||
AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 8);
|
||||
AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 7);
|
||||
AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 16);
|
||||
AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12);
|
||||
AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8);
|
||||
AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7);
|
||||
bnez $a2, .Loop_hchacha_xor_rounds
|
||||
|
||||
/* Restore used register */
|
||||
lw X11, 0($sp)
|
||||
|
||||
sw X0, 0(OUT)
|
||||
sw X1, 4(OUT)
|
||||
sw X2, 8(OUT)
|
||||
sw X3, 12(OUT)
|
||||
sw X12, 16(OUT)
|
||||
sw X13, 20(OUT)
|
||||
sw X14, 24(OUT)
|
||||
sw X15, 28(OUT)
|
||||
|
||||
addiu $sp, STACK_SIZE
|
||||
jr $ra
|
||||
.end hchacha_block_arch
|
||||
.set at
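The AXR macro above applies the add/xor/rotate step of the ChaCha quarter round to four word groups per invocation, with the rotate amounts 16, 12, 8 and 7 passed as its last argument. A plain C sketch of a single quarter round, shown for comparison only:

#include <stdint.h>

#define ROTL32(v, n)	(((v) << (n)) | ((v) >> (32 - (n))))

static void chacha_quarter_round(uint32_t x[16], int a, int b, int c, int d)
{
	x[a] += x[b]; x[d] ^= x[a]; x[d] = ROTL32(x[d], 16);
	x[c] += x[d]; x[b] ^= x[c]; x[b] = ROTL32(x[b], 12);
	x[a] += x[b]; x[d] ^= x[a]; x[d] = ROTL32(x[d], 8);
	x[c] += x[d]; x[b] ^= x[c]; x[b] = ROTL32(x[b], 7);
}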
|
|
@@ -0,0 +1,150 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* MIPS accelerated ChaCha and XChaCha stream ciphers,
|
||||
* including ChaCha20 (RFC7539)
|
||||
*
|
||||
* Copyright (C) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
|
||||
*/
|
||||
|
||||
#include <asm/byteorder.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/chacha.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
asmlinkage void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
|
||||
unsigned int bytes, int nrounds);
|
||||
EXPORT_SYMBOL(chacha_crypt_arch);
|
||||
|
||||
asmlinkage void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds);
|
||||
EXPORT_SYMBOL(hchacha_block_arch);
|
||||
|
||||
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
|
||||
{
|
||||
chacha_init_generic(state, key, iv);
|
||||
}
|
||||
EXPORT_SYMBOL(chacha_init_arch);
|
||||
|
||||
static int chacha_mips_stream_xor(struct skcipher_request *req,
|
||||
const struct chacha_ctx *ctx, const u8 *iv)
|
||||
{
|
||||
struct skcipher_walk walk;
|
||||
u32 state[16];
|
||||
int err;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
chacha_init_generic(state, ctx->key, iv);
|
||||
|
||||
while (walk.nbytes > 0) {
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
|
||||
if (nbytes < walk.total)
|
||||
nbytes = round_down(nbytes, walk.stride);
|
||||
|
||||
chacha_crypt(state, walk.dst.virt.addr, walk.src.virt.addr,
|
||||
nbytes, ctx->nrounds);
|
||||
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int chacha_mips(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
return chacha_mips_stream_xor(req, ctx, req->iv);
|
||||
}
|
||||
|
||||
static int xchacha_mips(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct chacha_ctx subctx;
|
||||
u32 state[16];
|
||||
u8 real_iv[16];
|
||||
|
||||
chacha_init_generic(state, ctx->key, req->iv);
|
||||
|
||||
hchacha_block(state, subctx.key, ctx->nrounds);
|
||||
subctx.nrounds = ctx->nrounds;
|
||||
|
||||
memcpy(&real_iv[0], req->iv + 24, 8);
|
||||
memcpy(&real_iv[8], req->iv + 16, 8);
|
||||
return chacha_mips_stream_xor(req, &subctx, real_iv);
|
||||
}
|
||||
|
||||
static struct skcipher_alg algs[] = {
|
||||
{
|
||||
.base.cra_name = "chacha20",
|
||||
.base.cra_driver_name = "chacha20-mips",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct chacha_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.min_keysize = CHACHA_KEY_SIZE,
|
||||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = CHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.setkey = chacha20_setkey,
|
||||
.encrypt = chacha_mips,
|
||||
.decrypt = chacha_mips,
|
||||
}, {
|
||||
.base.cra_name = "xchacha20",
|
||||
.base.cra_driver_name = "xchacha20-mips",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct chacha_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.min_keysize = CHACHA_KEY_SIZE,
|
||||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = XCHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.setkey = chacha20_setkey,
|
||||
.encrypt = xchacha_mips,
|
||||
.decrypt = xchacha_mips,
|
||||
}, {
|
||||
.base.cra_name = "xchacha12",
|
||||
.base.cra_driver_name = "xchacha12-mips",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct chacha_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.min_keysize = CHACHA_KEY_SIZE,
|
||||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = XCHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.setkey = chacha12_setkey,
|
||||
.encrypt = xchacha_mips,
|
||||
.decrypt = xchacha_mips,
|
||||
}
|
||||
};
|
||||
|
||||
static int __init chacha_simd_mod_init(void)
|
||||
{
|
||||
return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
|
||||
}
|
||||
|
||||
static void __exit chacha_simd_mod_fini(void)
|
||||
{
|
||||
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
|
||||
}
|
||||
|
||||
module_init(chacha_simd_mod_init);
|
||||
module_exit(chacha_simd_mod_fini);
|
||||
|
||||
MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (MIPS accelerated)");
|
||||
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_ALIAS_CRYPTO("chacha20");
|
||||
MODULE_ALIAS_CRYPTO("chacha20-mips");
|
||||
MODULE_ALIAS_CRYPTO("xchacha20");
|
||||
MODULE_ALIAS_CRYPTO("xchacha20-mips");
|
||||
MODULE_ALIAS_CRYPTO("xchacha12");
|
||||
MODULE_ALIAS_CRYPTO("xchacha12-mips");
|
|
@@ -0,0 +1,203 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* OpenSSL/Cryptogams accelerated Poly1305 transform for MIPS
|
||||
*
|
||||
* Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
|
||||
*/
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/poly1305.h>
|
||||
#include <linux/cpufeature.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
asmlinkage void poly1305_init_mips(void *state, const u8 *key);
|
||||
asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
|
||||
asmlinkage void poly1305_emit_mips(void *state, __le32 *digest, const u32 *nonce);
|
||||
|
||||
void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
|
||||
{
|
||||
poly1305_init_mips(&dctx->h, key);
|
||||
dctx->s[0] = get_unaligned_le32(key + 16);
|
||||
dctx->s[1] = get_unaligned_le32(key + 20);
|
||||
dctx->s[2] = get_unaligned_le32(key + 24);
|
||||
dctx->s[3] = get_unaligned_le32(key + 28);
|
||||
dctx->buflen = 0;
|
||||
}
|
||||
EXPORT_SYMBOL(poly1305_init_arch);
|
||||
|
||||
static int mips_poly1305_init(struct shash_desc *desc)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
dctx->buflen = 0;
|
||||
dctx->rset = 0;
|
||||
dctx->sset = false;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mips_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
|
||||
u32 len, u32 hibit)
|
||||
{
|
||||
if (unlikely(!dctx->sset)) {
|
||||
if (!dctx->rset) {
|
||||
poly1305_init_mips(&dctx->h, src);
|
||||
src += POLY1305_BLOCK_SIZE;
|
||||
len -= POLY1305_BLOCK_SIZE;
|
||||
dctx->rset = 1;
|
||||
}
|
||||
if (len >= POLY1305_BLOCK_SIZE) {
|
||||
dctx->s[0] = get_unaligned_le32(src + 0);
|
||||
dctx->s[1] = get_unaligned_le32(src + 4);
|
||||
dctx->s[2] = get_unaligned_le32(src + 8);
|
||||
dctx->s[3] = get_unaligned_le32(src + 12);
|
||||
src += POLY1305_BLOCK_SIZE;
|
||||
len -= POLY1305_BLOCK_SIZE;
|
||||
dctx->sset = true;
|
||||
}
|
||||
if (len < POLY1305_BLOCK_SIZE)
|
||||
return;
|
||||
}
|
||||
|
||||
len &= ~(POLY1305_BLOCK_SIZE - 1);
|
||||
|
||||
poly1305_blocks_mips(&dctx->h, src, len, hibit);
|
||||
}
|
||||
|
||||
static int mips_poly1305_update(struct shash_desc *desc, const u8 *src,
|
||||
unsigned int len)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
if (unlikely(dctx->buflen)) {
|
||||
u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen);
|
||||
|
||||
memcpy(dctx->buf + dctx->buflen, src, bytes);
|
||||
src += bytes;
|
||||
len -= bytes;
|
||||
dctx->buflen += bytes;
|
||||
|
||||
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
|
||||
mips_poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 1);
|
||||
dctx->buflen = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (likely(len >= POLY1305_BLOCK_SIZE)) {
|
||||
mips_poly1305_blocks(dctx, src, len, 1);
|
||||
src += round_down(len, POLY1305_BLOCK_SIZE);
|
||||
len %= POLY1305_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
if (unlikely(len)) {
|
||||
dctx->buflen = len;
|
||||
memcpy(dctx->buf, src, len);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
if (unlikely(dctx->buflen)) {
|
||||
u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen);
|
||||
|
||||
memcpy(dctx->buf + dctx->buflen, src, bytes);
|
||||
src += bytes;
|
||||
nbytes -= bytes;
|
||||
dctx->buflen += bytes;
|
||||
|
||||
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
|
||||
poly1305_blocks_mips(&dctx->h, dctx->buf,
|
||||
POLY1305_BLOCK_SIZE, 1);
|
||||
dctx->buflen = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
|
||||
unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
|
||||
|
||||
poly1305_blocks_mips(&dctx->h, src, len, 1);
|
||||
src += len;
|
||||
nbytes %= POLY1305_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
if (unlikely(nbytes)) {
|
||||
dctx->buflen = nbytes;
|
||||
memcpy(dctx->buf, src, nbytes);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(poly1305_update_arch);
|
||||
|
||||
void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
|
||||
{
|
||||
__le32 digest[4];
|
||||
u64 f = 0;
|
||||
|
||||
if (unlikely(dctx->buflen)) {
|
||||
dctx->buf[dctx->buflen++] = 1;
|
||||
memset(dctx->buf + dctx->buflen, 0,
|
||||
POLY1305_BLOCK_SIZE - dctx->buflen);
|
||||
poly1305_blocks_mips(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
|
||||
}
|
||||
|
||||
poly1305_emit_mips(&dctx->h, digest, dctx->s);
|
||||
|
||||
/* mac = (h + s) % (2^128) */
|
||||
f = (f >> 32) + le32_to_cpu(digest[0]);
|
||||
put_unaligned_le32(f, dst);
|
||||
f = (f >> 32) + le32_to_cpu(digest[1]);
|
||||
put_unaligned_le32(f, dst + 4);
|
||||
f = (f >> 32) + le32_to_cpu(digest[2]);
|
||||
put_unaligned_le32(f, dst + 8);
|
||||
f = (f >> 32) + le32_to_cpu(digest[3]);
|
||||
put_unaligned_le32(f, dst + 12);
|
||||
|
||||
*dctx = (struct poly1305_desc_ctx){};
|
||||
}
|
||||
EXPORT_SYMBOL(poly1305_final_arch);
|
||||
|
||||
static int mips_poly1305_final(struct shash_desc *desc, u8 *dst)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
if (unlikely(!dctx->sset))
|
||||
return -ENOKEY;
|
||||
|
||||
poly1305_final_arch(dctx, dst);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct shash_alg mips_poly1305_alg = {
|
||||
.init = mips_poly1305_init,
|
||||
.update = mips_poly1305_update,
|
||||
.final = mips_poly1305_final,
|
||||
.digestsize = POLY1305_DIGEST_SIZE,
|
||||
.descsize = sizeof(struct poly1305_desc_ctx),
|
||||
|
||||
.base.cra_name = "poly1305",
|
||||
.base.cra_driver_name = "poly1305-mips",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_blocksize = POLY1305_BLOCK_SIZE,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init mips_poly1305_mod_init(void)
|
||||
{
|
||||
return crypto_register_shash(&mips_poly1305_alg);
|
||||
}
|
||||
|
||||
static void __exit mips_poly1305_mod_exit(void)
|
||||
{
|
||||
crypto_unregister_shash(&mips_poly1305_alg);
|
||||
}
|
||||
|
||||
module_init(mips_poly1305_mod_init);
|
||||
module_exit(mips_poly1305_mod_exit);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_ALIAS_CRYPTO("poly1305");
|
||||
MODULE_ALIAS_CRYPTO("poly1305-mips");
|
The diff for one file is not shown here because of its large size.
|
@@ -17,7 +17,10 @@
|
|||
#include <asm/byteorder.h>
|
||||
#include <asm/switch_to.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/xts.h>
|
||||
#include <crypto/gf128mul.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
|
||||
/*
|
||||
* MAX_BYTES defines the number of bytes that are allowed to be processed
|
||||
|
@@ -118,13 +121,19 @@ static int ppc_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int ppc_xts_setkey(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
static int ppc_aes_setkey_skcipher(struct crypto_skcipher *tfm,
|
||||
const u8 *in_key, unsigned int key_len)
|
||||
{
|
||||
return ppc_aes_setkey(crypto_skcipher_tfm(tfm), in_key, key_len);
|
||||
}
|
||||
|
||||
static int ppc_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct ppc_xts_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
int err;
|
||||
|
||||
err = xts_check_key(tfm, in_key, key_len);
|
||||
err = xts_verify_key(tfm, in_key, key_len);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
|
@@ -133,7 +142,7 @@ static int ppc_xts_setkey(struct crypto_tfm *tfm, const u8 *in_key,
|
|||
if (key_len != AES_KEYSIZE_128 &&
|
||||
key_len != AES_KEYSIZE_192 &&
|
||||
key_len != AES_KEYSIZE_256) {
|
||||
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@@ -178,208 +187,229 @@ static void ppc_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
|||
spe_end();
|
||||
}
|
||||
|
||||
static int ppc_ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ppc_ecb_crypt(struct skcipher_request *req, bool enc)
|
||||
{
|
||||
struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
unsigned int ubytes;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
ubytes = nbytes > MAX_BYTES ?
|
||||
nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
|
||||
nbytes -= ubytes;
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
|
||||
nbytes = round_down(nbytes, AES_BLOCK_SIZE);
|
||||
|
||||
spe_begin();
|
||||
ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
ctx->key_enc, ctx->rounds, nbytes);
|
||||
if (enc)
|
||||
ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
ctx->key_enc, ctx->rounds, nbytes);
|
||||
else
|
||||
ppc_decrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
ctx->key_dec, ctx->rounds, nbytes);
|
||||
spe_end();
|
||||
|
||||
err = blkcipher_walk_done(desc, &walk, ubytes);
|
||||
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ppc_ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ppc_ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
unsigned int ubytes;
|
||||
return ppc_ecb_crypt(req, true);
|
||||
}
|
||||
|
||||
static int ppc_ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return ppc_ecb_crypt(req, false);
|
||||
}
|
||||
|
||||
static int ppc_cbc_crypt(struct skcipher_request *req, bool enc)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
ubytes = nbytes > MAX_BYTES ?
|
||||
nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
|
||||
nbytes -= ubytes;
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
|
||||
nbytes = round_down(nbytes, AES_BLOCK_SIZE);
|
||||
|
||||
spe_begin();
|
||||
ppc_decrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
ctx->key_dec, ctx->rounds, nbytes);
|
||||
if (enc)
|
||||
ppc_encrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
ctx->key_enc, ctx->rounds, nbytes,
|
||||
walk.iv);
|
||||
else
|
||||
ppc_decrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
ctx->key_dec, ctx->rounds, nbytes,
|
||||
walk.iv);
|
||||
spe_end();
|
||||
|
||||
err = blkcipher_walk_done(desc, &walk, ubytes);
|
||||
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ppc_cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ppc_cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
unsigned int ubytes;
|
||||
int err;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
ubytes = nbytes > MAX_BYTES ?
|
||||
nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
|
||||
nbytes -= ubytes;
|
||||
|
||||
spe_begin();
|
||||
ppc_encrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
ctx->key_enc, ctx->rounds, nbytes, walk.iv);
|
||||
spe_end();
|
||||
|
||||
err = blkcipher_walk_done(desc, &walk, ubytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
return ppc_cbc_crypt(req, true);
|
||||
}
|
||||
|
||||
static int ppc_cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ppc_cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
unsigned int ubytes;
|
||||
int err;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
ubytes = nbytes > MAX_BYTES ?
|
||||
nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
|
||||
nbytes -= ubytes;
|
||||
|
||||
spe_begin();
|
||||
ppc_decrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
ctx->key_dec, ctx->rounds, nbytes, walk.iv);
|
||||
spe_end();
|
||||
|
||||
err = blkcipher_walk_done(desc, &walk, ubytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
return ppc_cbc_crypt(req, false);
|
||||
}
|
||||
|
||||
static int ppc_ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ppc_ctr_crypt(struct skcipher_request *req)
|
||||
{
|
||||
struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
unsigned int pbytes, ubytes;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while ((pbytes = walk.nbytes)) {
|
||||
pbytes = pbytes > MAX_BYTES ? MAX_BYTES : pbytes;
|
||||
pbytes = pbytes == nbytes ?
|
||||
nbytes : pbytes & ~(AES_BLOCK_SIZE - 1);
|
||||
ubytes = walk.nbytes - pbytes;
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
|
||||
if (nbytes < walk.total)
|
||||
nbytes = round_down(nbytes, AES_BLOCK_SIZE);
|
||||
|
||||
spe_begin();
|
||||
ppc_crypt_ctr(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
ctx->key_enc, ctx->rounds, pbytes , walk.iv);
|
||||
ctx->key_enc, ctx->rounds, nbytes, walk.iv);
|
||||
spe_end();
|
||||
|
||||
nbytes -= pbytes;
|
||||
err = blkcipher_walk_done(desc, &walk, ubytes);
|
||||
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ppc_xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ppc_xts_crypt(struct skcipher_request *req, bool enc)
|
||||
{
|
||||
struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
unsigned int ubytes;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
u32 *twk;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
twk = ctx->key_twk;
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
ubytes = nbytes > MAX_BYTES ?
|
||||
nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
|
||||
nbytes -= ubytes;
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
|
||||
nbytes = round_down(nbytes, AES_BLOCK_SIZE);
|
||||
|
||||
spe_begin();
|
||||
ppc_encrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
ctx->key_enc, ctx->rounds, nbytes, walk.iv, twk);
|
||||
if (enc)
|
||||
ppc_encrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
ctx->key_enc, ctx->rounds, nbytes,
|
||||
walk.iv, twk);
|
||||
else
|
||||
ppc_decrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
ctx->key_dec, ctx->rounds, nbytes,
|
||||
walk.iv, twk);
|
||||
spe_end();
|
||||
|
||||
twk = NULL;
|
||||
err = blkcipher_walk_done(desc, &walk, ubytes);
|
||||
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ppc_xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ppc_xts_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
unsigned int ubytes;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
int tail = req->cryptlen % AES_BLOCK_SIZE;
|
||||
int offset = req->cryptlen - tail - AES_BLOCK_SIZE;
|
||||
struct skcipher_request subreq;
|
||||
u8 b[2][AES_BLOCK_SIZE];
|
||||
int err;
|
||||
u32 *twk;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
twk = ctx->key_twk;
|
||||
if (req->cryptlen < AES_BLOCK_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
ubytes = nbytes > MAX_BYTES ?
|
||||
nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
|
||||
nbytes -= ubytes;
|
||||
|
||||
spe_begin();
|
||||
ppc_decrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
ctx->key_dec, ctx->rounds, nbytes, walk.iv, twk);
|
||||
spe_end();
|
||||
|
||||
twk = NULL;
|
||||
err = blkcipher_walk_done(desc, &walk, ubytes);
|
||||
if (tail) {
|
||||
subreq = *req;
|
||||
skcipher_request_set_crypt(&subreq, req->src, req->dst,
|
||||
req->cryptlen - tail, req->iv);
|
||||
req = &subreq;
|
||||
}
|
||||
|
||||
return err;
|
||||
err = ppc_xts_crypt(req, true);
|
||||
if (err || !tail)
|
||||
return err;
|
||||
|
||||
scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE, 0);
|
||||
memcpy(b[1], b[0], tail);
|
||||
scatterwalk_map_and_copy(b[0], req->src, offset + AES_BLOCK_SIZE, tail, 0);
|
||||
|
||||
spe_begin();
|
||||
ppc_encrypt_xts(b[0], b[0], ctx->key_enc, ctx->rounds, AES_BLOCK_SIZE,
|
||||
req->iv, NULL);
|
||||
spe_end();
|
||||
|
||||
scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE + tail, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ppc_xts_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
int tail = req->cryptlen % AES_BLOCK_SIZE;
|
||||
int offset = req->cryptlen - tail - AES_BLOCK_SIZE;
|
||||
struct skcipher_request subreq;
|
||||
u8 b[3][AES_BLOCK_SIZE];
|
||||
le128 twk;
|
||||
int err;
|
||||
|
||||
if (req->cryptlen < AES_BLOCK_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
if (tail) {
|
||||
subreq = *req;
|
||||
skcipher_request_set_crypt(&subreq, req->src, req->dst,
|
||||
offset, req->iv);
|
||||
req = &subreq;
|
||||
}
|
||||
|
||||
err = ppc_xts_crypt(req, false);
|
||||
if (err || !tail)
|
||||
return err;
|
||||
|
||||
scatterwalk_map_and_copy(b[1], req->src, offset, AES_BLOCK_SIZE + tail, 0);
|
||||
|
||||
spe_begin();
|
||||
if (!offset)
|
||||
ppc_encrypt_ecb(req->iv, req->iv, ctx->key_twk, ctx->rounds,
|
||||
AES_BLOCK_SIZE);
|
||||
|
||||
gf128mul_x_ble(&twk, (le128 *)req->iv);
|
||||
|
||||
ppc_decrypt_xts(b[1], b[1], ctx->key_dec, ctx->rounds, AES_BLOCK_SIZE,
|
||||
(u8 *)&twk, NULL);
|
||||
memcpy(b[0], b[2], tail);
|
||||
memcpy(b[0] + tail, b[1] + tail, AES_BLOCK_SIZE - tail);
|
||||
ppc_decrypt_xts(b[0], b[0], ctx->key_dec, ctx->rounds, AES_BLOCK_SIZE,
|
||||
req->iv, NULL);
|
||||
spe_end();
|
||||
|
||||
scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE + tail, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -388,9 +418,9 @@ static int ppc_xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
|||
 * This improves IPsec throughput by another few percent. Additionally we assume
|
||||
* that AES context is always aligned to at least 8 bytes because it is created
|
||||
* with kmalloc() in the crypto infrastructure
|
||||
*
|
||||
*/
|
||||
static struct crypto_alg aes_algs[] = { {
|
||||
|
||||
static struct crypto_alg aes_cipher_alg = {
|
||||
.cra_name = "aes",
|
||||
.cra_driver_name = "aes-ppc-spe",
|
||||
.cra_priority = 300,
|
||||
|
@@ -408,96 +438,84 @@ static struct crypto_alg aes_algs[] = { {
|
|||
.cia_decrypt = ppc_aes_decrypt
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.cra_name = "ecb(aes)",
|
||||
.cra_driver_name = "ecb-ppc-spe",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct ppc_aes_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ppc_aes_setkey,
|
||||
.encrypt = ppc_ecb_encrypt,
|
||||
.decrypt = ppc_ecb_decrypt,
|
||||
}
|
||||
};
|
||||
|
||||
static struct skcipher_alg aes_skcipher_algs[] = {
|
||||
{
|
||||
.base.cra_name = "ecb(aes)",
|
||||
.base.cra_driver_name = "ecb-ppc-spe",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct ppc_aes_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.setkey = ppc_aes_setkey_skcipher,
|
||||
.encrypt = ppc_ecb_encrypt,
|
||||
.decrypt = ppc_ecb_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "cbc(aes)",
|
||||
.base.cra_driver_name = "cbc-ppc-spe",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct ppc_aes_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ppc_aes_setkey_skcipher,
|
||||
.encrypt = ppc_cbc_encrypt,
|
||||
.decrypt = ppc_cbc_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "ctr(aes)",
|
||||
.base.cra_driver_name = "ctr-ppc-spe",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct ppc_aes_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ppc_aes_setkey_skcipher,
|
||||
.encrypt = ppc_ctr_crypt,
|
||||
.decrypt = ppc_ctr_crypt,
|
||||
.chunksize = AES_BLOCK_SIZE,
|
||||
}, {
|
||||
.base.cra_name = "xts(aes)",
|
||||
.base.cra_driver_name = "xts-ppc-spe",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct ppc_xts_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = AES_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = AES_MAX_KEY_SIZE * 2,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ppc_xts_setkey,
|
||||
.encrypt = ppc_xts_encrypt,
|
||||
.decrypt = ppc_xts_decrypt,
|
||||
}
|
||||
}, {
|
||||
.cra_name = "cbc(aes)",
|
||||
.cra_driver_name = "cbc-ppc-spe",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct ppc_aes_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ppc_aes_setkey,
|
||||
.encrypt = ppc_cbc_encrypt,
|
||||
.decrypt = ppc_cbc_decrypt,
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.cra_name = "ctr(aes)",
|
||||
.cra_driver_name = "ctr-ppc-spe",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct ppc_aes_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ppc_aes_setkey,
|
||||
.encrypt = ppc_ctr_crypt,
|
||||
.decrypt = ppc_ctr_crypt,
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.cra_name = "xts(aes)",
|
||||
.cra_driver_name = "xts-ppc-spe",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct ppc_xts_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = AES_MAX_KEY_SIZE * 2,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ppc_xts_setkey,
|
||||
.encrypt = ppc_xts_encrypt,
|
||||
.decrypt = ppc_xts_decrypt,
|
||||
}
|
||||
}
|
||||
} };
|
||||
};
|
||||
|
||||
static int __init ppc_aes_mod_init(void)
|
||||
{
|
||||
return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
|
||||
int err;
|
||||
|
||||
err = crypto_register_alg(&aes_cipher_alg);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = crypto_register_skciphers(aes_skcipher_algs,
|
||||
ARRAY_SIZE(aes_skcipher_algs));
|
||||
if (err)
|
||||
crypto_unregister_alg(&aes_cipher_alg);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void __exit ppc_aes_mod_fini(void)
|
||||
{
|
||||
crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
|
||||
crypto_unregister_alg(&aes_cipher_alg);
|
||||
crypto_unregister_skciphers(aes_skcipher_algs,
|
||||
ARRAY_SIZE(aes_skcipher_algs));
|
||||
}
|
||||
|
||||
module_init(ppc_aes_mod_init);
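ppc_xts_encrypt() above implements ciphertext stealing for a trailing partial block: after the bulk pass, the last full ciphertext block lends its leading bytes to the short final block before one more single-block encryption. A conceptual, buffer-level sketch of that rearrangement; the block cipher below is a toy XOR stand-in for the single-block ppc_encrypt_xts() call, not real crypto, and tweak sequencing is left out:

#include <stdint.h>
#include <string.h>

#define BLK 16

static void xts_encrypt_block(uint8_t out[BLK], const uint8_t in[BLK],
			      const uint8_t key[BLK], const uint8_t tweak[BLK])
{
	int i;

	for (i = 0; i < BLK; i++)	/* toy stand-in, illustrative only */
		out[i] = in[i] ^ key[i] ^ tweak[i];
}

/* buf: in-place buffer whose first len - (len % BLK) bytes were already
 * encrypted by the bulk pass; the remaining tail is still plaintext. */
static void cts_tail_encrypt(uint8_t *buf, size_t len,
			     const uint8_t key[BLK], const uint8_t tweak[BLK])
{
	size_t tail = len % BLK;
	uint8_t *last_full = buf + len - tail - BLK;	/* last full ciphertext block */
	uint8_t *partial = last_full + BLK;		/* short final block (plaintext) */
	uint8_t b[BLK];

	if (!tail || len < BLK + tail)
		return;

	memcpy(b, last_full, BLK);		/* start from the last full ciphertext block */
	memcpy(b, partial, tail);		/* overwrite its head with the plaintext tail */
	memcpy(partial, last_full, tail);	/* stolen bytes become the final partial block */
	xts_encrypt_block(last_full, b, key, tweak);	/* re-encrypt as the new last block */
}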
|
||||
|
|
|
@@ -164,7 +164,7 @@ config ARCH_RV32I
|
|||
config ARCH_RV64I
|
||||
bool "RV64I"
|
||||
select 64BIT
|
||||
select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000
|
||||
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
|
||||
select HAVE_FUNCTION_TRACER
|
||||
select HAVE_FUNCTION_GRAPH_TRACER
|
||||
select HAVE_FTRACE_MCOUNT_RECORD
|
||||
|
|
|
@@ -44,7 +44,7 @@ struct s390_aes_ctx {
|
|||
int key_len;
|
||||
unsigned long fc;
|
||||
union {
|
||||
struct crypto_sync_skcipher *blk;
|
||||
struct crypto_skcipher *skcipher;
|
||||
struct crypto_cipher *cip;
|
||||
} fallback;
|
||||
};
|
||||
|
@@ -54,7 +54,7 @@ struct s390_xts_ctx {
|
|||
u8 pcc_key[32];
|
||||
int key_len;
|
||||
unsigned long fc;
|
||||
struct crypto_sync_skcipher *fallback;
|
||||
struct crypto_skcipher *fallback;
|
||||
};
|
||||
|
||||
struct gcm_sg_walk {
|
||||
|
@@ -178,66 +178,41 @@ static struct crypto_alg aes_alg = {
|
|||
}
|
||||
};
|
||||
|
||||
static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int len)
|
||||
static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int len)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
unsigned int ret;
|
||||
|
||||
crypto_sync_skcipher_clear_flags(sctx->fallback.blk,
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
crypto_sync_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
|
||||
ret = crypto_sync_skcipher_setkey(sctx->fallback.blk, key, len);
|
||||
|
||||
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
|
||||
tfm->crt_flags |= crypto_sync_skcipher_get_flags(sctx->fallback.blk) &
|
||||
CRYPTO_TFM_RES_MASK;
|
||||
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
|
||||
int ret;
|
||||
|
||||
crypto_skcipher_clear_flags(sctx->fallback.skcipher,
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
crypto_skcipher_set_flags(sctx->fallback.skcipher,
|
||||
crypto_skcipher_get_flags(tfm) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
ret = crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
|
||||
crypto_skcipher_set_flags(tfm,
|
||||
crypto_skcipher_get_flags(sctx->fallback.skcipher) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int fallback_blk_dec(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
|
||||
struct skcipher_request *req,
|
||||
unsigned long modifier)
|
||||
{
|
||||
unsigned int ret;
|
||||
struct crypto_blkcipher *tfm = desc->tfm;
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
|
||||
SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
|
||||
struct skcipher_request *subreq = skcipher_request_ctx(req);
|
||||
|
||||
skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
|
||||
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
|
||||
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
|
||||
|
||||
ret = crypto_skcipher_decrypt(req);
|
||||
|
||||
skcipher_request_zero(req);
|
||||
return ret;
|
||||
*subreq = *req;
|
||||
skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
|
||||
return (modifier & CPACF_DECRYPT) ?
|
||||
crypto_skcipher_decrypt(subreq) :
|
||||
crypto_skcipher_encrypt(subreq);
|
||||
}
|
||||
|
||||
static int fallback_blk_enc(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
unsigned int ret;
|
||||
struct crypto_blkcipher *tfm = desc->tfm;
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
|
||||
SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
|
||||
|
||||
skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
|
||||
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
|
||||
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
|
||||
|
||||
ret = crypto_skcipher_encrypt(req);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
|
||||
unsigned long fc;
|
||||
|
||||
/* Pick the correct function code based on the key length */
|
||||
|
@ -248,111 +223,92 @@ static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
|||
/* Check if the function code is available */
|
||||
sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
|
||||
if (!sctx->fc)
|
||||
return setkey_fallback_blk(tfm, in_key, key_len);
|
||||
return setkey_fallback_skcipher(tfm, in_key, key_len);
|
||||
|
||||
sctx->key_len = key_len;
|
||||
memcpy(sctx->key, in_key, key_len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
|
||||
struct blkcipher_walk *walk)
|
||||
static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes, n;
|
||||
int ret;
|
||||
|
||||
ret = blkcipher_walk_virt(desc, walk);
|
||||
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
|
||||
if (unlikely(!sctx->fc))
|
||||
return fallback_skcipher_crypt(sctx, req, modifier);
|
||||
|
||||
ret = skcipher_walk_virt(&walk, req, false);
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
/* only use complete blocks */
|
||||
n = nbytes & ~(AES_BLOCK_SIZE - 1);
|
||||
cpacf_km(sctx->fc | modifier, sctx->key,
|
||||
walk->dst.virt.addr, walk->src.virt.addr, n);
|
||||
ret = blkcipher_walk_done(desc, walk, nbytes - n);
|
||||
walk.dst.virt.addr, walk.src.virt.addr, n);
|
||||
ret = skcipher_walk_done(&walk, nbytes - n);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ecb_aes_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
if (unlikely(!sctx->fc))
|
||||
return fallback_blk_enc(desc, dst, src, nbytes);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_aes_crypt(desc, 0, &walk);
|
||||
return ecb_aes_crypt(req, 0);
|
||||
}
|
||||
|
||||
static int ecb_aes_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ecb_aes_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
if (unlikely(!sctx->fc))
|
||||
return fallback_blk_dec(desc, dst, src, nbytes);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
|
||||
return ecb_aes_crypt(req, CPACF_DECRYPT);
|
||||
}
|
||||
|
||||
static int fallback_init_blk(struct crypto_tfm *tfm)
|
||||
static int fallback_init_skcipher(struct crypto_skcipher *tfm)
|
||||
{
|
||||
const char *name = tfm->__crt_alg->cra_name;
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
const char *name = crypto_tfm_alg_name(&tfm->base);
|
||||
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
sctx->fallback.blk = crypto_alloc_sync_skcipher(name, 0,
|
||||
CRYPTO_ALG_NEED_FALLBACK);
|
||||
sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
|
||||
CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
|
||||
|
||||
if (IS_ERR(sctx->fallback.blk)) {
|
||||
if (IS_ERR(sctx->fallback.skcipher)) {
|
||||
pr_err("Allocating AES fallback algorithm %s failed\n",
|
||||
name);
|
||||
return PTR_ERR(sctx->fallback.blk);
|
||||
return PTR_ERR(sctx->fallback.skcipher);
|
||||
}
|
||||
|
||||
crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
|
||||
crypto_skcipher_reqsize(sctx->fallback.skcipher));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void fallback_exit_blk(struct crypto_tfm *tfm)
|
||||
static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
crypto_free_sync_skcipher(sctx->fallback.blk);
|
||||
crypto_free_skcipher(sctx->fallback.skcipher);
|
||||
}
|
||||
|
||||
static struct crypto_alg ecb_aes_alg = {
|
||||
.cra_name = "ecb(aes)",
|
||||
.cra_driver_name = "ecb-aes-s390",
|
||||
.cra_priority = 401, /* combo: aes + ecb + 1 */
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct s390_aes_ctx),
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = fallback_init_blk,
|
||||
.cra_exit = fallback_exit_blk,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.setkey = ecb_aes_set_key,
|
||||
.encrypt = ecb_aes_encrypt,
|
||||
.decrypt = ecb_aes_decrypt,
|
||||
}
|
||||
}
|
||||
static struct skcipher_alg ecb_aes_alg = {
|
||||
.base.cra_name = "ecb(aes)",
|
||||
.base.cra_driver_name = "ecb-aes-s390",
|
||||
.base.cra_priority = 401, /* combo: aes + ecb + 1 */
|
||||
.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
|
||||
.base.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.init = fallback_init_skcipher,
|
||||
.exit = fallback_exit_skcipher,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.setkey = ecb_aes_set_key,
|
||||
.encrypt = ecb_aes_encrypt,
|
||||
.decrypt = ecb_aes_decrypt,
|
||||
};
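A hypothetical synchronous caller of the converted "ecb-aes-s390" skcipher, using only standard crypto API calls; this is the request-based usage pattern for the algorithms registered by this conversion. The buffer must be linearly mapped (for example kmalloc'ed) and a multiple of the AES block size:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int ecb_aes_example(const u8 *key, unsigned int keylen,
			   u8 *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, NULL);	/* ECB: no IV */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}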
|
||||
|
||||
static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
|
||||
unsigned long fc;
|
||||
|
||||
/* Pick the correct function code based on the key length */
|
||||
|
@@ -363,17 +319,18 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
|||
/* Check if the function code is available */
|
||||
sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
|
||||
if (!sctx->fc)
|
||||
return setkey_fallback_blk(tfm, in_key, key_len);
|
||||
return setkey_fallback_skcipher(tfm, in_key, key_len);
|
||||
|
||||
sctx->key_len = key_len;
|
||||
memcpy(sctx->key, in_key, key_len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
|
||||
struct blkcipher_walk *walk)
|
||||
static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes, n;
|
||||
int ret;
|
||||
struct {
|
||||
|
@ -381,134 +338,74 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
|
|||
u8 key[AES_MAX_KEY_SIZE];
|
||||
} param;
|
||||
|
||||
ret = blkcipher_walk_virt(desc, walk);
|
||||
memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
|
||||
if (unlikely(!sctx->fc))
|
||||
return fallback_skcipher_crypt(sctx, req, modifier);
|
||||
|
||||
ret = skcipher_walk_virt(&walk, req, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
|
||||
memcpy(param.key, sctx->key, sctx->key_len);
|
||||
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
/* only use complete blocks */
|
||||
n = nbytes & ~(AES_BLOCK_SIZE - 1);
|
||||
cpacf_kmc(sctx->fc | modifier, ¶m,
|
||||
walk->dst.virt.addr, walk->src.virt.addr, n);
|
||||
ret = blkcipher_walk_done(desc, walk, nbytes - n);
|
||||
walk.dst.virt.addr, walk.src.virt.addr, n);
|
||||
memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
|
||||
ret = skcipher_walk_done(&walk, nbytes - n);
|
||||
}
|
||||
memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int cbc_aes_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int cbc_aes_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
if (unlikely(!sctx->fc))
|
||||
return fallback_blk_enc(desc, dst, src, nbytes);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return cbc_aes_crypt(desc, 0, &walk);
|
||||
return cbc_aes_crypt(req, 0);
|
||||
}
|
||||
|
||||
static int cbc_aes_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int cbc_aes_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
if (unlikely(!sctx->fc))
|
||||
return fallback_blk_dec(desc, dst, src, nbytes);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
|
||||
return cbc_aes_crypt(req, CPACF_DECRYPT);
|
||||
}
|
||||
|
||||
static struct crypto_alg cbc_aes_alg = {
|
||||
.cra_name = "cbc(aes)",
|
||||
.cra_driver_name = "cbc-aes-s390",
|
||||
.cra_priority = 402, /* ecb-aes-s390 + 1 */
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct s390_aes_ctx),
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = fallback_init_blk,
|
||||
.cra_exit = fallback_exit_blk,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = cbc_aes_set_key,
|
||||
.encrypt = cbc_aes_encrypt,
|
||||
.decrypt = cbc_aes_decrypt,
|
||||
}
|
||||
}
|
||||
static struct skcipher_alg cbc_aes_alg = {
|
||||
.base.cra_name = "cbc(aes)",
|
||||
.base.cra_driver_name = "cbc-aes-s390",
|
||||
.base.cra_priority = 402, /* ecb-aes-s390 + 1 */
|
||||
.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
|
||||
.base.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.init = fallback_init_skcipher,
|
||||
.exit = fallback_exit_skcipher,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = cbc_aes_set_key,
|
||||
.encrypt = cbc_aes_encrypt,
|
||||
.decrypt = cbc_aes_decrypt,
|
||||
};
|
||||
|
||||
static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int len)
|
||||
static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int len)
|
||||
{
|
||||
struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
|
||||
unsigned int ret;
|
||||
|
||||
crypto_sync_skcipher_clear_flags(xts_ctx->fallback,
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
crypto_sync_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
|
||||
ret = crypto_sync_skcipher_setkey(xts_ctx->fallback, key, len);
|
||||
|
||||
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
|
||||
tfm->crt_flags |= crypto_sync_skcipher_get_flags(xts_ctx->fallback) &
|
||||
CRYPTO_TFM_RES_MASK;
|
||||
struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
|
||||
int ret;
|
||||
|
||||
crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_skcipher_set_flags(xts_ctx->fallback,
|
||||
crypto_skcipher_get_flags(tfm) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);
|
||||
crypto_skcipher_set_flags(tfm,
|
||||
crypto_skcipher_get_flags(xts_ctx->fallback) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int xts_fallback_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct crypto_blkcipher *tfm = desc->tfm;
|
||||
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
|
||||
SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
|
||||
unsigned int ret;
|
||||
|
||||
skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
|
||||
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
|
||||
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
|
||||
|
||||
ret = crypto_skcipher_decrypt(req);
|
||||
|
||||
skcipher_request_zero(req);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int xts_fallback_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct crypto_blkcipher *tfm = desc->tfm;
|
||||
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
|
||||
SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
|
||||
unsigned int ret;
|
||||
|
||||
skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
|
||||
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
|
||||
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
|
||||
|
||||
ret = crypto_skcipher_encrypt(req);
|
||||
|
||||
skcipher_request_zero(req);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
|
||||
struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
|
||||
unsigned long fc;
|
||||
int err;
|
||||
|
||||
|
@ -518,7 +415,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
|||
|
||||
/* In fips mode only 128 bit or 256 bit keys are valid */
|
||||
if (fips_enabled && key_len != 32 && key_len != 64) {
|
||||
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -539,10 +436,11 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
|
||||
struct blkcipher_walk *walk)
|
||||
static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
|
||||
{
|
||||
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int offset, nbytes, n;
|
||||
int ret;
|
||||
struct {
|
||||
|
@ -557,113 +455,100 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
|
|||
u8 init[16];
|
||||
} xts_param;
|
||||
|
||||
ret = blkcipher_walk_virt(desc, walk);
|
||||
if (req->cryptlen < AES_BLOCK_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
|
||||
struct skcipher_request *subreq = skcipher_request_ctx(req);
|
||||
|
||||
*subreq = *req;
|
||||
skcipher_request_set_tfm(subreq, xts_ctx->fallback);
|
||||
return (modifier & CPACF_DECRYPT) ?
|
||||
crypto_skcipher_decrypt(subreq) :
|
||||
crypto_skcipher_encrypt(subreq);
|
||||
}
|
||||
|
||||
ret = skcipher_walk_virt(&walk, req, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
offset = xts_ctx->key_len & 0x10;
|
||||
memset(pcc_param.block, 0, sizeof(pcc_param.block));
|
||||
memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
|
||||
memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
|
||||
memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
|
||||
memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
|
||||
memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
|
||||
cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);
|
||||
|
||||
memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
|
||||
memcpy(xts_param.init, pcc_param.xts, 16);
|
||||
|
||||
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
/* only use complete blocks */
|
||||
n = nbytes & ~(AES_BLOCK_SIZE - 1);
|
||||
cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
|
||||
walk->dst.virt.addr, walk->src.virt.addr, n);
|
||||
ret = blkcipher_walk_done(desc, walk, nbytes - n);
|
||||
walk.dst.virt.addr, walk.src.virt.addr, n);
|
||||
ret = skcipher_walk_done(&walk, nbytes - n);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int xts_aes_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int xts_aes_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
if (!nbytes)
|
||||
return -EINVAL;
|
||||
|
||||
if (unlikely(!xts_ctx->fc || (nbytes % XTS_BLOCK_SIZE) != 0))
|
||||
return xts_fallback_encrypt(desc, dst, src, nbytes);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return xts_aes_crypt(desc, 0, &walk);
|
||||
return xts_aes_crypt(req, 0);
|
||||
}
|
||||
|
||||
static int xts_aes_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int xts_aes_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
if (!nbytes)
|
||||
return -EINVAL;
|
||||
|
||||
if (unlikely(!xts_ctx->fc || (nbytes % XTS_BLOCK_SIZE) != 0))
|
||||
return xts_fallback_decrypt(desc, dst, src, nbytes);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
|
||||
return xts_aes_crypt(req, CPACF_DECRYPT);
|
||||
}
|
||||
|
||||
static int xts_fallback_init(struct crypto_tfm *tfm)
|
||||
static int xts_fallback_init(struct crypto_skcipher *tfm)
|
||||
{
|
||||
const char *name = tfm->__crt_alg->cra_name;
|
||||
struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
|
||||
const char *name = crypto_tfm_alg_name(&tfm->base);
|
||||
struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
xts_ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
|
||||
CRYPTO_ALG_NEED_FALLBACK);
|
||||
xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
|
||||
CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
|
||||
|
||||
if (IS_ERR(xts_ctx->fallback)) {
|
||||
pr_err("Allocating XTS fallback algorithm %s failed\n",
|
||||
name);
|
||||
return PTR_ERR(xts_ctx->fallback);
|
||||
}
|
||||
crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
|
||||
crypto_skcipher_reqsize(xts_ctx->fallback));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void xts_fallback_exit(struct crypto_tfm *tfm)
|
||||
static void xts_fallback_exit(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
|
||||
struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
crypto_free_sync_skcipher(xts_ctx->fallback);
|
||||
crypto_free_skcipher(xts_ctx->fallback);
|
||||
}
|
||||
|
||||
static struct crypto_alg xts_aes_alg = {
|
||||
.cra_name = "xts(aes)",
|
||||
.cra_driver_name = "xts-aes-s390",
|
||||
.cra_priority = 402, /* ecb-aes-s390 + 1 */
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct s390_xts_ctx),
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = xts_fallback_init,
|
||||
.cra_exit = xts_fallback_exit,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = 2 * AES_MIN_KEY_SIZE,
|
||||
.max_keysize = 2 * AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = xts_aes_set_key,
|
||||
.encrypt = xts_aes_encrypt,
|
||||
.decrypt = xts_aes_decrypt,
|
||||
}
|
||||
}
|
||||
static struct skcipher_alg xts_aes_alg = {
|
||||
.base.cra_name = "xts(aes)",
|
||||
.base.cra_driver_name = "xts-aes-s390",
|
||||
.base.cra_priority = 402, /* ecb-aes-s390 + 1 */
|
||||
.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
|
||||
.base.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct s390_xts_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.init = xts_fallback_init,
|
||||
.exit = xts_fallback_exit,
|
||||
.min_keysize = 2 * AES_MIN_KEY_SIZE,
|
||||
.max_keysize = 2 * AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = xts_aes_set_key,
|
||||
.encrypt = xts_aes_encrypt,
|
||||
.decrypt = xts_aes_decrypt,
|
||||
};
|
||||
|
||||
static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
|
||||
unsigned long fc;
|
||||
|
||||
/* Pick the correct function code based on the key length */
|
||||
|
@ -674,7 +559,7 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
|||
/* Check if the function code is available */
|
||||
sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
|
||||
if (!sctx->fc)
|
||||
return setkey_fallback_blk(tfm, in_key, key_len);
|
||||
return setkey_fallback_skcipher(tfm, in_key, key_len);
|
||||
|
||||
sctx->key_len = key_len;
|
||||
memcpy(sctx->key, in_key, key_len);
|
||||
|
@ -696,30 +581,34 @@ static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
|
|||
return n;
|
||||
}
|
||||
|
||||
static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
|
||||
struct blkcipher_walk *walk)
|
||||
static int ctr_aes_crypt(struct skcipher_request *req)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
|
||||
u8 buf[AES_BLOCK_SIZE], *ctrptr;
|
||||
struct skcipher_walk walk;
|
||||
unsigned int n, nbytes;
|
||||
int ret, locked;
|
||||
|
||||
if (unlikely(!sctx->fc))
|
||||
return fallback_skcipher_crypt(sctx, req, 0);
|
||||
|
||||
locked = mutex_trylock(&ctrblk_lock);
|
||||
|
||||
ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
|
||||
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
|
||||
ret = skcipher_walk_virt(&walk, req, false);
|
||||
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
|
||||
n = AES_BLOCK_SIZE;
|
||||
|
||||
if (nbytes >= 2*AES_BLOCK_SIZE && locked)
|
||||
n = __ctrblk_init(ctrblk, walk->iv, nbytes);
|
||||
ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
|
||||
cpacf_kmctr(sctx->fc | modifier, sctx->key,
|
||||
walk->dst.virt.addr, walk->src.virt.addr,
|
||||
n, ctrptr);
|
||||
n = __ctrblk_init(ctrblk, walk.iv, nbytes);
|
||||
ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
|
||||
cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
|
||||
walk.src.virt.addr, n, ctrptr);
|
||||
if (ctrptr == ctrblk)
|
||||
memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
|
||||
memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
|
||||
AES_BLOCK_SIZE);
|
||||
crypto_inc(walk->iv, AES_BLOCK_SIZE);
|
||||
ret = blkcipher_walk_done(desc, walk, nbytes - n);
|
||||
crypto_inc(walk.iv, AES_BLOCK_SIZE);
|
||||
ret = skcipher_walk_done(&walk, nbytes - n);
|
||||
}
|
||||
if (locked)
|
||||
mutex_unlock(&ctrblk_lock);
|
||||
|
@ -727,67 +616,33 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
|
|||
* final block may be < AES_BLOCK_SIZE, copy only nbytes
|
||||
*/
|
||||
if (nbytes) {
|
||||
cpacf_kmctr(sctx->fc | modifier, sctx->key,
|
||||
buf, walk->src.virt.addr,
|
||||
AES_BLOCK_SIZE, walk->iv);
|
||||
memcpy(walk->dst.virt.addr, buf, nbytes);
|
||||
crypto_inc(walk->iv, AES_BLOCK_SIZE);
|
||||
ret = blkcipher_walk_done(desc, walk, 0);
|
||||
cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
|
||||
AES_BLOCK_SIZE, walk.iv);
|
||||
memcpy(walk.dst.virt.addr, buf, nbytes);
|
||||
crypto_inc(walk.iv, AES_BLOCK_SIZE);
|
||||
ret = skcipher_walk_done(&walk, 0);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ctr_aes_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
if (unlikely(!sctx->fc))
|
||||
return fallback_blk_enc(desc, dst, src, nbytes);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ctr_aes_crypt(desc, 0, &walk);
|
||||
}
|
||||
|
||||
static int ctr_aes_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
if (unlikely(!sctx->fc))
|
||||
return fallback_blk_dec(desc, dst, src, nbytes);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
|
||||
}
|
||||
|
||||
static struct crypto_alg ctr_aes_alg = {
|
||||
.cra_name = "ctr(aes)",
|
||||
.cra_driver_name = "ctr-aes-s390",
|
||||
.cra_priority = 402, /* ecb-aes-s390 + 1 */
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct s390_aes_ctx),
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = fallback_init_blk,
|
||||
.cra_exit = fallback_exit_blk,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ctr_aes_set_key,
|
||||
.encrypt = ctr_aes_encrypt,
|
||||
.decrypt = ctr_aes_decrypt,
|
||||
}
|
||||
}
|
||||
static struct skcipher_alg ctr_aes_alg = {
|
||||
.base.cra_name = "ctr(aes)",
|
||||
.base.cra_driver_name = "ctr-aes-s390",
|
||||
.base.cra_priority = 402, /* ecb-aes-s390 + 1 */
|
||||
.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.init = fallback_init_skcipher,
|
||||
.exit = fallback_exit_skcipher,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ctr_aes_set_key,
|
||||
.encrypt = ctr_aes_crypt,
|
||||
.decrypt = ctr_aes_crypt,
|
||||
.chunksize = AES_BLOCK_SIZE,
|
||||
};
|
||||
|
||||
static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
|
||||
|
@ -1116,24 +971,27 @@ static struct aead_alg gcm_aes_aead = {
|
|||
},
|
||||
};
|
||||
|
||||
static struct crypto_alg *aes_s390_algs_ptr[5];
|
||||
static int aes_s390_algs_num;
|
||||
static struct crypto_alg *aes_s390_alg;
|
||||
static struct skcipher_alg *aes_s390_skcipher_algs[4];
|
||||
static int aes_s390_skciphers_num;
|
||||
static struct aead_alg *aes_s390_aead_alg;
|
||||
|
||||
static int aes_s390_register_alg(struct crypto_alg *alg)
|
||||
static int aes_s390_register_skcipher(struct skcipher_alg *alg)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = crypto_register_alg(alg);
|
||||
ret = crypto_register_skcipher(alg);
|
||||
if (!ret)
|
||||
aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
|
||||
aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void aes_s390_fini(void)
|
||||
{
|
||||
while (aes_s390_algs_num--)
|
||||
crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
|
||||
if (aes_s390_alg)
|
||||
crypto_unregister_alg(aes_s390_alg);
|
||||
while (aes_s390_skciphers_num--)
|
||||
crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
|
||||
if (ctrblk)
|
||||
free_page((unsigned long) ctrblk);
|
||||
|
||||
|
@ -1154,10 +1012,11 @@ static int __init aes_s390_init(void)
|
|||
if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
|
||||
cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
|
||||
cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
|
||||
ret = aes_s390_register_alg(&aes_alg);
|
||||
ret = crypto_register_alg(&aes_alg);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
ret = aes_s390_register_alg(&ecb_aes_alg);
|
||||
aes_s390_alg = &aes_alg;
|
||||
ret = aes_s390_register_skcipher(&ecb_aes_alg);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
}
|
||||
|
@ -1165,14 +1024,14 @@ static int __init aes_s390_init(void)
|
|||
if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
|
||||
cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
|
||||
cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
|
||||
ret = aes_s390_register_alg(&cbc_aes_alg);
|
||||
ret = aes_s390_register_skcipher(&cbc_aes_alg);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
|
||||
cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
|
||||
ret = aes_s390_register_alg(&xts_aes_alg);
|
||||
ret = aes_s390_register_skcipher(&xts_aes_alg);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
}
|
||||
|
@ -1185,7 +1044,7 @@ static int __init aes_s390_init(void)
|
|||
ret = -ENOMEM;
|
||||
goto out_err;
|
||||
}
|
||||
ret = aes_s390_register_alg(&ctr_aes_alg);
|
||||
ret = aes_s390_register_skcipher(&ctr_aes_alg);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include <linux/mutex.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/des.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <asm/cpacf.h>
|
||||
|
||||
#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
|
||||
|
@ -45,6 +46,12 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int des_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
return des_setkey(crypto_skcipher_tfm(tfm), key, key_len);
|
||||
}
|
||||
|
||||
static void s390_des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
@ -79,28 +86,30 @@ static struct crypto_alg des_alg = {
|
|||
}
|
||||
};
|
||||
|
||||
static int ecb_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
|
||||
struct blkcipher_walk *walk)
|
||||
static int ecb_desall_crypt(struct skcipher_request *req, unsigned long fc)
|
||||
{
|
||||
struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes, n;
|
||||
int ret;
|
||||
|
||||
ret = blkcipher_walk_virt(desc, walk);
|
||||
while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
|
||||
ret = skcipher_walk_virt(&walk, req, false);
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
/* only use complete blocks */
|
||||
n = nbytes & ~(DES_BLOCK_SIZE - 1);
|
||||
cpacf_km(fc, ctx->key, walk->dst.virt.addr,
|
||||
walk->src.virt.addr, n);
|
||||
ret = blkcipher_walk_done(desc, walk, nbytes - n);
|
||||
cpacf_km(fc, ctx->key, walk.dst.virt.addr,
|
||||
walk.src.virt.addr, n);
|
||||
ret = skcipher_walk_done(&walk, nbytes - n);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int cbc_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
|
||||
struct blkcipher_walk *walk)
|
||||
static int cbc_desall_crypt(struct skcipher_request *req, unsigned long fc)
|
||||
{
|
||||
struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes, n;
|
||||
int ret;
|
||||
struct {
|
||||
|
@ -108,99 +117,69 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
|
|||
u8 key[DES3_KEY_SIZE];
|
||||
} param;
|
||||
|
||||
ret = blkcipher_walk_virt(desc, walk);
|
||||
memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
|
||||
ret = skcipher_walk_virt(&walk, req, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
memcpy(param.iv, walk.iv, DES_BLOCK_SIZE);
|
||||
memcpy(param.key, ctx->key, DES3_KEY_SIZE);
|
||||
while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
/* only use complete blocks */
|
||||
n = nbytes & ~(DES_BLOCK_SIZE - 1);
|
||||
cpacf_kmc(fc, ¶m, walk->dst.virt.addr,
|
||||
walk->src.virt.addr, n);
|
||||
ret = blkcipher_walk_done(desc, walk, nbytes - n);
|
||||
cpacf_kmc(fc, ¶m, walk.dst.virt.addr,
|
||||
walk.src.virt.addr, n);
|
||||
memcpy(walk.iv, param.iv, DES_BLOCK_SIZE);
|
||||
ret = skcipher_walk_done(&walk, nbytes - n);
|
||||
}
|
||||
memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ecb_des_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ecb_des_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_desall_crypt(desc, CPACF_KM_DEA, &walk);
|
||||
return ecb_desall_crypt(req, CPACF_KM_DEA);
|
||||
}
|
||||
|
||||
static int ecb_des_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ecb_des_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_desall_crypt(desc, CPACF_KM_DEA | CPACF_DECRYPT, &walk);
|
||||
return ecb_desall_crypt(req, CPACF_KM_DEA | CPACF_DECRYPT);
|
||||
}
|
||||
|
||||
static struct crypto_alg ecb_des_alg = {
|
||||
.cra_name = "ecb(des)",
|
||||
.cra_driver_name = "ecb-des-s390",
|
||||
.cra_priority = 400, /* combo: des + ecb */
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct s390_des_ctx),
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = DES_KEY_SIZE,
|
||||
.max_keysize = DES_KEY_SIZE,
|
||||
.setkey = des_setkey,
|
||||
.encrypt = ecb_des_encrypt,
|
||||
.decrypt = ecb_des_decrypt,
|
||||
}
|
||||
}
|
||||
static struct skcipher_alg ecb_des_alg = {
|
||||
.base.cra_name = "ecb(des)",
|
||||
.base.cra_driver_name = "ecb-des-s390",
|
||||
.base.cra_priority = 400, /* combo: des + ecb */
|
||||
.base.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = DES_KEY_SIZE,
|
||||
.max_keysize = DES_KEY_SIZE,
|
||||
.setkey = des_setkey_skcipher,
|
||||
.encrypt = ecb_des_encrypt,
|
||||
.decrypt = ecb_des_decrypt,
|
||||
};
|
||||
|
||||
static int cbc_des_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int cbc_des_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return cbc_desall_crypt(desc, CPACF_KMC_DEA, &walk);
|
||||
return cbc_desall_crypt(req, CPACF_KMC_DEA);
|
||||
}
|
||||
|
||||
static int cbc_des_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int cbc_des_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return cbc_desall_crypt(desc, CPACF_KMC_DEA | CPACF_DECRYPT, &walk);
|
||||
return cbc_desall_crypt(req, CPACF_KMC_DEA | CPACF_DECRYPT);
|
||||
}
|
||||
|
||||
static struct crypto_alg cbc_des_alg = {
|
||||
.cra_name = "cbc(des)",
|
||||
.cra_driver_name = "cbc-des-s390",
|
||||
.cra_priority = 400, /* combo: des + cbc */
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct s390_des_ctx),
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = DES_KEY_SIZE,
|
||||
.max_keysize = DES_KEY_SIZE,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.setkey = des_setkey,
|
||||
.encrypt = cbc_des_encrypt,
|
||||
.decrypt = cbc_des_decrypt,
|
||||
}
|
||||
}
|
||||
static struct skcipher_alg cbc_des_alg = {
|
||||
.base.cra_name = "cbc(des)",
|
||||
.base.cra_driver_name = "cbc-des-s390",
|
||||
.base.cra_priority = 400, /* combo: des + cbc */
|
||||
.base.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = DES_KEY_SIZE,
|
||||
.max_keysize = DES_KEY_SIZE,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.setkey = des_setkey_skcipher,
|
||||
.encrypt = cbc_des_encrypt,
|
||||
.decrypt = cbc_des_decrypt,
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -232,6 +211,12 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int des3_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
return des3_setkey(crypto_skcipher_tfm(tfm), key, key_len);
|
||||
}
|
||||
|
||||
static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
{
|
||||
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
@ -266,87 +251,53 @@ static struct crypto_alg des3_alg = {
|
|||
}
|
||||
};
|
||||
|
||||
static int ecb_des3_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ecb_des3_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_desall_crypt(desc, CPACF_KM_TDEA_192, &walk);
|
||||
return ecb_desall_crypt(req, CPACF_KM_TDEA_192);
|
||||
}
|
||||
|
||||
static int ecb_des3_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ecb_des3_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_desall_crypt(desc, CPACF_KM_TDEA_192 | CPACF_DECRYPT,
|
||||
&walk);
|
||||
return ecb_desall_crypt(req, CPACF_KM_TDEA_192 | CPACF_DECRYPT);
|
||||
}
|
||||
|
||||
static struct crypto_alg ecb_des3_alg = {
|
||||
.cra_name = "ecb(des3_ede)",
|
||||
.cra_driver_name = "ecb-des3_ede-s390",
|
||||
.cra_priority = 400, /* combo: des3 + ecb */
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct s390_des_ctx),
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = DES3_KEY_SIZE,
|
||||
.max_keysize = DES3_KEY_SIZE,
|
||||
.setkey = des3_setkey,
|
||||
.encrypt = ecb_des3_encrypt,
|
||||
.decrypt = ecb_des3_decrypt,
|
||||
}
|
||||
}
|
||||
static struct skcipher_alg ecb_des3_alg = {
|
||||
.base.cra_name = "ecb(des3_ede)",
|
||||
.base.cra_driver_name = "ecb-des3_ede-s390",
|
||||
.base.cra_priority = 400, /* combo: des3 + ecb */
|
||||
.base.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = DES3_KEY_SIZE,
|
||||
.max_keysize = DES3_KEY_SIZE,
|
||||
.setkey = des3_setkey_skcipher,
|
||||
.encrypt = ecb_des3_encrypt,
|
||||
.decrypt = ecb_des3_decrypt,
|
||||
};
|
||||
|
||||
static int cbc_des3_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int cbc_des3_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192, &walk);
|
||||
return cbc_desall_crypt(req, CPACF_KMC_TDEA_192);
|
||||
}
|
||||
|
||||
static int cbc_des3_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int cbc_des3_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192 | CPACF_DECRYPT,
|
||||
&walk);
|
||||
return cbc_desall_crypt(req, CPACF_KMC_TDEA_192 | CPACF_DECRYPT);
|
||||
}
|
||||
|
||||
static struct crypto_alg cbc_des3_alg = {
|
||||
.cra_name = "cbc(des3_ede)",
|
||||
.cra_driver_name = "cbc-des3_ede-s390",
|
||||
.cra_priority = 400, /* combo: des3 + cbc */
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct s390_des_ctx),
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = DES3_KEY_SIZE,
|
||||
.max_keysize = DES3_KEY_SIZE,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.setkey = des3_setkey,
|
||||
.encrypt = cbc_des3_encrypt,
|
||||
.decrypt = cbc_des3_decrypt,
|
||||
}
|
||||
}
|
||||
static struct skcipher_alg cbc_des3_alg = {
|
||||
.base.cra_name = "cbc(des3_ede)",
|
||||
.base.cra_driver_name = "cbc-des3_ede-s390",
|
||||
.base.cra_priority = 400, /* combo: des3 + cbc */
|
||||
.base.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = DES3_KEY_SIZE,
|
||||
.max_keysize = DES3_KEY_SIZE,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.setkey = des3_setkey_skcipher,
|
||||
.encrypt = cbc_des3_encrypt,
|
||||
.decrypt = cbc_des3_decrypt,
|
||||
};
|
||||
|
||||
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
|
||||
|
@ -364,128 +315,90 @@ static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
|
|||
return n;
|
||||
}
|
||||
|
||||
static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
|
||||
struct blkcipher_walk *walk)
|
||||
static int ctr_desall_crypt(struct skcipher_request *req, unsigned long fc)
|
||||
{
|
||||
struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
u8 buf[DES_BLOCK_SIZE], *ctrptr;
|
||||
struct skcipher_walk walk;
|
||||
unsigned int n, nbytes;
|
||||
int ret, locked;
|
||||
|
||||
locked = mutex_trylock(&ctrblk_lock);
|
||||
|
||||
ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
|
||||
while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
|
||||
ret = skcipher_walk_virt(&walk, req, false);
|
||||
while ((nbytes = walk.nbytes) >= DES_BLOCK_SIZE) {
|
||||
n = DES_BLOCK_SIZE;
|
||||
if (nbytes >= 2*DES_BLOCK_SIZE && locked)
|
||||
n = __ctrblk_init(ctrblk, walk->iv, nbytes);
|
||||
ctrptr = (n > DES_BLOCK_SIZE) ? ctrblk : walk->iv;
|
||||
cpacf_kmctr(fc, ctx->key, walk->dst.virt.addr,
|
||||
walk->src.virt.addr, n, ctrptr);
|
||||
n = __ctrblk_init(ctrblk, walk.iv, nbytes);
|
||||
ctrptr = (n > DES_BLOCK_SIZE) ? ctrblk : walk.iv;
|
||||
cpacf_kmctr(fc, ctx->key, walk.dst.virt.addr,
|
||||
walk.src.virt.addr, n, ctrptr);
|
||||
if (ctrptr == ctrblk)
|
||||
memcpy(walk->iv, ctrptr + n - DES_BLOCK_SIZE,
|
||||
memcpy(walk.iv, ctrptr + n - DES_BLOCK_SIZE,
|
||||
DES_BLOCK_SIZE);
|
||||
crypto_inc(walk->iv, DES_BLOCK_SIZE);
|
||||
ret = blkcipher_walk_done(desc, walk, nbytes - n);
|
||||
crypto_inc(walk.iv, DES_BLOCK_SIZE);
|
||||
ret = skcipher_walk_done(&walk, nbytes - n);
|
||||
}
|
||||
if (locked)
|
||||
mutex_unlock(&ctrblk_lock);
|
||||
/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
|
||||
if (nbytes) {
|
||||
cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,
|
||||
DES_BLOCK_SIZE, walk->iv);
|
||||
memcpy(walk->dst.virt.addr, buf, nbytes);
|
||||
crypto_inc(walk->iv, DES_BLOCK_SIZE);
|
||||
ret = blkcipher_walk_done(desc, walk, 0);
|
||||
cpacf_kmctr(fc, ctx->key, buf, walk.src.virt.addr,
|
||||
DES_BLOCK_SIZE, walk.iv);
|
||||
memcpy(walk.dst.virt.addr, buf, nbytes);
|
||||
crypto_inc(walk.iv, DES_BLOCK_SIZE);
|
||||
ret = skcipher_walk_done(&walk, 0);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ctr_des_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ctr_des_crypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ctr_desall_crypt(desc, CPACF_KMCTR_DEA, &walk);
|
||||
return ctr_desall_crypt(req, CPACF_KMCTR_DEA);
|
||||
}
|
||||
|
||||
static int ctr_des_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ctr_desall_crypt(desc, CPACF_KMCTR_DEA | CPACF_DECRYPT, &walk);
|
||||
}
|
||||
|
||||
static struct crypto_alg ctr_des_alg = {
|
||||
.cra_name = "ctr(des)",
|
||||
.cra_driver_name = "ctr-des-s390",
|
||||
.cra_priority = 400, /* combo: des + ctr */
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct s390_des_ctx),
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = DES_KEY_SIZE,
|
||||
.max_keysize = DES_KEY_SIZE,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.setkey = des_setkey,
|
||||
.encrypt = ctr_des_encrypt,
|
||||
.decrypt = ctr_des_decrypt,
|
||||
}
|
||||
}
|
||||
static struct skcipher_alg ctr_des_alg = {
|
||||
.base.cra_name = "ctr(des)",
|
||||
.base.cra_driver_name = "ctr-des-s390",
|
||||
.base.cra_priority = 400, /* combo: des + ctr */
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = DES_KEY_SIZE,
|
||||
.max_keysize = DES_KEY_SIZE,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.setkey = des_setkey_skcipher,
|
||||
.encrypt = ctr_des_crypt,
|
||||
.decrypt = ctr_des_crypt,
|
||||
.chunksize = DES_BLOCK_SIZE,
|
||||
};
|
||||
|
||||
static int ctr_des3_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ctr_des3_crypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192, &walk);
|
||||
return ctr_desall_crypt(req, CPACF_KMCTR_TDEA_192);
|
||||
}
|
||||
|
||||
static int ctr_des3_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192 | CPACF_DECRYPT,
|
||||
&walk);
|
||||
}
|
||||
|
||||
static struct crypto_alg ctr_des3_alg = {
|
||||
.cra_name = "ctr(des3_ede)",
|
||||
.cra_driver_name = "ctr-des3_ede-s390",
|
||||
.cra_priority = 400, /* combo: des3 + ede */
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct s390_des_ctx),
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = DES3_KEY_SIZE,
|
||||
.max_keysize = DES3_KEY_SIZE,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.setkey = des3_setkey,
|
||||
.encrypt = ctr_des3_encrypt,
|
||||
.decrypt = ctr_des3_decrypt,
|
||||
}
|
||||
}
|
||||
static struct skcipher_alg ctr_des3_alg = {
|
||||
.base.cra_name = "ctr(des3_ede)",
|
||||
.base.cra_driver_name = "ctr-des3_ede-s390",
|
||||
.base.cra_priority = 400, /* combo: des3 + ede */
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = DES3_KEY_SIZE,
|
||||
.max_keysize = DES3_KEY_SIZE,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.setkey = des3_setkey_skcipher,
|
||||
.encrypt = ctr_des3_crypt,
|
||||
.decrypt = ctr_des3_crypt,
|
||||
.chunksize = DES_BLOCK_SIZE,
|
||||
};
|
||||
|
||||
static struct crypto_alg *des_s390_algs_ptr[8];
|
||||
static struct crypto_alg *des_s390_algs_ptr[2];
|
||||
static int des_s390_algs_num;
|
||||
static struct skcipher_alg *des_s390_skciphers_ptr[6];
|
||||
static int des_s390_skciphers_num;
|
||||
|
||||
static int des_s390_register_alg(struct crypto_alg *alg)
|
||||
{
|
||||
|
@ -497,10 +410,22 @@ static int des_s390_register_alg(struct crypto_alg *alg)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int des_s390_register_skcipher(struct skcipher_alg *alg)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = crypto_register_skcipher(alg);
|
||||
if (!ret)
|
||||
des_s390_skciphers_ptr[des_s390_skciphers_num++] = alg;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void des_s390_exit(void)
|
||||
{
|
||||
while (des_s390_algs_num--)
|
||||
crypto_unregister_alg(des_s390_algs_ptr[des_s390_algs_num]);
|
||||
while (des_s390_skciphers_num--)
|
||||
crypto_unregister_skcipher(des_s390_skciphers_ptr[des_s390_skciphers_num]);
|
||||
if (ctrblk)
|
||||
free_page((unsigned long) ctrblk);
|
||||
}
|
||||
|
@ -518,12 +443,12 @@ static int __init des_s390_init(void)
|
|||
ret = des_s390_register_alg(&des_alg);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
ret = des_s390_register_alg(&ecb_des_alg);
|
||||
ret = des_s390_register_skcipher(&ecb_des_alg);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
}
|
||||
if (cpacf_test_func(&kmc_functions, CPACF_KMC_DEA)) {
|
||||
ret = des_s390_register_alg(&cbc_des_alg);
|
||||
ret = des_s390_register_skcipher(&cbc_des_alg);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
}
|
||||
|
@ -531,12 +456,12 @@ static int __init des_s390_init(void)
|
|||
ret = des_s390_register_alg(&des3_alg);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
ret = des_s390_register_alg(&ecb_des3_alg);
|
||||
ret = des_s390_register_skcipher(&ecb_des3_alg);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
}
|
||||
if (cpacf_test_func(&kmc_functions, CPACF_KMC_TDEA_192)) {
|
||||
ret = des_s390_register_alg(&cbc_des3_alg);
|
||||
ret = des_s390_register_skcipher(&cbc_des3_alg);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
}
|
||||
|
@ -551,12 +476,12 @@ static int __init des_s390_init(void)
|
|||
}
|
||||
|
||||
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA)) {
|
||||
ret = des_s390_register_alg(&ctr_des_alg);
|
||||
ret = des_s390_register_skcipher(&ctr_des_alg);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
}
|
||||
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
|
||||
ret = des_s390_register_alg(&ctr_des3_alg);
|
||||
ret = des_s390_register_skcipher(&ctr_des3_alg);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include <linux/cpufeature.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/xts.h>
|
||||
#include <asm/cpacf.h>
|
||||
#include <asm/pkey.h>
|
||||
|
@ -123,27 +124,27 @@ static int __paes_set_key(struct s390_paes_ctx *ctx)
|
|||
return ctx->fc ? 0 : -EINVAL;
|
||||
}
|
||||
|
||||
static int ecb_paes_init(struct crypto_tfm *tfm)
|
||||
static int ecb_paes_init(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
ctx->kb.key = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ecb_paes_exit(struct crypto_tfm *tfm)
|
||||
static void ecb_paes_exit(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
_free_kb_keybuf(&ctx->kb);
|
||||
}
|
||||
|
||||
static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
int rc;
|
||||
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
_free_kb_keybuf(&ctx->kb);
|
||||
rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
|
||||
|
@ -151,91 +152,75 @@ static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
|||
return rc;
|
||||
|
||||
if (__paes_set_key(ctx)) {
|
||||
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ecb_paes_crypt(struct blkcipher_desc *desc,
|
||||
unsigned long modifier,
|
||||
struct blkcipher_walk *walk)
|
||||
static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
|
||||
{
|
||||
struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes, n, k;
|
||||
int ret;
|
||||
|
||||
ret = blkcipher_walk_virt(desc, walk);
|
||||
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
|
||||
ret = skcipher_walk_virt(&walk, req, false);
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
/* only use complete blocks */
|
||||
n = nbytes & ~(AES_BLOCK_SIZE - 1);
|
||||
k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey,
|
||||
walk->dst.virt.addr, walk->src.virt.addr, n);
|
||||
walk.dst.virt.addr, walk.src.virt.addr, n);
|
||||
if (k)
|
||||
ret = blkcipher_walk_done(desc, walk, nbytes - k);
|
||||
ret = skcipher_walk_done(&walk, nbytes - k);
|
||||
if (k < n) {
|
||||
if (__paes_set_key(ctx) != 0)
|
||||
return blkcipher_walk_done(desc, walk, -EIO);
|
||||
return skcipher_walk_done(&walk, -EIO);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ecb_paes_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ecb_paes_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_paes_crypt(desc, CPACF_ENCRYPT, &walk);
|
||||
return ecb_paes_crypt(req, 0);
|
||||
}
|
||||
|
||||
static int ecb_paes_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ecb_paes_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_paes_crypt(desc, CPACF_DECRYPT, &walk);
|
||||
return ecb_paes_crypt(req, CPACF_DECRYPT);
|
||||
}
|
||||
|
||||
static struct crypto_alg ecb_paes_alg = {
|
||||
.cra_name = "ecb(paes)",
|
||||
.cra_driver_name = "ecb-paes-s390",
|
||||
.cra_priority = 401, /* combo: aes + ecb + 1 */
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct s390_paes_ctx),
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(ecb_paes_alg.cra_list),
|
||||
.cra_init = ecb_paes_init,
|
||||
.cra_exit = ecb_paes_exit,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = PAES_MIN_KEYSIZE,
|
||||
.max_keysize = PAES_MAX_KEYSIZE,
|
||||
.setkey = ecb_paes_set_key,
|
||||
.encrypt = ecb_paes_encrypt,
|
||||
.decrypt = ecb_paes_decrypt,
|
||||
}
|
||||
}
|
||||
static struct skcipher_alg ecb_paes_alg = {
|
||||
.base.cra_name = "ecb(paes)",
|
||||
.base.cra_driver_name = "ecb-paes-s390",
|
||||
.base.cra_priority = 401, /* combo: aes + ecb + 1 */
|
||||
.base.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct s390_paes_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.cra_list),
|
||||
.init = ecb_paes_init,
|
||||
.exit = ecb_paes_exit,
|
||||
.min_keysize = PAES_MIN_KEYSIZE,
|
||||
.max_keysize = PAES_MAX_KEYSIZE,
|
||||
.setkey = ecb_paes_set_key,
|
||||
.encrypt = ecb_paes_encrypt,
|
||||
.decrypt = ecb_paes_decrypt,
|
||||
};
|
||||
|
||||
static int cbc_paes_init(struct crypto_tfm *tfm)
|
||||
static int cbc_paes_init(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
ctx->kb.key = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cbc_paes_exit(struct crypto_tfm *tfm)
|
||||
static void cbc_paes_exit(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
_free_kb_keybuf(&ctx->kb);
|
||||
}
|
||||
|
@ -258,11 +243,11 @@ static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
|
|||
return ctx->fc ? 0 : -EINVAL;
|
||||
}
|
||||
|
||||
static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
int rc;
|
||||
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
_free_kb_keybuf(&ctx->kb);
|
||||
rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
|
||||
|
@ -270,16 +255,17 @@ static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
|||
return rc;
|
||||
|
||||
if (__cbc_paes_set_key(ctx)) {
|
||||
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
|
||||
struct blkcipher_walk *walk)
|
||||
static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
|
||||
{
|
||||
struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes, n, k;
|
||||
int ret;
|
||||
struct {
|
||||
|
@ -287,73 +273,60 @@ static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
|
|||
u8 key[MAXPROTKEYSIZE];
|
||||
} param;
|
||||
|
||||
ret = blkcipher_walk_virt(desc, walk);
|
||||
memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
|
||||
ret = skcipher_walk_virt(&walk, req, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
|
||||
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
|
||||
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
/* only use complete blocks */
|
||||
n = nbytes & ~(AES_BLOCK_SIZE - 1);
|
||||
k = cpacf_kmc(ctx->fc | modifier, ¶m,
|
||||
walk->dst.virt.addr, walk->src.virt.addr, n);
|
||||
if (k)
|
||||
ret = blkcipher_walk_done(desc, walk, nbytes - k);
|
||||
walk.dst.virt.addr, walk.src.virt.addr, n);
|
||||
if (k) {
|
||||
memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
|
||||
ret = skcipher_walk_done(&walk, nbytes - k);
|
||||
}
|
||||
if (k < n) {
|
||||
if (__cbc_paes_set_key(ctx) != 0)
|
||||
return blkcipher_walk_done(desc, walk, -EIO);
|
||||
return skcipher_walk_done(&walk, -EIO);
|
||||
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
|
||||
}
|
||||
}
|
||||
memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int cbc_paes_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int cbc_paes_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return cbc_paes_crypt(desc, 0, &walk);
|
||||
return cbc_paes_crypt(req, 0);
|
||||
}
|
||||
|
||||
static int cbc_paes_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int cbc_paes_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return cbc_paes_crypt(desc, CPACF_DECRYPT, &walk);
|
||||
return cbc_paes_crypt(req, CPACF_DECRYPT);
|
||||
}
|
||||
|
||||
static struct crypto_alg cbc_paes_alg = {
|
||||
.cra_name = "cbc(paes)",
|
||||
.cra_driver_name = "cbc-paes-s390",
|
||||
.cra_priority = 402, /* ecb-paes-s390 + 1 */
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct s390_paes_ctx),
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(cbc_paes_alg.cra_list),
|
||||
.cra_init = cbc_paes_init,
|
||||
.cra_exit = cbc_paes_exit,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = PAES_MIN_KEYSIZE,
|
||||
.max_keysize = PAES_MAX_KEYSIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = cbc_paes_set_key,
|
||||
.encrypt = cbc_paes_encrypt,
|
||||
.decrypt = cbc_paes_decrypt,
|
||||
}
|
||||
}
|
||||
static struct skcipher_alg cbc_paes_alg = {
|
||||
.base.cra_name = "cbc(paes)",
|
||||
.base.cra_driver_name = "cbc-paes-s390",
|
||||
.base.cra_priority = 402, /* ecb-paes-s390 + 1 */
|
||||
.base.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct s390_paes_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.cra_list),
|
||||
.init = cbc_paes_init,
|
||||
.exit = cbc_paes_exit,
|
||||
.min_keysize = PAES_MIN_KEYSIZE,
|
||||
.max_keysize = PAES_MAX_KEYSIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = cbc_paes_set_key,
|
||||
.encrypt = cbc_paes_encrypt,
|
||||
.decrypt = cbc_paes_decrypt,
|
||||
};
|
||||
|
||||
static int xts_paes_init(struct crypto_tfm *tfm)
static int xts_paes_init(struct crypto_skcipher *tfm)
{
struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

ctx->kb[0].key = NULL;
ctx->kb[1].key = NULL;
@@ -361,9 +334,9 @@ static int xts_paes_init(struct crypto_tfm *tfm)
return 0;
}

static void xts_paes_exit(struct crypto_tfm *tfm)
static void xts_paes_exit(struct crypto_skcipher *tfm)
{
struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

_free_kb_keybuf(&ctx->kb[0]);
_free_kb_keybuf(&ctx->kb[1]);
@@ -391,11 +364,11 @@ static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
return ctx->fc ? 0 : -EINVAL;
}

static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int xts_key_len)
{
int rc;
struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 ckey[2 * AES_MAX_KEY_SIZE];
unsigned int ckey_len, key_len;

@@ -414,7 +387,7 @@ static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return rc;

if (__xts_paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}

@@ -427,13 +400,14 @@ static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
AES_KEYSIZE_128 : AES_KEYSIZE_256;
memcpy(ckey, ctx->pk[0].protkey, ckey_len);
memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
return xts_check_key(tfm, ckey, 2*ckey_len);
return xts_verify_key(tfm, ckey, 2*ckey_len);
}

static int xts_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
struct blkcipher_walk *walk)
static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
struct s390_pxts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int keylen, offset, nbytes, n, k;
int ret;
struct {
@@ -448,90 +422,76 @@ static int xts_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
u8 init[16];
} xts_param;

ret = blkcipher_walk_virt(desc, walk);
ret = skcipher_walk_virt(&walk, req, false);
if (ret)
return ret;
keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
retry:
memset(&pcc_param, 0, sizeof(pcc_param));
memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
cpacf_pcc(ctx->fc, pcc_param.key + offset);

memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
memcpy(xts_param.init, pcc_param.xts, 16);

while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
walk->dst.virt.addr, walk->src.virt.addr, n);
walk.dst.virt.addr, walk.src.virt.addr, n);
if (k)
ret = blkcipher_walk_done(desc, walk, nbytes - k);
ret = skcipher_walk_done(&walk, nbytes - k);
if (k < n) {
if (__xts_paes_set_key(ctx) != 0)
return blkcipher_walk_done(desc, walk, -EIO);
return skcipher_walk_done(&walk, -EIO);
goto retry;
}
}
return ret;
}

static int xts_paes_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
static int xts_paes_encrypt(struct skcipher_request *req)
{
struct blkcipher_walk walk;

blkcipher_walk_init(&walk, dst, src, nbytes);
return xts_paes_crypt(desc, 0, &walk);
return xts_paes_crypt(req, 0);
}

static int xts_paes_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
static int xts_paes_decrypt(struct skcipher_request *req)
{
struct blkcipher_walk walk;

blkcipher_walk_init(&walk, dst, src, nbytes);
return xts_paes_crypt(desc, CPACF_DECRYPT, &walk);
return xts_paes_crypt(req, CPACF_DECRYPT);
}

static struct crypto_alg xts_paes_alg = {
.cra_name = "xts(paes)",
.cra_driver_name = "xts-paes-s390",
.cra_priority = 402, /* ecb-paes-s390 + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_pxts_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(xts_paes_alg.cra_list),
.cra_init = xts_paes_init,
.cra_exit = xts_paes_exit,
.cra_u = {
.blkcipher = {
.min_keysize = 2 * PAES_MIN_KEYSIZE,
.max_keysize = 2 * PAES_MAX_KEYSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = xts_paes_set_key,
.encrypt = xts_paes_encrypt,
.decrypt = xts_paes_decrypt,
}
}
static struct skcipher_alg xts_paes_alg = {
.base.cra_name = "xts(paes)",
.base.cra_driver_name = "xts-paes-s390",
.base.cra_priority = 402, /* ecb-paes-s390 + 1 */
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct s390_pxts_ctx),
.base.cra_module = THIS_MODULE,
.base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.cra_list),
.init = xts_paes_init,
.exit = xts_paes_exit,
.min_keysize = 2 * PAES_MIN_KEYSIZE,
.max_keysize = 2 * PAES_MAX_KEYSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = xts_paes_set_key,
.encrypt = xts_paes_encrypt,
.decrypt = xts_paes_decrypt,
};

static int ctr_paes_init(struct crypto_tfm *tfm)
static int ctr_paes_init(struct crypto_skcipher *tfm)
{
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

ctx->kb.key = NULL;

return 0;
}

static void ctr_paes_exit(struct crypto_tfm *tfm)
static void ctr_paes_exit(struct crypto_skcipher *tfm)
{
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

_free_kb_keybuf(&ctx->kb);
}
@@ -555,11 +515,11 @@ static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
return ctx->fc ? 0 : -EINVAL;
}

static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
int rc;
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

_free_kb_keybuf(&ctx->kb);
rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
@@ -567,7 +527,7 @@ static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return rc;

if (__ctr_paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return 0;
@@ -588,37 +548,37 @@ static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
|
|||
return n;
|
||||
}
|
||||
|
||||
static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
|
||||
struct blkcipher_walk *walk)
|
||||
static int ctr_paes_crypt(struct skcipher_request *req)
|
||||
{
|
||||
struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
u8 buf[AES_BLOCK_SIZE], *ctrptr;
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes, n, k;
|
||||
int ret, locked;
|
||||
|
||||
locked = spin_trylock(&ctrblk_lock);
|
||||
|
||||
ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
|
||||
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
|
||||
ret = skcipher_walk_virt(&walk, req, false);
|
||||
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
|
||||
n = AES_BLOCK_SIZE;
|
||||
if (nbytes >= 2*AES_BLOCK_SIZE && locked)
|
||||
n = __ctrblk_init(ctrblk, walk->iv, nbytes);
|
||||
ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
|
||||
k = cpacf_kmctr(ctx->fc | modifier, ctx->pk.protkey,
|
||||
walk->dst.virt.addr, walk->src.virt.addr,
|
||||
n, ctrptr);
|
||||
n = __ctrblk_init(ctrblk, walk.iv, nbytes);
|
||||
ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
|
||||
k = cpacf_kmctr(ctx->fc, ctx->pk.protkey, walk.dst.virt.addr,
|
||||
walk.src.virt.addr, n, ctrptr);
|
||||
if (k) {
|
||||
if (ctrptr == ctrblk)
|
||||
memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE,
|
||||
memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
|
||||
AES_BLOCK_SIZE);
|
||||
crypto_inc(walk->iv, AES_BLOCK_SIZE);
|
||||
ret = blkcipher_walk_done(desc, walk, nbytes - n);
|
||||
crypto_inc(walk.iv, AES_BLOCK_SIZE);
|
||||
ret = skcipher_walk_done(&walk, nbytes - n);
|
||||
}
|
||||
if (k < n) {
|
||||
if (__ctr_paes_set_key(ctx) != 0) {
|
||||
if (locked)
|
||||
spin_unlock(&ctrblk_lock);
|
||||
return blkcipher_walk_done(desc, walk, -EIO);
|
||||
return skcipher_walk_done(&walk, -EIO);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -629,80 +589,54 @@ static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
|
|||
*/
|
||||
if (nbytes) {
|
||||
while (1) {
|
||||
if (cpacf_kmctr(ctx->fc | modifier,
|
||||
ctx->pk.protkey, buf,
|
||||
walk->src.virt.addr, AES_BLOCK_SIZE,
|
||||
walk->iv) == AES_BLOCK_SIZE)
|
||||
if (cpacf_kmctr(ctx->fc, ctx->pk.protkey, buf,
|
||||
walk.src.virt.addr, AES_BLOCK_SIZE,
|
||||
walk.iv) == AES_BLOCK_SIZE)
|
||||
break;
|
||||
if (__ctr_paes_set_key(ctx) != 0)
|
||||
return blkcipher_walk_done(desc, walk, -EIO);
|
||||
return skcipher_walk_done(&walk, -EIO);
|
||||
}
|
||||
memcpy(walk->dst.virt.addr, buf, nbytes);
|
||||
crypto_inc(walk->iv, AES_BLOCK_SIZE);
|
||||
ret = blkcipher_walk_done(desc, walk, 0);
|
||||
memcpy(walk.dst.virt.addr, buf, nbytes);
|
||||
crypto_inc(walk.iv, AES_BLOCK_SIZE);
|
||||
ret = skcipher_walk_done(&walk, 0);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ctr_paes_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ctr_paes_crypt(desc, 0, &walk);
|
||||
}
|
||||
|
||||
static int ctr_paes_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ctr_paes_crypt(desc, CPACF_DECRYPT, &walk);
|
||||
}
|
||||
|
||||
static struct crypto_alg ctr_paes_alg = {
|
||||
.cra_name = "ctr(paes)",
|
||||
.cra_driver_name = "ctr-paes-s390",
|
||||
.cra_priority = 402, /* ecb-paes-s390 + 1 */
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct s390_paes_ctx),
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(ctr_paes_alg.cra_list),
|
||||
.cra_init = ctr_paes_init,
|
||||
.cra_exit = ctr_paes_exit,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = PAES_MIN_KEYSIZE,
|
||||
.max_keysize = PAES_MAX_KEYSIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ctr_paes_set_key,
|
||||
.encrypt = ctr_paes_encrypt,
|
||||
.decrypt = ctr_paes_decrypt,
|
||||
}
|
||||
}
|
||||
static struct skcipher_alg ctr_paes_alg = {
|
||||
.base.cra_name = "ctr(paes)",
|
||||
.base.cra_driver_name = "ctr-paes-s390",
|
||||
.base.cra_priority = 402, /* ecb-paes-s390 + 1 */
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct s390_paes_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.cra_list),
|
||||
.init = ctr_paes_init,
|
||||
.exit = ctr_paes_exit,
|
||||
.min_keysize = PAES_MIN_KEYSIZE,
|
||||
.max_keysize = PAES_MAX_KEYSIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ctr_paes_set_key,
|
||||
.encrypt = ctr_paes_crypt,
|
||||
.decrypt = ctr_paes_crypt,
|
||||
.chunksize = AES_BLOCK_SIZE,
|
||||
};
|
||||
|
||||
static inline void __crypto_unregister_alg(struct crypto_alg *alg)
static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg)
{
if (!list_empty(&alg->cra_list))
crypto_unregister_alg(alg);
if (!list_empty(&alg->base.cra_list))
crypto_unregister_skcipher(alg);
}

static void paes_s390_fini(void)
{
if (ctrblk)
free_page((unsigned long) ctrblk);
__crypto_unregister_alg(&ctr_paes_alg);
__crypto_unregister_alg(&xts_paes_alg);
__crypto_unregister_alg(&cbc_paes_alg);
__crypto_unregister_alg(&ecb_paes_alg);
__crypto_unregister_skcipher(&ctr_paes_alg);
__crypto_unregister_skcipher(&xts_paes_alg);
__crypto_unregister_skcipher(&cbc_paes_alg);
__crypto_unregister_skcipher(&ecb_paes_alg);
}

static int __init paes_s390_init(void)
@@ -717,7 +651,7 @@ static int __init paes_s390_init(void)
if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
ret = crypto_register_alg(&ecb_paes_alg);
ret = crypto_register_skcipher(&ecb_paes_alg);
if (ret)
goto out_err;
}
@@ -725,14 +659,14 @@ static int __init paes_s390_init(void)
if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
ret = crypto_register_alg(&cbc_paes_alg);
ret = crypto_register_skcipher(&cbc_paes_alg);
if (ret)
goto out_err;
}

if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
ret = crypto_register_alg(&xts_paes_alg);
ret = crypto_register_skcipher(&xts_paes_alg);
if (ret)
goto out_err;
}
@@ -740,7 +674,7 @@ static int __init paes_s390_init(void)
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
ret = crypto_register_alg(&ctr_paes_alg);
ret = crypto_register_skcipher(&ctr_paes_alg);
if (ret)
goto out_err;
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

#include <asm/fpumacro.h>
#include <asm/pstate.h>
@@ -197,6 +198,12 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return 0;
}

static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
}

static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -211,131 +218,108 @@ static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
ctx->ops->decrypt(&ctx->key[0], (const u32 *) src, (u32 *) dst);
}

#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))

static int ecb_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
ctx->ops->load_encrypt_keys(&ctx->key[0]);
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
unsigned int block_len = nbytes & AES_BLOCK_MASK;
|
||||
|
||||
if (likely(block_len)) {
|
||||
ctx->ops->ecb_encrypt(&ctx->key[0],
|
||||
(const u64 *)walk.src.virt.addr,
|
||||
(u64 *) walk.dst.virt.addr,
|
||||
block_len);
|
||||
}
|
||||
nbytes &= AES_BLOCK_SIZE - 1;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
ctx->ops->ecb_encrypt(&ctx->key[0], walk.src.virt.addr,
|
||||
walk.dst.virt.addr,
|
||||
round_down(nbytes, AES_BLOCK_SIZE));
|
||||
err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
|
||||
}
|
||||
fprs_write(0);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ecb_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
u64 *key_end;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
const u64 *key_end;
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
ctx->ops->load_decrypt_keys(&ctx->key[0]);
|
||||
key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
unsigned int block_len = nbytes & AES_BLOCK_MASK;
|
||||
|
||||
if (likely(block_len)) {
|
||||
ctx->ops->ecb_decrypt(key_end,
|
||||
(const u64 *) walk.src.virt.addr,
|
||||
(u64 *) walk.dst.virt.addr, block_len);
|
||||
}
|
||||
nbytes &= AES_BLOCK_SIZE - 1;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
ctx->ops->ecb_decrypt(key_end, walk.src.virt.addr,
|
||||
walk.dst.virt.addr,
|
||||
round_down(nbytes, AES_BLOCK_SIZE));
|
||||
err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
|
||||
}
|
||||
fprs_write(0);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int cbc_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
ctx->ops->load_encrypt_keys(&ctx->key[0]);
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
unsigned int block_len = nbytes & AES_BLOCK_MASK;
|
||||
|
||||
if (likely(block_len)) {
|
||||
ctx->ops->cbc_encrypt(&ctx->key[0],
|
||||
(const u64 *)walk.src.virt.addr,
|
||||
(u64 *) walk.dst.virt.addr,
|
||||
block_len, (u64 *) walk.iv);
|
||||
}
|
||||
nbytes &= AES_BLOCK_SIZE - 1;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
ctx->ops->cbc_encrypt(&ctx->key[0], walk.src.virt.addr,
|
||||
walk.dst.virt.addr,
|
||||
round_down(nbytes, AES_BLOCK_SIZE),
|
||||
walk.iv);
|
||||
err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
|
||||
}
|
||||
fprs_write(0);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int cbc_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
u64 *key_end;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
const u64 *key_end;
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
ctx->ops->load_decrypt_keys(&ctx->key[0]);
|
||||
key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
unsigned int block_len = nbytes & AES_BLOCK_MASK;
|
||||
|
||||
if (likely(block_len)) {
|
||||
ctx->ops->cbc_decrypt(key_end,
|
||||
(const u64 *) walk.src.virt.addr,
|
||||
(u64 *) walk.dst.virt.addr,
|
||||
block_len, (u64 *) walk.iv);
|
||||
}
|
||||
nbytes &= AES_BLOCK_SIZE - 1;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
ctx->ops->cbc_decrypt(key_end, walk.src.virt.addr,
|
||||
walk.dst.virt.addr,
|
||||
round_down(nbytes, AES_BLOCK_SIZE),
|
||||
walk.iv);
|
||||
err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
|
||||
}
|
||||
fprs_write(0);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void ctr_crypt_final(struct crypto_sparc64_aes_ctx *ctx,
|
||||
struct blkcipher_walk *walk)
|
||||
static void ctr_crypt_final(const struct crypto_sparc64_aes_ctx *ctx,
|
||||
struct skcipher_walk *walk)
|
||||
{
|
||||
u8 *ctrblk = walk->iv;
|
||||
u64 keystream[AES_BLOCK_SIZE / sizeof(u64)];
|
||||
|
@ -349,40 +333,35 @@ static void ctr_crypt_final(struct crypto_sparc64_aes_ctx *ctx,
|
|||
crypto_inc(ctrblk, AES_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
static int ctr_crypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ctr_crypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
ctx->ops->load_encrypt_keys(&ctx->key[0]);
|
||||
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
|
||||
unsigned int block_len = nbytes & AES_BLOCK_MASK;
|
||||
|
||||
if (likely(block_len)) {
|
||||
ctx->ops->ctr_crypt(&ctx->key[0],
|
||||
(const u64 *)walk.src.virt.addr,
|
||||
(u64 *) walk.dst.virt.addr,
|
||||
block_len, (u64 *) walk.iv);
|
||||
}
|
||||
nbytes &= AES_BLOCK_SIZE - 1;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
ctx->ops->ctr_crypt(&ctx->key[0], walk.src.virt.addr,
|
||||
walk.dst.virt.addr,
|
||||
round_down(nbytes, AES_BLOCK_SIZE),
|
||||
walk.iv);
|
||||
err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
|
||||
}
|
||||
if (walk.nbytes) {
|
||||
ctr_crypt_final(ctx, &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
err = skcipher_walk_done(&walk, 0);
|
||||
}
|
||||
fprs_write(0);
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_alg algs[] = { {
|
||||
static struct crypto_alg cipher_alg = {
|
||||
.cra_name = "aes",
|
||||
.cra_driver_name = "aes-sparc64",
|
||||
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
|
@ -400,66 +379,53 @@ static struct crypto_alg algs[] = { {
|
|||
.cia_decrypt = crypto_aes_decrypt
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.cra_name = "ecb(aes)",
|
||||
.cra_driver_name = "ecb-aes-sparc64",
|
||||
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.setkey = aes_set_key,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(aes)",
|
||||
.cra_driver_name = "cbc-aes-sparc64",
|
||||
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = aes_set_key,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ctr(aes)",
|
||||
.cra_driver_name = "ctr-aes-sparc64",
|
||||
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = aes_set_key,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
},
|
||||
} };
|
||||
};
|
||||
|
||||
static struct skcipher_alg skcipher_algs[] = {
|
||||
{
|
||||
.base.cra_name = "ecb(aes)",
|
||||
.base.cra_driver_name = "ecb-aes-sparc64",
|
||||
.base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.base.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx),
|
||||
.base.cra_alignmask = 7,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.setkey = aes_set_key_skcipher,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "cbc(aes)",
|
||||
.base.cra_driver_name = "cbc-aes-sparc64",
|
||||
.base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.base.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx),
|
||||
.base.cra_alignmask = 7,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = aes_set_key_skcipher,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "ctr(aes)",
|
||||
.base.cra_driver_name = "ctr-aes-sparc64",
|
||||
.base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx),
|
||||
.base.cra_alignmask = 7,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = aes_set_key_skcipher,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
.chunksize = AES_BLOCK_SIZE,
|
||||
}
|
||||
};
|
||||
|
||||
static bool __init sparc64_has_aes_opcode(void)
{
@@ -477,17 +443,27 @@ static bool __init sparc64_has_aes_opcode(void)

static int __init aes_sparc64_mod_init(void)
{
if (sparc64_has_aes_opcode()) {
pr_info("Using sparc64 aes opcodes optimized AES implementation\n");
return crypto_register_algs(algs, ARRAY_SIZE(algs));
int err;

if (!sparc64_has_aes_opcode()) {
pr_info("sparc64 aes opcodes not available.\n");
return -ENODEV;
}
pr_info("sparc64 aes opcodes not available.\n");
return -ENODEV;
pr_info("Using sparc64 aes opcodes optimized AES implementation\n");
err = crypto_register_alg(&cipher_alg);
if (err)
return err;
err = crypto_register_skciphers(skcipher_algs,
ARRAY_SIZE(skcipher_algs));
if (err)
crypto_unregister_alg(&cipher_alg);
return err;
}

static void __exit aes_sparc64_mod_fini(void)
{
crypto_unregister_algs(algs, ARRAY_SIZE(algs));
crypto_unregister_alg(&cipher_alg);
crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
}

module_init(aes_sparc64_mod_init);
@ -12,6 +12,7 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/types.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
|
||||
#include <asm/fpumacro.h>
|
||||
#include <asm/pstate.h>
|
||||
|
@ -52,6 +53,12 @@ static int camellia_set_key(struct crypto_tfm *tfm, const u8 *_in_key,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int camellia_set_key_skcipher(struct crypto_skcipher *tfm,
|
||||
const u8 *in_key, unsigned int key_len)
|
||||
{
|
||||
return camellia_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
|
||||
}
|
||||
|
||||
extern void camellia_sparc64_crypt(const u64 *key, const u32 *input,
|
||||
u32 *output, unsigned int key_len);
|
||||
|
||||
|
@ -81,61 +88,46 @@ typedef void ecb_crypt_op(const u64 *input, u64 *output, unsigned int len,
|
|||
extern ecb_crypt_op camellia_sparc64_ecb_crypt_3_grand_rounds;
|
||||
extern ecb_crypt_op camellia_sparc64_ecb_crypt_4_grand_rounds;
|
||||
|
||||
#define CAMELLIA_BLOCK_MASK (~(CAMELLIA_BLOCK_SIZE - 1))
|
||||
|
||||
static int __ecb_crypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes, bool encrypt)
|
||||
static int __ecb_crypt(struct skcipher_request *req, bool encrypt)
|
||||
{
|
||||
struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct camellia_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
ecb_crypt_op *op;
|
||||
const u64 *key;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
op = camellia_sparc64_ecb_crypt_3_grand_rounds;
|
||||
if (ctx->key_len != 16)
|
||||
op = camellia_sparc64_ecb_crypt_4_grand_rounds;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (encrypt)
|
||||
key = &ctx->encrypt_key[0];
|
||||
else
|
||||
key = &ctx->decrypt_key[0];
|
||||
camellia_sparc64_load_keys(key, ctx->key_len);
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK;
|
||||
|
||||
if (likely(block_len)) {
|
||||
const u64 *src64;
|
||||
u64 *dst64;
|
||||
|
||||
src64 = (const u64 *)walk.src.virt.addr;
|
||||
dst64 = (u64 *) walk.dst.virt.addr;
|
||||
op(src64, dst64, block_len, key);
|
||||
}
|
||||
nbytes &= CAMELLIA_BLOCK_SIZE - 1;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
op(walk.src.virt.addr, walk.dst.virt.addr,
|
||||
round_down(nbytes, CAMELLIA_BLOCK_SIZE), key);
|
||||
err = skcipher_walk_done(&walk, nbytes % CAMELLIA_BLOCK_SIZE);
|
||||
}
|
||||
fprs_write(0);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ecb_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __ecb_crypt(desc, dst, src, nbytes, true);
|
||||
return __ecb_crypt(req, true);
|
||||
}
|
||||
|
||||
static int ecb_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __ecb_crypt(desc, dst, src, nbytes, false);
|
||||
return __ecb_crypt(req, false);
|
||||
}
|
||||
|
||||
typedef void cbc_crypt_op(const u64 *input, u64 *output, unsigned int len,
|
||||
|
@ -146,85 +138,65 @@ extern cbc_crypt_op camellia_sparc64_cbc_encrypt_4_grand_rounds;
|
|||
extern cbc_crypt_op camellia_sparc64_cbc_decrypt_3_grand_rounds;
|
||||
extern cbc_crypt_op camellia_sparc64_cbc_decrypt_4_grand_rounds;
|
||||
|
||||
static int cbc_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct camellia_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
cbc_crypt_op *op;
|
||||
const u64 *key;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
op = camellia_sparc64_cbc_encrypt_3_grand_rounds;
|
||||
if (ctx->key_len != 16)
|
||||
op = camellia_sparc64_cbc_encrypt_4_grand_rounds;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
key = &ctx->encrypt_key[0];
|
||||
camellia_sparc64_load_keys(key, ctx->key_len);
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK;
|
||||
|
||||
if (likely(block_len)) {
|
||||
const u64 *src64;
|
||||
u64 *dst64;
|
||||
|
||||
src64 = (const u64 *)walk.src.virt.addr;
|
||||
dst64 = (u64 *) walk.dst.virt.addr;
|
||||
op(src64, dst64, block_len, key,
|
||||
(u64 *) walk.iv);
|
||||
}
|
||||
nbytes &= CAMELLIA_BLOCK_SIZE - 1;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
op(walk.src.virt.addr, walk.dst.virt.addr,
|
||||
round_down(nbytes, CAMELLIA_BLOCK_SIZE), key, walk.iv);
|
||||
err = skcipher_walk_done(&walk, nbytes % CAMELLIA_BLOCK_SIZE);
|
||||
}
|
||||
fprs_write(0);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int cbc_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct camellia_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
cbc_crypt_op *op;
|
||||
const u64 *key;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
op = camellia_sparc64_cbc_decrypt_3_grand_rounds;
|
||||
if (ctx->key_len != 16)
|
||||
op = camellia_sparc64_cbc_decrypt_4_grand_rounds;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
key = &ctx->decrypt_key[0];
|
||||
camellia_sparc64_load_keys(key, ctx->key_len);
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK;
|
||||
|
||||
if (likely(block_len)) {
|
||||
const u64 *src64;
|
||||
u64 *dst64;
|
||||
|
||||
src64 = (const u64 *)walk.src.virt.addr;
|
||||
dst64 = (u64 *) walk.dst.virt.addr;
|
||||
op(src64, dst64, block_len, key,
|
||||
(u64 *) walk.iv);
|
||||
}
|
||||
nbytes &= CAMELLIA_BLOCK_SIZE - 1;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
op(walk.src.virt.addr, walk.dst.virt.addr,
|
||||
round_down(nbytes, CAMELLIA_BLOCK_SIZE), key, walk.iv);
|
||||
err = skcipher_walk_done(&walk, nbytes % CAMELLIA_BLOCK_SIZE);
|
||||
}
|
||||
fprs_write(0);
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_alg algs[] = { {
|
||||
static struct crypto_alg cipher_alg = {
|
||||
.cra_name = "camellia",
|
||||
.cra_driver_name = "camellia-sparc64",
|
||||
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
|
@ -242,46 +214,37 @@ static struct crypto_alg algs[] = { {
|
|||
.cia_decrypt = camellia_decrypt
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.cra_name = "ecb(camellia)",
|
||||
.cra_driver_name = "ecb-camellia-sparc64",
|
||||
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct camellia_sparc64_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.setkey = camellia_set_key,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(camellia)",
|
||||
.cra_driver_name = "cbc-camellia-sparc64",
|
||||
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct camellia_sparc64_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = camellia_set_key,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
},
|
||||
},
|
||||
}
|
||||
};
|
||||
|
||||
static struct skcipher_alg skcipher_algs[] = {
|
||||
{
|
||||
.base.cra_name = "ecb(camellia)",
|
||||
.base.cra_driver_name = "ecb-camellia-sparc64",
|
||||
.base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct camellia_sparc64_ctx),
|
||||
.base.cra_alignmask = 7,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.setkey = camellia_set_key_skcipher,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "cbc(camellia)",
|
||||
.base.cra_driver_name = "cbc-camellia-sparc64",
|
||||
.base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct camellia_sparc64_ctx),
|
||||
.base.cra_alignmask = 7,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = camellia_set_key_skcipher,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
}
|
||||
};
|
||||
|
||||
static bool __init sparc64_has_camellia_opcode(void)
@@ -300,17 +263,27 @@ static bool __init sparc64_has_camellia_opcode(void)

static int __init camellia_sparc64_mod_init(void)
{
if (sparc64_has_camellia_opcode()) {
pr_info("Using sparc64 camellia opcodes optimized CAMELLIA implementation\n");
return crypto_register_algs(algs, ARRAY_SIZE(algs));
int err;

if (!sparc64_has_camellia_opcode()) {
pr_info("sparc64 camellia opcodes not available.\n");
return -ENODEV;
}
pr_info("sparc64 camellia opcodes not available.\n");
return -ENODEV;
pr_info("Using sparc64 camellia opcodes optimized CAMELLIA implementation\n");
err = crypto_register_alg(&cipher_alg);
if (err)
return err;
err = crypto_register_skciphers(skcipher_algs,
ARRAY_SIZE(skcipher_algs));
if (err)
crypto_unregister_alg(&cipher_alg);
return err;
}

static void __exit camellia_sparc64_mod_fini(void)
{
crypto_unregister_algs(algs, ARRAY_SIZE(algs));
crypto_unregister_alg(&cipher_alg);
crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
}

module_init(camellia_sparc64_mod_init);
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include <asm/fpumacro.h>
#include <asm/pstate.h>
@@ -61,6 +62,12 @@ static int des_set_key(struct crypto_tfm *tfm, const u8 *key,
return 0;
}

static int des_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
return des_set_key(crypto_skcipher_tfm(tfm), key, keylen);
}

extern void des_sparc64_crypt(const u64 *key, const u64 *input,
u64 *output);

@@ -85,113 +92,90 @@ extern void des_sparc64_load_keys(const u64 *key);
extern void des_sparc64_ecb_crypt(const u64 *input, u64 *output,
unsigned int len);

#define DES_BLOCK_MASK (~(DES_BLOCK_SIZE - 1))

static int __ecb_crypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes, bool encrypt)
|
||||
static int __ecb_crypt(struct skcipher_request *req, bool encrypt)
|
||||
{
|
||||
struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct des_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (encrypt)
|
||||
des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
|
||||
else
|
||||
des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
unsigned int block_len = nbytes & DES_BLOCK_MASK;
|
||||
|
||||
if (likely(block_len)) {
|
||||
des_sparc64_ecb_crypt((const u64 *)walk.src.virt.addr,
|
||||
(u64 *) walk.dst.virt.addr,
|
||||
block_len);
|
||||
}
|
||||
nbytes &= DES_BLOCK_SIZE - 1;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
des_sparc64_ecb_crypt(walk.src.virt.addr, walk.dst.virt.addr,
|
||||
round_down(nbytes, DES_BLOCK_SIZE));
|
||||
err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE);
|
||||
}
|
||||
fprs_write(0);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ecb_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __ecb_crypt(desc, dst, src, nbytes, true);
|
||||
return __ecb_crypt(req, true);
|
||||
}
|
||||
|
||||
static int ecb_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __ecb_crypt(desc, dst, src, nbytes, false);
|
||||
return __ecb_crypt(req, false);
|
||||
}
|
||||
|
||||
extern void des_sparc64_cbc_encrypt(const u64 *input, u64 *output,
|
||||
unsigned int len, u64 *iv);
|
||||
|
||||
static int cbc_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
extern void des_sparc64_cbc_decrypt(const u64 *input, u64 *output,
|
||||
unsigned int len, u64 *iv);
|
||||
|
||||
static int __cbc_crypt(struct skcipher_request *req, bool encrypt)
|
||||
{
|
||||
struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct des_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
unsigned int block_len = nbytes & DES_BLOCK_MASK;
|
||||
|
||||
if (likely(block_len)) {
|
||||
des_sparc64_cbc_encrypt((const u64 *)walk.src.virt.addr,
|
||||
(u64 *) walk.dst.virt.addr,
|
||||
block_len, (u64 *) walk.iv);
|
||||
}
|
||||
nbytes &= DES_BLOCK_SIZE - 1;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
if (encrypt)
|
||||
des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
|
||||
else
|
||||
des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
if (encrypt)
|
||||
des_sparc64_cbc_encrypt(walk.src.virt.addr,
|
||||
walk.dst.virt.addr,
|
||||
round_down(nbytes,
|
||||
DES_BLOCK_SIZE),
|
||||
walk.iv);
|
||||
else
|
||||
des_sparc64_cbc_decrypt(walk.src.virt.addr,
|
||||
walk.dst.virt.addr,
|
||||
round_down(nbytes,
|
||||
DES_BLOCK_SIZE),
|
||||
walk.iv);
|
||||
err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE);
|
||||
}
|
||||
fprs_write(0);
|
||||
return err;
|
||||
}
|
||||
|
||||
extern void des_sparc64_cbc_decrypt(const u64 *input, u64 *output,
|
||||
unsigned int len, u64 *iv);
|
||||
|
||||
static int cbc_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
int err;
|
||||
return __cbc_crypt(req, true);
|
||||
}
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
|
||||
des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
unsigned int block_len = nbytes & DES_BLOCK_MASK;
|
||||
|
||||
if (likely(block_len)) {
|
||||
des_sparc64_cbc_decrypt((const u64 *)walk.src.virt.addr,
|
||||
(u64 *) walk.dst.virt.addr,
|
||||
block_len, (u64 *) walk.iv);
|
||||
}
|
||||
nbytes &= DES_BLOCK_SIZE - 1;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
fprs_write(0);
|
||||
return err;
|
||||
static int cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __cbc_crypt(req, false);
|
||||
}
|
||||
|
||||
static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key,
|
||||
|
@ -227,6 +211,12 @@ static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int des3_ede_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
return des3_ede_set_key(crypto_skcipher_tfm(tfm), key, keylen);
|
||||
}
|
||||
|
||||
extern void des3_ede_sparc64_crypt(const u64 *key, const u64 *input,
|
||||
u64 *output);
|
||||
|
||||
|
@ -251,241 +241,196 @@ extern void des3_ede_sparc64_load_keys(const u64 *key);
|
|||
extern void des3_ede_sparc64_ecb_crypt(const u64 *expkey, const u64 *input,
|
||||
u64 *output, unsigned int len);
|
||||
|
||||
static int __ecb3_crypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes, bool encrypt)
|
||||
static int __ecb3_crypt(struct skcipher_request *req, bool encrypt)
|
||||
{
|
||||
struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct des3_ede_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
const u64 *K;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (encrypt)
|
||||
K = &ctx->encrypt_expkey[0];
|
||||
else
|
||||
K = &ctx->decrypt_expkey[0];
|
||||
des3_ede_sparc64_load_keys(K);
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
unsigned int block_len = nbytes & DES_BLOCK_MASK;
|
||||
|
||||
if (likely(block_len)) {
|
||||
const u64 *src64 = (const u64 *)walk.src.virt.addr;
|
||||
des3_ede_sparc64_ecb_crypt(K, src64,
|
||||
(u64 *) walk.dst.virt.addr,
|
||||
block_len);
|
||||
}
|
||||
nbytes &= DES_BLOCK_SIZE - 1;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
des3_ede_sparc64_ecb_crypt(K, walk.src.virt.addr,
|
||||
walk.dst.virt.addr,
|
||||
round_down(nbytes, DES_BLOCK_SIZE));
|
||||
err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE);
|
||||
}
|
||||
fprs_write(0);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ecb3_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ecb3_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __ecb3_crypt(desc, dst, src, nbytes, true);
|
||||
return __ecb3_crypt(req, true);
|
||||
}
|
||||
|
||||
static int ecb3_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int ecb3_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __ecb3_crypt(desc, dst, src, nbytes, false);
|
||||
return __ecb3_crypt(req, false);
|
||||
}
|
||||
|
||||
extern void des3_ede_sparc64_cbc_encrypt(const u64 *expkey, const u64 *input,
|
||||
u64 *output, unsigned int len,
|
||||
u64 *iv);
|
||||
|
||||
static int cbc3_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
const u64 *K;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
|
||||
K = &ctx->encrypt_expkey[0];
|
||||
des3_ede_sparc64_load_keys(K);
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
unsigned int block_len = nbytes & DES_BLOCK_MASK;
|
||||
|
||||
if (likely(block_len)) {
|
||||
const u64 *src64 = (const u64 *)walk.src.virt.addr;
|
||||
des3_ede_sparc64_cbc_encrypt(K, src64,
|
||||
(u64 *) walk.dst.virt.addr,
|
||||
block_len,
|
||||
(u64 *) walk.iv);
|
||||
}
|
||||
nbytes &= DES_BLOCK_SIZE - 1;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
fprs_write(0);
|
||||
return err;
|
||||
}
|
||||
|
||||
extern void des3_ede_sparc64_cbc_decrypt(const u64 *expkey, const u64 *input,
|
||||
u64 *output, unsigned int len,
|
||||
u64 *iv);
|
||||
|
||||
static int cbc3_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
static int __cbc3_crypt(struct skcipher_request *req, bool encrypt)
|
||||
{
|
||||
struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct des3_ede_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
const u64 *K;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
K = &ctx->decrypt_expkey[0];
|
||||
if (encrypt)
|
||||
K = &ctx->encrypt_expkey[0];
|
||||
else
|
||||
K = &ctx->decrypt_expkey[0];
|
||||
des3_ede_sparc64_load_keys(K);
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
unsigned int block_len = nbytes & DES_BLOCK_MASK;
|
||||
|
||||
if (likely(block_len)) {
|
||||
const u64 *src64 = (const u64 *)walk.src.virt.addr;
|
||||
des3_ede_sparc64_cbc_decrypt(K, src64,
|
||||
(u64 *) walk.dst.virt.addr,
|
||||
block_len,
|
||||
(u64 *) walk.iv);
|
||||
}
|
||||
nbytes &= DES_BLOCK_SIZE - 1;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
if (encrypt)
|
||||
des3_ede_sparc64_cbc_encrypt(K, walk.src.virt.addr,
|
||||
walk.dst.virt.addr,
|
||||
round_down(nbytes,
|
||||
DES_BLOCK_SIZE),
|
||||
walk.iv);
|
||||
else
|
||||
des3_ede_sparc64_cbc_decrypt(K, walk.src.virt.addr,
|
||||
walk.dst.virt.addr,
|
||||
round_down(nbytes,
|
||||
DES_BLOCK_SIZE),
|
||||
walk.iv);
|
||||
err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE);
|
||||
}
|
||||
fprs_write(0);
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_alg algs[] = { {
|
||||
.cra_name = "des",
|
||||
.cra_driver_name = "des-sparc64",
|
||||
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct des_sparc64_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = DES_KEY_SIZE,
|
||||
.cia_max_keysize = DES_KEY_SIZE,
|
||||
.cia_setkey = des_set_key,
|
||||
.cia_encrypt = sparc_des_encrypt,
|
||||
.cia_decrypt = sparc_des_decrypt
|
||||
static int cbc3_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __cbc3_crypt(req, true);
|
||||
}
|
||||
|
||||
static int cbc3_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __cbc3_crypt(req, false);
|
||||
}
|
||||
|
||||
static struct crypto_alg cipher_algs[] = {
|
||||
{
|
||||
.cra_name = "des",
|
||||
.cra_driver_name = "des-sparc64",
|
||||
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct des_sparc64_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = DES_KEY_SIZE,
|
||||
.cia_max_keysize = DES_KEY_SIZE,
|
||||
.cia_setkey = des_set_key,
|
||||
.cia_encrypt = sparc_des_encrypt,
|
||||
.cia_decrypt = sparc_des_decrypt
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.cra_name = "des3_ede",
|
||||
.cra_driver_name = "des3_ede-sparc64",
|
||||
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = DES3_EDE_KEY_SIZE,
|
||||
.cia_max_keysize = DES3_EDE_KEY_SIZE,
|
||||
.cia_setkey = des3_ede_set_key,
|
||||
.cia_encrypt = sparc_des3_ede_encrypt,
|
||||
.cia_decrypt = sparc_des3_ede_decrypt
|
||||
}
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.cra_name = "ecb(des)",
|
||||
.cra_driver_name = "ecb-des-sparc64",
|
||||
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct des_sparc64_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = DES_KEY_SIZE,
|
||||
.max_keysize = DES_KEY_SIZE,
|
||||
.setkey = des_set_key,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(des)",
|
||||
.cra_driver_name = "cbc-des-sparc64",
|
||||
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct des_sparc64_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = DES_KEY_SIZE,
|
||||
.max_keysize = DES_KEY_SIZE,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.setkey = des_set_key,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "des3_ede",
|
||||
.cra_driver_name = "des3_ede-sparc64",
|
||||
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = DES3_EDE_KEY_SIZE,
|
||||
.cia_max_keysize = DES3_EDE_KEY_SIZE,
|
||||
.cia_setkey = des3_ede_set_key,
|
||||
.cia_encrypt = sparc_des3_ede_encrypt,
|
||||
.cia_decrypt = sparc_des3_ede_decrypt
|
||||
}
|
||||
};
|
||||
|
||||
static struct skcipher_alg skcipher_algs[] = {
|
||||
{
|
||||
.base.cra_name = "ecb(des)",
|
||||
.base.cra_driver_name = "ecb-des-sparc64",
|
||||
.base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.base.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct des_sparc64_ctx),
|
||||
.base.cra_alignmask = 7,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = DES_KEY_SIZE,
|
||||
.max_keysize = DES_KEY_SIZE,
|
||||
.setkey = des_set_key_skcipher,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "cbc(des)",
|
||||
.base.cra_driver_name = "cbc-des-sparc64",
|
||||
.base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.base.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct des_sparc64_ctx),
|
||||
.base.cra_alignmask = 7,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = DES_KEY_SIZE,
|
||||
.max_keysize = DES_KEY_SIZE,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.setkey = des_set_key_skcipher,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "ecb(des3_ede)",
|
||||
.base.cra_driver_name = "ecb-des3_ede-sparc64",
|
||||
.base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
|
||||
.base.cra_alignmask = 7,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = DES3_EDE_KEY_SIZE,
|
||||
.max_keysize = DES3_EDE_KEY_SIZE,
|
||||
.setkey = des3_ede_set_key_skcipher,
|
||||
.encrypt = ecb3_encrypt,
|
||||
.decrypt = ecb3_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "cbc(des3_ede)",
|
||||
.base.cra_driver_name = "cbc-des3_ede-sparc64",
|
||||
.base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
|
||||
.base.cra_alignmask = 7,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = DES3_EDE_KEY_SIZE,
|
||||
.max_keysize = DES3_EDE_KEY_SIZE,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.setkey = des3_ede_set_key_skcipher,
|
||||
.encrypt = cbc3_encrypt,
|
||||
.decrypt = cbc3_decrypt,
|
||||
}
|
||||
}, {
|
||||
.cra_name = "ecb(des3_ede)",
|
||||
.cra_driver_name = "ecb-des3_ede-sparc64",
|
||||
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = DES3_EDE_KEY_SIZE,
|
||||
.max_keysize = DES3_EDE_KEY_SIZE,
|
||||
.setkey = des3_ede_set_key,
|
||||
.encrypt = ecb3_encrypt,
|
||||
.decrypt = ecb3_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(des3_ede)",
|
||||
.cra_driver_name = "cbc-des3_ede-sparc64",
|
||||
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = DES3_EDE_KEY_SIZE,
|
||||
.max_keysize = DES3_EDE_KEY_SIZE,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.setkey = des3_ede_set_key,
|
||||
.encrypt = cbc3_encrypt,
|
||||
.decrypt = cbc3_decrypt,
|
||||
},
|
||||
},
|
||||
} };
|
||||
};
|
||||
|
||||
static bool __init sparc64_has_des_opcode(void)
|
||||
{
|
||||
|
@ -503,17 +448,27 @@ static bool __init sparc64_has_des_opcode(void)
|
|||
|
||||
static int __init des_sparc64_mod_init(void)
|
||||
{
|
||||
if (sparc64_has_des_opcode()) {
|
||||
pr_info("Using sparc64 des opcodes optimized DES implementation\n");
|
||||
return crypto_register_algs(algs, ARRAY_SIZE(algs));
|
||||
int err;
|
||||
|
||||
if (!sparc64_has_des_opcode()) {
|
||||
pr_info("sparc64 des opcodes not available.\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
pr_info("sparc64 des opcodes not available.\n");
|
||||
return -ENODEV;
|
||||
pr_info("Using sparc64 des opcodes optimized DES implementation\n");
|
||||
err = crypto_register_algs(cipher_algs, ARRAY_SIZE(cipher_algs));
|
||||
if (err)
|
||||
return err;
|
||||
err = crypto_register_skciphers(skcipher_algs,
|
||||
ARRAY_SIZE(skcipher_algs));
|
||||
if (err)
|
||||
crypto_unregister_algs(cipher_algs, ARRAY_SIZE(cipher_algs));
|
||||
return err;
|
||||
}
|
||||
|
||||
static void __exit des_sparc64_mod_fini(void)
|
||||
{
|
||||
crypto_unregister_algs(algs, ARRAY_SIZE(algs));
|
||||
crypto_unregister_algs(cipher_algs, ARRAY_SIZE(cipher_algs));
|
||||
crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
|
||||
}
|
||||
|
||||
module_init(des_sparc64_mod_init);
|
||||
|
|
|
@ -24,7 +24,7 @@ config X86_64
|
|||
depends on 64BIT
|
||||
# Options that are inherently 64-bit kernel only:
|
||||
select ARCH_HAS_GIGANTIC_PAGE
|
||||
select ARCH_SUPPORTS_INT128
|
||||
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
|
||||
select ARCH_USE_CMPXCHG_LOCKREF
|
||||
select HAVE_ARCH_SOFT_DIRTY
|
||||
select MODULES_USE_ELF_RELA
|
||||
|
|
|
@ -39,6 +39,7 @@ obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o
|
|||
|
||||
obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o
|
||||
obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o
|
||||
obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
|
||||
|
||||
# These modules require assembler to support AVX.
|
||||
ifeq ($(avx_supported),yes)
|
||||
|
@ -48,6 +49,7 @@ ifeq ($(avx_supported),yes)
|
|||
obj-$(CONFIG_CRYPTO_CAST6_AVX_X86_64) += cast6-avx-x86_64.o
|
||||
obj-$(CONFIG_CRYPTO_TWOFISH_AVX_X86_64) += twofish-avx-x86_64.o
|
||||
obj-$(CONFIG_CRYPTO_SERPENT_AVX_X86_64) += serpent-avx-x86_64.o
|
||||
obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += blake2s-x86_64.o
|
||||
endif
|
||||
|
||||
# These modules require assembler to support AVX2.
|
||||
|
@ -70,6 +72,7 @@ serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o
|
|||
aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o
|
||||
|
||||
nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o
|
||||
blake2s-x86_64-y := blake2s-core.o blake2s-glue.o
|
||||
|
||||
ifeq ($(avx_supported),yes)
|
||||
camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o \
|
||||
|
|
|
@ -0,0 +1,258 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
|
||||
/*
|
||||
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
|
||||
* Copyright (C) 2017-2019 Samuel Neves <sneves@dei.uc.pt>. All Rights Reserved.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
.section .rodata.cst32.BLAKE2S_IV, "aM", @progbits, 32
|
||||
.align 32
|
||||
IV: .octa 0xA54FF53A3C6EF372BB67AE856A09E667
|
||||
.octa 0x5BE0CD191F83D9AB9B05688C510E527F
|
||||
.section .rodata.cst16.ROT16, "aM", @progbits, 16
|
||||
.align 16
|
||||
ROT16: .octa 0x0D0C0F0E09080B0A0504070601000302
|
||||
.section .rodata.cst16.ROR328, "aM", @progbits, 16
|
||||
.align 16
|
||||
ROR328: .octa 0x0C0F0E0D080B0A090407060500030201
|
||||
.section .rodata.cst64.BLAKE2S_SIGMA, "aM", @progbits, 160
|
||||
.align 64
|
||||
SIGMA:
|
||||
.byte 0, 2, 4, 6, 1, 3, 5, 7, 14, 8, 10, 12, 15, 9, 11, 13
|
||||
.byte 14, 4, 9, 13, 10, 8, 15, 6, 5, 1, 0, 11, 3, 12, 2, 7
|
||||
.byte 11, 12, 5, 15, 8, 0, 2, 13, 9, 10, 3, 7, 4, 14, 6, 1
|
||||
.byte 7, 3, 13, 11, 9, 1, 12, 14, 15, 2, 5, 4, 8, 6, 10, 0
|
||||
.byte 9, 5, 2, 10, 0, 7, 4, 15, 3, 14, 11, 6, 13, 1, 12, 8
|
||||
.byte 2, 6, 0, 8, 12, 10, 11, 3, 1, 4, 7, 15, 9, 13, 5, 14
|
||||
.byte 12, 1, 14, 4, 5, 15, 13, 10, 8, 0, 6, 9, 11, 7, 3, 2
|
||||
.byte 13, 7, 12, 3, 11, 14, 1, 9, 2, 5, 15, 8, 10, 0, 4, 6
|
||||
.byte 6, 14, 11, 0, 15, 9, 3, 8, 10, 12, 13, 1, 5, 2, 7, 4
|
||||
.byte 10, 8, 7, 1, 2, 4, 6, 5, 13, 15, 9, 3, 0, 11, 14, 12
|
||||
#ifdef CONFIG_AS_AVX512
|
||||
.section .rodata.cst64.BLAKE2S_SIGMA2, "aM", @progbits, 640
|
||||
.align 64
|
||||
SIGMA2:
|
||||
.long 0, 2, 4, 6, 1, 3, 5, 7, 14, 8, 10, 12, 15, 9, 11, 13
|
||||
.long 8, 2, 13, 15, 10, 9, 12, 3, 6, 4, 0, 14, 5, 11, 1, 7
|
||||
.long 11, 13, 8, 6, 5, 10, 14, 3, 2, 4, 12, 15, 1, 0, 7, 9
|
||||
.long 11, 10, 7, 0, 8, 15, 1, 13, 3, 6, 2, 12, 4, 14, 9, 5
|
||||
.long 4, 10, 9, 14, 15, 0, 11, 8, 1, 7, 3, 13, 2, 5, 6, 12
|
||||
.long 2, 11, 4, 15, 14, 3, 10, 8, 13, 6, 5, 7, 0, 12, 1, 9
|
||||
.long 4, 8, 15, 9, 14, 11, 13, 5, 3, 2, 1, 12, 6, 10, 7, 0
|
||||
.long 6, 13, 0, 14, 12, 2, 1, 11, 15, 4, 5, 8, 7, 9, 3, 10
|
||||
.long 15, 5, 4, 13, 10, 7, 3, 11, 12, 2, 0, 6, 9, 8, 1, 14
|
||||
.long 8, 7, 14, 11, 13, 15, 0, 12, 10, 4, 5, 6, 3, 2, 1, 9
|
||||
#endif /* CONFIG_AS_AVX512 */
|
||||
|
||||
.text
|
||||
#ifdef CONFIG_AS_SSSE3
|
||||
ENTRY(blake2s_compress_ssse3)
|
||||
testq %rdx,%rdx
|
||||
je .Lendofloop
|
||||
movdqu (%rdi),%xmm0
|
||||
movdqu 0x10(%rdi),%xmm1
|
||||
movdqa ROT16(%rip),%xmm12
|
||||
movdqa ROR328(%rip),%xmm13
|
||||
movdqu 0x20(%rdi),%xmm14
|
||||
movq %rcx,%xmm15
|
||||
leaq SIGMA+0xa0(%rip),%r8
|
||||
jmp .Lbeginofloop
|
||||
.align 32
|
||||
.Lbeginofloop:
|
||||
movdqa %xmm0,%xmm10
|
||||
movdqa %xmm1,%xmm11
|
||||
paddq %xmm15,%xmm14
|
||||
movdqa IV(%rip),%xmm2
|
||||
movdqa %xmm14,%xmm3
|
||||
pxor IV+0x10(%rip),%xmm3
|
||||
leaq SIGMA(%rip),%rcx
|
||||
.Lroundloop:
|
||||
movzbl (%rcx),%eax
|
||||
movd (%rsi,%rax,4),%xmm4
|
||||
movzbl 0x1(%rcx),%eax
|
||||
movd (%rsi,%rax,4),%xmm5
|
||||
movzbl 0x2(%rcx),%eax
|
||||
movd (%rsi,%rax,4),%xmm6
|
||||
movzbl 0x3(%rcx),%eax
|
||||
movd (%rsi,%rax,4),%xmm7
|
||||
punpckldq %xmm5,%xmm4
|
||||
punpckldq %xmm7,%xmm6
|
||||
punpcklqdq %xmm6,%xmm4
|
||||
paddd %xmm4,%xmm0
|
||||
paddd %xmm1,%xmm0
|
||||
pxor %xmm0,%xmm3
|
||||
pshufb %xmm12,%xmm3
|
||||
paddd %xmm3,%xmm2
|
||||
pxor %xmm2,%xmm1
|
||||
movdqa %xmm1,%xmm8
|
||||
psrld $0xc,%xmm1
|
||||
pslld $0x14,%xmm8
|
||||
por %xmm8,%xmm1
|
||||
movzbl 0x4(%rcx),%eax
|
||||
movd (%rsi,%rax,4),%xmm5
|
||||
movzbl 0x5(%rcx),%eax
|
||||
movd (%rsi,%rax,4),%xmm6
|
||||
movzbl 0x6(%rcx),%eax
|
||||
movd (%rsi,%rax,4),%xmm7
|
||||
movzbl 0x7(%rcx),%eax
|
||||
movd (%rsi,%rax,4),%xmm4
|
||||
punpckldq %xmm6,%xmm5
|
||||
punpckldq %xmm4,%xmm7
|
||||
punpcklqdq %xmm7,%xmm5
|
||||
paddd %xmm5,%xmm0
|
||||
paddd %xmm1,%xmm0
|
||||
pxor %xmm0,%xmm3
|
||||
pshufb %xmm13,%xmm3
|
||||
paddd %xmm3,%xmm2
|
||||
pxor %xmm2,%xmm1
|
||||
movdqa %xmm1,%xmm8
|
||||
psrld $0x7,%xmm1
|
||||
pslld $0x19,%xmm8
|
||||
por %xmm8,%xmm1
|
||||
pshufd $0x93,%xmm0,%xmm0
|
||||
pshufd $0x4e,%xmm3,%xmm3
|
||||
pshufd $0x39,%xmm2,%xmm2
|
||||
movzbl 0x8(%rcx),%eax
|
||||
movd (%rsi,%rax,4),%xmm6
|
||||
movzbl 0x9(%rcx),%eax
|
||||
movd (%rsi,%rax,4),%xmm7
|
||||
movzbl 0xa(%rcx),%eax
|
||||
movd (%rsi,%rax,4),%xmm4
|
||||
movzbl 0xb(%rcx),%eax
|
||||
movd (%rsi,%rax,4),%xmm5
|
||||
punpckldq %xmm7,%xmm6
|
||||
punpckldq %xmm5,%xmm4
|
||||
punpcklqdq %xmm4,%xmm6
|
||||
paddd %xmm6,%xmm0
|
||||
paddd %xmm1,%xmm0
|
||||
pxor %xmm0,%xmm3
|
||||
pshufb %xmm12,%xmm3
|
||||
paddd %xmm3,%xmm2
|
||||
pxor %xmm2,%xmm1
|
||||
movdqa %xmm1,%xmm8
|
||||
psrld $0xc,%xmm1
|
||||
pslld $0x14,%xmm8
|
||||
por %xmm8,%xmm1
|
||||
movzbl 0xc(%rcx),%eax
|
||||
movd (%rsi,%rax,4),%xmm7
|
||||
movzbl 0xd(%rcx),%eax
|
||||
movd (%rsi,%rax,4),%xmm4
|
||||
movzbl 0xe(%rcx),%eax
|
||||
movd (%rsi,%rax,4),%xmm5
|
||||
movzbl 0xf(%rcx),%eax
|
||||
movd (%rsi,%rax,4),%xmm6
|
||||
punpckldq %xmm4,%xmm7
|
||||
punpckldq %xmm6,%xmm5
|
||||
punpcklqdq %xmm5,%xmm7
|
||||
paddd %xmm7,%xmm0
|
||||
paddd %xmm1,%xmm0
|
||||
pxor %xmm0,%xmm3
|
||||
pshufb %xmm13,%xmm3
|
||||
paddd %xmm3,%xmm2
|
||||
pxor %xmm2,%xmm1
|
||||
movdqa %xmm1,%xmm8
|
||||
psrld $0x7,%xmm1
|
||||
pslld $0x19,%xmm8
|
||||
por %xmm8,%xmm1
|
||||
pshufd $0x39,%xmm0,%xmm0
|
||||
pshufd $0x4e,%xmm3,%xmm3
|
||||
pshufd $0x93,%xmm2,%xmm2
|
||||
addq $0x10,%rcx
|
||||
cmpq %r8,%rcx
|
||||
jnz .Lroundloop
|
||||
pxor %xmm2,%xmm0
|
||||
pxor %xmm3,%xmm1
|
||||
pxor %xmm10,%xmm0
|
||||
pxor %xmm11,%xmm1
|
||||
addq $0x40,%rsi
|
||||
decq %rdx
|
||||
jnz .Lbeginofloop
|
||||
movdqu %xmm0,(%rdi)
|
||||
movdqu %xmm1,0x10(%rdi)
|
||||
movdqu %xmm14,0x20(%rdi)
|
||||
.Lendofloop:
|
||||
ret
|
||||
ENDPROC(blake2s_compress_ssse3)
|
||||
#endif /* CONFIG_AS_SSSE3 */
|
||||
|
||||
#ifdef CONFIG_AS_AVX512
|
||||
ENTRY(blake2s_compress_avx512)
|
||||
vmovdqu (%rdi),%xmm0
|
||||
vmovdqu 0x10(%rdi),%xmm1
|
||||
vmovdqu 0x20(%rdi),%xmm4
|
||||
vmovq %rcx,%xmm5
|
||||
vmovdqa IV(%rip),%xmm14
|
||||
vmovdqa IV+16(%rip),%xmm15
|
||||
jmp .Lblake2s_compress_avx512_mainloop
|
||||
.align 32
|
||||
.Lblake2s_compress_avx512_mainloop:
|
||||
vmovdqa %xmm0,%xmm10
|
||||
vmovdqa %xmm1,%xmm11
|
||||
vpaddq %xmm5,%xmm4,%xmm4
|
||||
vmovdqa %xmm14,%xmm2
|
||||
vpxor %xmm15,%xmm4,%xmm3
|
||||
vmovdqu (%rsi),%ymm6
|
||||
vmovdqu 0x20(%rsi),%ymm7
|
||||
addq $0x40,%rsi
|
||||
leaq SIGMA2(%rip),%rax
|
||||
movb $0xa,%cl
|
||||
.Lblake2s_compress_avx512_roundloop:
|
||||
addq $0x40,%rax
|
||||
vmovdqa -0x40(%rax),%ymm8
|
||||
vmovdqa -0x20(%rax),%ymm9
|
||||
vpermi2d %ymm7,%ymm6,%ymm8
|
||||
vpermi2d %ymm7,%ymm6,%ymm9
|
||||
vmovdqa %ymm8,%ymm6
|
||||
vmovdqa %ymm9,%ymm7
|
||||
vpaddd %xmm8,%xmm0,%xmm0
|
||||
vpaddd %xmm1,%xmm0,%xmm0
|
||||
vpxor %xmm0,%xmm3,%xmm3
|
||||
vprord $0x10,%xmm3,%xmm3
|
||||
vpaddd %xmm3,%xmm2,%xmm2
|
||||
vpxor %xmm2,%xmm1,%xmm1
|
||||
vprord $0xc,%xmm1,%xmm1
|
||||
vextracti128 $0x1,%ymm8,%xmm8
|
||||
vpaddd %xmm8,%xmm0,%xmm0
|
||||
vpaddd %xmm1,%xmm0,%xmm0
|
||||
vpxor %xmm0,%xmm3,%xmm3
|
||||
vprord $0x8,%xmm3,%xmm3
|
||||
vpaddd %xmm3,%xmm2,%xmm2
|
||||
vpxor %xmm2,%xmm1,%xmm1
|
||||
vprord $0x7,%xmm1,%xmm1
|
||||
vpshufd $0x93,%xmm0,%xmm0
|
||||
vpshufd $0x4e,%xmm3,%xmm3
|
||||
vpshufd $0x39,%xmm2,%xmm2
|
||||
vpaddd %xmm9,%xmm0,%xmm0
|
||||
vpaddd %xmm1,%xmm0,%xmm0
|
||||
vpxor %xmm0,%xmm3,%xmm3
|
||||
vprord $0x10,%xmm3,%xmm3
|
||||
vpaddd %xmm3,%xmm2,%xmm2
|
||||
vpxor %xmm2,%xmm1,%xmm1
|
||||
vprord $0xc,%xmm1,%xmm1
|
||||
vextracti128 $0x1,%ymm9,%xmm9
|
||||
vpaddd %xmm9,%xmm0,%xmm0
|
||||
vpaddd %xmm1,%xmm0,%xmm0
|
||||
vpxor %xmm0,%xmm3,%xmm3
|
||||
vprord $0x8,%xmm3,%xmm3
|
||||
vpaddd %xmm3,%xmm2,%xmm2
|
||||
vpxor %xmm2,%xmm1,%xmm1
|
||||
vprord $0x7,%xmm1,%xmm1
|
||||
vpshufd $0x39,%xmm0,%xmm0
|
||||
vpshufd $0x4e,%xmm3,%xmm3
|
||||
vpshufd $0x93,%xmm2,%xmm2
|
||||
decb %cl
|
||||
jne .Lblake2s_compress_avx512_roundloop
|
||||
vpxor %xmm10,%xmm0,%xmm0
|
||||
vpxor %xmm11,%xmm1,%xmm1
|
||||
vpxor %xmm2,%xmm0,%xmm0
|
||||
vpxor %xmm3,%xmm1,%xmm1
|
||||
decq %rdx
|
||||
jne .Lblake2s_compress_avx512_mainloop
|
||||
vmovdqu %xmm0,(%rdi)
|
||||
vmovdqu %xmm1,0x10(%rdi)
|
||||
vmovdqu %xmm4,0x20(%rdi)
|
||||
vzeroupper
|
||||
retq
|
||||
ENDPROC(blake2s_compress_avx512)
|
||||
#endif /* CONFIG_AS_AVX512 */
|
|
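For orientation, the SSSE3 and AVX-512VL routines above vectorize the standard BLAKE2s quarter-round: the pshufb/vprord sequences implement the 16/12/8/7-bit rotations and the pshufd shuffles switch between the column and diagonal steps. A plain C sketch of that G function (illustrative only, not part of this diff) looks roughly like this:

#include <linux/types.h>

static inline u32 blake2s_ror32(u32 x, unsigned int r)
{
	return (x >> r) | (x << (32 - r));
}

/* One G mixing step on state words a, b, c, d with message words x and y. */
static inline void blake2s_g(u32 *a, u32 *b, u32 *c, u32 *d, u32 x, u32 y)
{
	*a += *b + x;
	*d = blake2s_ror32(*d ^ *a, 16);
	*c += *d;
	*b = blake2s_ror32(*b ^ *c, 12);
	*a += *b + y;
	*d = blake2s_ror32(*d ^ *a, 8);
	*c += *d;
	*b = blake2s_ror32(*b ^ *c, 7);
}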
@ -0,0 +1,233 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 OR MIT
|
||||
/*
|
||||
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
|
||||
*/
|
||||
|
||||
#include <crypto/internal/blake2s.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
asmlinkage void blake2s_compress_ssse3(struct blake2s_state *state,
|
||||
const u8 *block, const size_t nblocks,
|
||||
const u32 inc);
|
||||
asmlinkage void blake2s_compress_avx512(struct blake2s_state *state,
|
||||
const u8 *block, const size_t nblocks,
|
||||
const u32 inc);
|
||||
|
||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_ssse3);
|
||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_avx512);
|
||||
|
||||
void blake2s_compress_arch(struct blake2s_state *state,
|
||||
const u8 *block, size_t nblocks,
|
||||
const u32 inc)
|
||||
{
|
||||
/* SIMD disables preemption, so relax after processing each page. */
|
||||
BUILD_BUG_ON(PAGE_SIZE / BLAKE2S_BLOCK_SIZE < 8);
|
||||
|
||||
if (!static_branch_likely(&blake2s_use_ssse3) || !crypto_simd_usable()) {
|
||||
blake2s_compress_generic(state, block, nblocks, inc);
|
||||
return;
|
||||
}
|
||||
|
||||
for (;;) {
|
||||
const size_t blocks = min_t(size_t, nblocks,
|
||||
PAGE_SIZE / BLAKE2S_BLOCK_SIZE);
|
||||
|
||||
kernel_fpu_begin();
|
||||
if (IS_ENABLED(CONFIG_AS_AVX512) &&
|
||||
static_branch_likely(&blake2s_use_avx512))
|
||||
blake2s_compress_avx512(state, block, blocks, inc);
|
||||
else
|
||||
blake2s_compress_ssse3(state, block, blocks, inc);
|
||||
kernel_fpu_end();
|
||||
|
||||
nblocks -= blocks;
|
||||
if (!nblocks)
|
||||
break;
|
||||
block += blocks * BLAKE2S_BLOCK_SIZE;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(blake2s_compress_arch);
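/*
 * Each pass through the loop above hands the assembly at most PAGE_SIZE /
 * BLAKE2S_BLOCK_SIZE blocks and calls kernel_fpu_end() before the next
 * chunk, so the preemption-disabled FPU section stays bounded even for very
 * large inputs.
 */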
|
||||
|
||||
static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm);
|
||||
|
||||
if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) {
|
||||
crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memcpy(tctx->key, key, keylen);
|
||||
tctx->keylen = keylen;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int crypto_blake2s_init(struct shash_desc *desc)
|
||||
{
|
||||
struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
|
||||
struct blake2s_state *state = shash_desc_ctx(desc);
|
||||
const int outlen = crypto_shash_digestsize(desc->tfm);
|
||||
|
||||
if (tctx->keylen)
|
||||
blake2s_init_key(state, outlen, tctx->key, tctx->keylen);
|
||||
else
|
||||
blake2s_init(state, outlen);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int crypto_blake2s_update(struct shash_desc *desc, const u8 *in,
|
||||
unsigned int inlen)
|
||||
{
|
||||
struct blake2s_state *state = shash_desc_ctx(desc);
|
||||
const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
|
||||
|
||||
if (unlikely(!inlen))
|
||||
return 0;
|
||||
if (inlen > fill) {
|
||||
memcpy(state->buf + state->buflen, in, fill);
|
||||
blake2s_compress_arch(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
|
||||
state->buflen = 0;
|
||||
in += fill;
|
||||
inlen -= fill;
|
||||
}
|
||||
if (inlen > BLAKE2S_BLOCK_SIZE) {
|
||||
const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
|
||||
/* Hash one less (full) block than strictly possible */
|
||||
blake2s_compress_arch(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
|
||||
in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
|
||||
inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
|
||||
}
|
||||
memcpy(state->buf + state->buflen, in, inlen);
|
||||
state->buflen += inlen;
|
||||
|
||||
return 0;
|
||||
}
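/*
 * Note that the update path never consumes its final block: for example,
 * with an empty buffer and inlen == 2 * BLAKE2S_BLOCK_SIZE, the first branch
 * hashes one block and the second then sees inlen == BLAKE2S_BLOCK_SIZE,
 * which fails the "> BLAKE2S_BLOCK_SIZE" test, so the last block stays in
 * state->buf.  This guarantees crypto_blake2s_final() always has a buffered
 * last block to flag via blake2s_set_lastblock().
 */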
|
||||
|
||||
static int crypto_blake2s_final(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
struct blake2s_state *state = shash_desc_ctx(desc);
|
||||
|
||||
blake2s_set_lastblock(state);
|
||||
memset(state->buf + state->buflen, 0,
|
||||
BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
|
||||
blake2s_compress_arch(state, state->buf, 1, state->buflen);
|
||||
cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
|
||||
memcpy(out, state->h, state->outlen);
|
||||
memzero_explicit(state, sizeof(*state));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct shash_alg blake2s_algs[] = {{
|
||||
.base.cra_name = "blake2s-128",
|
||||
.base.cra_driver_name = "blake2s-128-x86",
|
||||
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
|
||||
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.digestsize = BLAKE2S_128_HASH_SIZE,
|
||||
.setkey = crypto_blake2s_setkey,
|
||||
.init = crypto_blake2s_init,
|
||||
.update = crypto_blake2s_update,
|
||||
.final = crypto_blake2s_final,
|
||||
.descsize = sizeof(struct blake2s_state),
|
||||
}, {
|
||||
.base.cra_name = "blake2s-160",
|
||||
.base.cra_driver_name = "blake2s-160-x86",
|
||||
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
|
||||
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.digestsize = BLAKE2S_160_HASH_SIZE,
|
||||
.setkey = crypto_blake2s_setkey,
|
||||
.init = crypto_blake2s_init,
|
||||
.update = crypto_blake2s_update,
|
||||
.final = crypto_blake2s_final,
|
||||
.descsize = sizeof(struct blake2s_state),
|
||||
}, {
|
||||
.base.cra_name = "blake2s-224",
|
||||
.base.cra_driver_name = "blake2s-224-x86",
|
||||
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
|
||||
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.digestsize = BLAKE2S_224_HASH_SIZE,
|
||||
.setkey = crypto_blake2s_setkey,
|
||||
.init = crypto_blake2s_init,
|
||||
.update = crypto_blake2s_update,
|
||||
.final = crypto_blake2s_final,
|
||||
.descsize = sizeof(struct blake2s_state),
|
||||
}, {
|
||||
.base.cra_name = "blake2s-256",
|
||||
.base.cra_driver_name = "blake2s-256-x86",
|
||||
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
|
||||
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
|
||||
.digestsize = BLAKE2S_256_HASH_SIZE,
|
||||
.setkey = crypto_blake2s_setkey,
|
||||
.init = crypto_blake2s_init,
|
||||
.update = crypto_blake2s_update,
|
||||
.final = crypto_blake2s_final,
|
||||
.descsize = sizeof(struct blake2s_state),
|
||||
}};
|
||||
|
||||
static int __init blake2s_mod_init(void)
|
||||
{
|
||||
if (!boot_cpu_has(X86_FEATURE_SSSE3))
|
||||
return 0;
|
||||
|
||||
static_branch_enable(&blake2s_use_ssse3);
|
||||
|
||||
if (IS_ENABLED(CONFIG_AS_AVX512) &&
|
||||
boot_cpu_has(X86_FEATURE_AVX) &&
|
||||
boot_cpu_has(X86_FEATURE_AVX2) &&
|
||||
boot_cpu_has(X86_FEATURE_AVX512F) &&
|
||||
boot_cpu_has(X86_FEATURE_AVX512VL) &&
|
||||
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM |
|
||||
XFEATURE_MASK_AVX512, NULL))
|
||||
static_branch_enable(&blake2s_use_avx512);
|
||||
|
||||
return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
|
||||
}
|
||||
|
||||
static void __exit blake2s_mod_exit(void)
|
||||
{
|
||||
if (boot_cpu_has(X86_FEATURE_SSSE3))
|
||||
crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
|
||||
}
|
||||
|
||||
module_init(blake2s_mod_init);
|
||||
module_exit(blake2s_mod_exit);
|
||||
|
||||
MODULE_ALIAS_CRYPTO("blake2s-128");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-128-x86");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-160");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-160-x86");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-224");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-224-x86");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-256");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-256-x86");
|
||||
MODULE_LICENSE("GPL v2");
|
|
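As a usage sketch for the shash drivers registered above (standard kernel crypto API calls; illustrative only, not part of this diff):

#include <crypto/hash.h>
#include <linux/err.h>

static int blake2s_digest_example(const u8 *data, unsigned int len, u8 out[32])
{
	struct crypto_shash *tfm;
	int err;

	/* Picks blake2s-256-x86 when available, else the generic driver. */
	tfm = crypto_alloc_shash("blake2s-256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return err;
}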
@ -7,7 +7,7 @@
|
|||
*/
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/chacha.h>
|
||||
#include <crypto/internal/chacha.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/kernel.h>
|
||||
|
@ -21,24 +21,24 @@ asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
|
|||
asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
|
||||
unsigned int len, int nrounds);
|
||||
asmlinkage void hchacha_block_ssse3(const u32 *state, u32 *out, int nrounds);
|
||||
#ifdef CONFIG_AS_AVX2
|
||||
|
||||
asmlinkage void chacha_2block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
|
||||
unsigned int len, int nrounds);
|
||||
asmlinkage void chacha_4block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
|
||||
unsigned int len, int nrounds);
|
||||
asmlinkage void chacha_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
|
||||
unsigned int len, int nrounds);
|
||||
static bool chacha_use_avx2;
|
||||
#ifdef CONFIG_AS_AVX512
|
||||
|
||||
asmlinkage void chacha_2block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
|
||||
unsigned int len, int nrounds);
|
||||
asmlinkage void chacha_4block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
|
||||
unsigned int len, int nrounds);
|
||||
asmlinkage void chacha_8block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
|
||||
unsigned int len, int nrounds);
|
||||
static bool chacha_use_avx512vl;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_simd);
|
||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx2);
|
||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx512vl);
|
||||
|
||||
static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks)
|
||||
{
|
||||
|
@ -49,9 +49,8 @@ static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks)
|
|||
static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
|
||||
unsigned int bytes, int nrounds)
|
||||
{
|
||||
#ifdef CONFIG_AS_AVX2
|
||||
#ifdef CONFIG_AS_AVX512
|
||||
if (chacha_use_avx512vl) {
|
||||
if (IS_ENABLED(CONFIG_AS_AVX512) &&
|
||||
static_branch_likely(&chacha_use_avx512vl)) {
|
||||
while (bytes >= CHACHA_BLOCK_SIZE * 8) {
|
||||
chacha_8block_xor_avx512vl(state, dst, src, bytes,
|
||||
nrounds);
|
||||
|
@ -79,8 +78,9 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
|
|||
return;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
if (chacha_use_avx2) {
|
||||
|
||||
if (IS_ENABLED(CONFIG_AS_AVX2) &&
|
||||
static_branch_likely(&chacha_use_avx2)) {
|
||||
while (bytes >= CHACHA_BLOCK_SIZE * 8) {
|
||||
chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
|
||||
bytes -= CHACHA_BLOCK_SIZE * 8;
|
||||
|
@ -104,7 +104,7 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
|
|||
return;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
while (bytes >= CHACHA_BLOCK_SIZE * 4) {
|
||||
chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds);
|
||||
bytes -= CHACHA_BLOCK_SIZE * 4;
|
||||
|
@ -123,37 +123,76 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
|
|||
}
|
||||
}
|
||||
|
||||
static int chacha_simd_stream_xor(struct skcipher_walk *walk,
|
||||
void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
|
||||
{
|
||||
state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
|
||||
|
||||
if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) {
|
||||
hchacha_block_generic(state, stream, nrounds);
|
||||
} else {
|
||||
kernel_fpu_begin();
|
||||
hchacha_block_ssse3(state, stream, nrounds);
|
||||
kernel_fpu_end();
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(hchacha_block_arch);
|
||||
|
||||
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
|
||||
{
|
||||
state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
|
||||
|
||||
chacha_init_generic(state, key, iv);
|
||||
}
|
||||
EXPORT_SYMBOL(chacha_init_arch);
|
||||
|
||||
void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
|
||||
int nrounds)
|
||||
{
|
||||
state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
|
||||
|
||||
if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() ||
|
||||
bytes <= CHACHA_BLOCK_SIZE)
|
||||
return chacha_crypt_generic(state, dst, src, bytes, nrounds);
|
||||
|
||||
kernel_fpu_begin();
|
||||
chacha_dosimd(state, dst, src, bytes, nrounds);
|
||||
kernel_fpu_end();
|
||||
}
|
||||
EXPORT_SYMBOL(chacha_crypt_arch);
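/*
 * Note the bytes <= CHACHA_BLOCK_SIZE cutoff above: requests of at most one
 * block always take chacha_crypt_generic(), since a kernel_fpu_begin()/
 * kernel_fpu_end() round trip is unlikely to pay off for a single block.
 */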
|
||||
|
||||
static int chacha_simd_stream_xor(struct skcipher_request *req,
|
||||
const struct chacha_ctx *ctx, const u8 *iv)
|
||||
{
|
||||
u32 *state, state_buf[16 + 2] __aligned(8);
|
||||
int next_yield = 4096; /* bytes until next FPU yield */
|
||||
int err = 0;
|
||||
struct skcipher_walk walk;
|
||||
int err;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
|
||||
state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
|
||||
|
||||
crypto_chacha_init(state, ctx, iv);
|
||||
chacha_init_generic(state, ctx->key, iv);
|
||||
|
||||
while (walk->nbytes > 0) {
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
while (walk.nbytes > 0) {
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
|
||||
if (nbytes < walk->total) {
|
||||
nbytes = round_down(nbytes, walk->stride);
|
||||
next_yield -= nbytes;
|
||||
}
|
||||
if (nbytes < walk.total)
|
||||
nbytes = round_down(nbytes, walk.stride);
|
||||
|
||||
chacha_dosimd(state, walk->dst.virt.addr, walk->src.virt.addr,
|
||||
nbytes, ctx->nrounds);
|
||||
|
||||
if (next_yield <= 0) {
|
||||
/* temporarily allow preemption */
|
||||
kernel_fpu_end();
|
||||
if (!static_branch_likely(&chacha_use_simd) ||
|
||||
!crypto_simd_usable()) {
|
||||
chacha_crypt_generic(state, walk.dst.virt.addr,
|
||||
walk.src.virt.addr, nbytes,
|
||||
ctx->nrounds);
|
||||
} else {
|
||||
kernel_fpu_begin();
|
||||
next_yield = 4096;
|
||||
chacha_dosimd(state, walk.dst.virt.addr,
|
||||
walk.src.virt.addr, nbytes,
|
||||
ctx->nrounds);
|
||||
kernel_fpu_end();
|
||||
}
|
||||
|
||||
err = skcipher_walk_done(walk, walk->nbytes - nbytes);
|
||||
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
|
@ -163,55 +202,34 @@ static int chacha_simd(struct skcipher_request *req)
|
|||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
int err;
|
||||
|
||||
if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
|
||||
return crypto_chacha_crypt(req);
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
kernel_fpu_begin();
|
||||
err = chacha_simd_stream_xor(&walk, ctx, req->iv);
|
||||
kernel_fpu_end();
|
||||
return err;
|
||||
return chacha_simd_stream_xor(req, ctx, req->iv);
|
||||
}
|
||||
|
||||
static int xchacha_simd(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
struct chacha_ctx subctx;
|
||||
u32 *state, state_buf[16 + 2] __aligned(8);
|
||||
struct chacha_ctx subctx;
|
||||
u8 real_iv[16];
|
||||
int err;
|
||||
|
||||
if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
|
||||
return crypto_xchacha_crypt(req);
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
|
||||
state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
|
||||
crypto_chacha_init(state, ctx, req->iv);
|
||||
chacha_init_generic(state, ctx->key, req->iv);
|
||||
|
||||
kernel_fpu_begin();
|
||||
|
||||
hchacha_block_ssse3(state, subctx.key, ctx->nrounds);
|
||||
if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) {
|
||||
kernel_fpu_begin();
|
||||
hchacha_block_ssse3(state, subctx.key, ctx->nrounds);
|
||||
kernel_fpu_end();
|
||||
} else {
|
||||
hchacha_block_generic(state, subctx.key, ctx->nrounds);
|
||||
}
|
||||
subctx.nrounds = ctx->nrounds;
|
||||
|
||||
memcpy(&real_iv[0], req->iv + 24, 8);
|
||||
memcpy(&real_iv[8], req->iv + 16, 8);
|
||||
err = chacha_simd_stream_xor(&walk, &subctx, real_iv);
|
||||
|
||||
kernel_fpu_end();
|
||||
|
||||
return err;
|
||||
return chacha_simd_stream_xor(req, &subctx, real_iv);
|
||||
}
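/*
 * xchacha_simd() follows the XChaCha construction: hchacha derives a fresh
 * 256-bit subkey from the key and the first 16 bytes of the extended IV, and
 * the remaining IV bytes are rearranged into real_iv, which then drives an
 * ordinary ChaCha stream under that subkey via chacha_simd_stream_xor().
 */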
|
||||
|
||||
static struct skcipher_alg algs[] = {
|
||||
|
@ -227,7 +245,7 @@ static struct skcipher_alg algs[] = {
|
|||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = CHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.setkey = crypto_chacha20_setkey,
|
||||
.setkey = chacha20_setkey,
|
||||
.encrypt = chacha_simd,
|
||||
.decrypt = chacha_simd,
|
||||
}, {
|
||||
|
@ -242,7 +260,7 @@ static struct skcipher_alg algs[] = {
|
|||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = XCHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.setkey = crypto_chacha20_setkey,
|
||||
.setkey = chacha20_setkey,
|
||||
.encrypt = xchacha_simd,
|
||||
.decrypt = xchacha_simd,
|
||||
}, {
|
||||
|
@ -257,7 +275,7 @@ static struct skcipher_alg algs[] = {
|
|||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = XCHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.setkey = crypto_chacha12_setkey,
|
||||
.setkey = chacha12_setkey,
|
||||
.encrypt = xchacha_simd,
|
||||
.decrypt = xchacha_simd,
|
||||
},
|
||||
|
@ -266,24 +284,28 @@ static struct skcipher_alg algs[] = {
|
|||
static int __init chacha_simd_mod_init(void)
|
||||
{
|
||||
if (!boot_cpu_has(X86_FEATURE_SSSE3))
|
||||
return -ENODEV;
|
||||
return 0;
|
||||
|
||||
#ifdef CONFIG_AS_AVX2
|
||||
chacha_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) &&
|
||||
boot_cpu_has(X86_FEATURE_AVX2) &&
|
||||
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
|
||||
#ifdef CONFIG_AS_AVX512
|
||||
chacha_use_avx512vl = chacha_use_avx2 &&
|
||||
boot_cpu_has(X86_FEATURE_AVX512VL) &&
|
||||
boot_cpu_has(X86_FEATURE_AVX512BW); /* kmovq */
|
||||
#endif
|
||||
#endif
|
||||
static_branch_enable(&chacha_use_simd);
|
||||
|
||||
if (IS_ENABLED(CONFIG_AS_AVX2) &&
|
||||
boot_cpu_has(X86_FEATURE_AVX) &&
|
||||
boot_cpu_has(X86_FEATURE_AVX2) &&
|
||||
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
|
||||
static_branch_enable(&chacha_use_avx2);
|
||||
|
||||
if (IS_ENABLED(CONFIG_AS_AVX512) &&
|
||||
boot_cpu_has(X86_FEATURE_AVX512VL) &&
|
||||
boot_cpu_has(X86_FEATURE_AVX512BW)) /* kmovq */
|
||||
static_branch_enable(&chacha_use_avx512vl);
|
||||
}
|
||||
return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
|
||||
}
|
||||
|
||||
static void __exit chacha_simd_mod_fini(void)
|
||||
{
|
||||
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
|
||||
if (boot_cpu_has(X86_FEATURE_SSSE3))
|
||||
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
|
||||
}
|
||||
|
||||
module_init(chacha_simd_mod_init);
|
||||
|
|
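For reference, a minimal sketch of the library interface this file now backs (assuming the chacha_init()/chacha_crypt() wrappers in <crypto/chacha.h>, which dispatch to the *_arch functions above; illustrative only):

#include <crypto/chacha.h>
#include <asm/unaligned.h>

static void chacha20_xor_example(const u8 key[CHACHA_KEY_SIZE],
				 const u8 iv[CHACHA_IV_SIZE],
				 u8 *buf, unsigned int len)
{
	u32 state[16] __aligned(16);
	u32 key_words[CHACHA_KEY_SIZE / sizeof(u32)];
	int i;

	for (i = 0; i < 8; i++)
		key_words[i] = get_unaligned_le32(key + i * sizeof(u32));

	chacha_init(state, key_words, iv);
	/* XORs the 20-round keystream into buf in place. */
	chacha_crypt(state, buf, buf, len, 20);
}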
[The diff of one file is not shown here because of its size.]
|
@ -7,47 +7,23 @@
|
|||
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/poly1305.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/poly1305.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
struct poly1305_simd_desc_ctx {
|
||||
struct poly1305_desc_ctx base;
|
||||
/* derived key u set? */
|
||||
bool uset;
|
||||
#ifdef CONFIG_AS_AVX2
|
||||
/* derived keys r^3, r^4 set? */
|
||||
bool wset;
|
||||
#endif
|
||||
/* derived Poly1305 key r^2 */
|
||||
u32 u[5];
|
||||
/* ... silently appended r^3 and r^4 when using AVX2 */
|
||||
};
|
||||
|
||||
asmlinkage void poly1305_block_sse2(u32 *h, const u8 *src,
|
||||
const u32 *r, unsigned int blocks);
|
||||
asmlinkage void poly1305_2block_sse2(u32 *h, const u8 *src, const u32 *r,
|
||||
unsigned int blocks, const u32 *u);
|
||||
#ifdef CONFIG_AS_AVX2
|
||||
asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r,
|
||||
unsigned int blocks, const u32 *u);
|
||||
static bool poly1305_use_avx2;
|
||||
#endif
|
||||
|
||||
static int poly1305_simd_init(struct shash_desc *desc)
|
||||
{
|
||||
struct poly1305_simd_desc_ctx *sctx = shash_desc_ctx(desc);
|
||||
|
||||
sctx->uset = false;
|
||||
#ifdef CONFIG_AS_AVX2
|
||||
sctx->wset = false;
|
||||
#endif
|
||||
|
||||
return crypto_poly1305_init(desc);
|
||||
}
|
||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_simd);
|
||||
static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx2);
|
||||
|
||||
static void poly1305_simd_mult(u32 *a, const u32 *b)
|
||||
{
|
||||
|
@ -60,73 +36,86 @@ static void poly1305_simd_mult(u32 *a, const u32 *b)
|
|||
poly1305_block_sse2(a, m, b, 1);
|
||||
}
|
||||
|
||||
static unsigned int poly1305_scalar_blocks(struct poly1305_desc_ctx *dctx,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
unsigned int datalen;
|
||||
|
||||
if (unlikely(!dctx->sset)) {
|
||||
datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
|
||||
src += srclen - datalen;
|
||||
srclen = datalen;
|
||||
}
|
||||
if (srclen >= POLY1305_BLOCK_SIZE) {
|
||||
poly1305_core_blocks(&dctx->h, dctx->r, src,
|
||||
srclen / POLY1305_BLOCK_SIZE, 1);
|
||||
srclen %= POLY1305_BLOCK_SIZE;
|
||||
}
|
||||
return srclen;
|
||||
}
|
||||
|
||||
static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
struct poly1305_simd_desc_ctx *sctx;
|
||||
unsigned int blocks, datalen;
|
||||
|
||||
BUILD_BUG_ON(offsetof(struct poly1305_simd_desc_ctx, base));
|
||||
sctx = container_of(dctx, struct poly1305_simd_desc_ctx, base);
|
||||
|
||||
if (unlikely(!dctx->sset)) {
|
||||
datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
|
||||
src += srclen - datalen;
|
||||
srclen = datalen;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_AS_AVX2
|
||||
if (poly1305_use_avx2 && srclen >= POLY1305_BLOCK_SIZE * 4) {
|
||||
if (unlikely(!sctx->wset)) {
|
||||
if (!sctx->uset) {
|
||||
memcpy(sctx->u, dctx->r.r, sizeof(sctx->u));
|
||||
poly1305_simd_mult(sctx->u, dctx->r.r);
|
||||
sctx->uset = true;
|
||||
if (IS_ENABLED(CONFIG_AS_AVX2) &&
|
||||
static_branch_likely(&poly1305_use_avx2) &&
|
||||
srclen >= POLY1305_BLOCK_SIZE * 4) {
|
||||
if (unlikely(dctx->rset < 4)) {
|
||||
if (dctx->rset < 2) {
|
||||
dctx->r[1] = dctx->r[0];
|
||||
poly1305_simd_mult(dctx->r[1].r, dctx->r[0].r);
|
||||
}
|
||||
memcpy(sctx->u + 5, sctx->u, sizeof(sctx->u));
|
||||
poly1305_simd_mult(sctx->u + 5, dctx->r.r);
|
||||
memcpy(sctx->u + 10, sctx->u + 5, sizeof(sctx->u));
|
||||
poly1305_simd_mult(sctx->u + 10, dctx->r.r);
|
||||
sctx->wset = true;
|
||||
dctx->r[2] = dctx->r[1];
|
||||
poly1305_simd_mult(dctx->r[2].r, dctx->r[0].r);
|
||||
dctx->r[3] = dctx->r[2];
|
||||
poly1305_simd_mult(dctx->r[3].r, dctx->r[0].r);
|
||||
dctx->rset = 4;
|
||||
}
|
||||
blocks = srclen / (POLY1305_BLOCK_SIZE * 4);
|
||||
poly1305_4block_avx2(dctx->h.h, src, dctx->r.r, blocks,
|
||||
sctx->u);
|
||||
poly1305_4block_avx2(dctx->h.h, src, dctx->r[0].r, blocks,
|
||||
dctx->r[1].r);
|
||||
src += POLY1305_BLOCK_SIZE * 4 * blocks;
|
||||
srclen -= POLY1305_BLOCK_SIZE * 4 * blocks;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) {
|
||||
if (unlikely(!sctx->uset)) {
|
||||
memcpy(sctx->u, dctx->r.r, sizeof(sctx->u));
|
||||
poly1305_simd_mult(sctx->u, dctx->r.r);
|
||||
sctx->uset = true;
|
||||
if (unlikely(dctx->rset < 2)) {
|
||||
dctx->r[1] = dctx->r[0];
|
||||
poly1305_simd_mult(dctx->r[1].r, dctx->r[0].r);
|
||||
dctx->rset = 2;
|
||||
}
|
||||
blocks = srclen / (POLY1305_BLOCK_SIZE * 2);
|
||||
poly1305_2block_sse2(dctx->h.h, src, dctx->r.r, blocks,
|
||||
sctx->u);
|
||||
poly1305_2block_sse2(dctx->h.h, src, dctx->r[0].r,
|
||||
blocks, dctx->r[1].r);
|
||||
src += POLY1305_BLOCK_SIZE * 2 * blocks;
|
||||
srclen -= POLY1305_BLOCK_SIZE * 2 * blocks;
|
||||
}
|
||||
if (srclen >= POLY1305_BLOCK_SIZE) {
|
||||
poly1305_block_sse2(dctx->h.h, src, dctx->r.r, 1);
|
||||
poly1305_block_sse2(dctx->h.h, src, dctx->r[0].r, 1);
|
||||
srclen -= POLY1305_BLOCK_SIZE;
|
||||
}
|
||||
return srclen;
|
||||
}
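/*
 * The multi-block paths above rely on Horner evaluation with precomputed key
 * powers: since h_i = (h_{i-1} + c_i) * r, two blocks at a time compute
 * h = (h + c1) * r^2 + c2 * r, so the SSE2 path needs r and r^2 (dctx->r[0]
 * and dctx->r[1]), while the AVX2 path, which processes four blocks per
 * call, additionally needs r^3 and r^4 (dctx->r[2] and dctx->r[3]).
 */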
|
||||
|
||||
static int poly1305_simd_update(struct shash_desc *desc,
|
||||
const u8 *src, unsigned int srclen)
|
||||
void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key)
|
||||
{
|
||||
poly1305_init_generic(desc, key);
|
||||
}
|
||||
EXPORT_SYMBOL(poly1305_init_arch);
|
||||
|
||||
void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
|
||||
unsigned int srclen)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
unsigned int bytes;
|
||||
|
||||
/* kernel_fpu_begin/end is costly, use fallback for small updates */
|
||||
if (srclen <= 288 || !crypto_simd_usable())
|
||||
return crypto_poly1305_update(desc, src, srclen);
|
||||
|
||||
kernel_fpu_begin();
|
||||
|
||||
if (unlikely(dctx->buflen)) {
|
||||
bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
|
||||
memcpy(dctx->buf + dctx->buflen, src, bytes);
|
||||
|
@ -135,34 +124,84 @@ static int poly1305_simd_update(struct shash_desc *desc,
|
|||
dctx->buflen += bytes;
|
||||
|
||||
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
|
||||
poly1305_simd_blocks(dctx, dctx->buf,
|
||||
POLY1305_BLOCK_SIZE);
|
||||
if (static_branch_likely(&poly1305_use_simd) &&
|
||||
likely(crypto_simd_usable())) {
|
||||
kernel_fpu_begin();
|
||||
poly1305_simd_blocks(dctx, dctx->buf,
|
||||
POLY1305_BLOCK_SIZE);
|
||||
kernel_fpu_end();
|
||||
} else {
|
||||
poly1305_scalar_blocks(dctx, dctx->buf,
|
||||
POLY1305_BLOCK_SIZE);
|
||||
}
|
||||
dctx->buflen = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
|
||||
bytes = poly1305_simd_blocks(dctx, src, srclen);
|
||||
if (static_branch_likely(&poly1305_use_simd) &&
|
||||
likely(crypto_simd_usable())) {
|
||||
kernel_fpu_begin();
|
||||
bytes = poly1305_simd_blocks(dctx, src, srclen);
|
||||
kernel_fpu_end();
|
||||
} else {
|
||||
bytes = poly1305_scalar_blocks(dctx, src, srclen);
|
||||
}
|
||||
src += srclen - bytes;
|
||||
srclen = bytes;
|
||||
}
|
||||
|
||||
kernel_fpu_end();
|
||||
|
||||
if (unlikely(srclen)) {
|
||||
dctx->buflen = srclen;
|
||||
memcpy(dctx->buf, src, srclen);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(poly1305_update_arch);
|
||||
|
||||
void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest)
|
||||
{
|
||||
poly1305_final_generic(desc, digest);
|
||||
}
|
||||
EXPORT_SYMBOL(poly1305_final_arch);
|
||||
|
||||
static int crypto_poly1305_init(struct shash_desc *desc)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
poly1305_core_init(&dctx->h);
|
||||
dctx->buflen = 0;
|
||||
dctx->rset = 0;
|
||||
dctx->sset = false;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
if (unlikely(!dctx->sset))
|
||||
return -ENOKEY;
|
||||
|
||||
poly1305_final_generic(dctx, dst);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int poly1305_simd_update(struct shash_desc *desc,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
poly1305_update_arch(dctx, src, srclen);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct shash_alg alg = {
|
||||
.digestsize = POLY1305_DIGEST_SIZE,
|
||||
.init = poly1305_simd_init,
|
||||
.init = crypto_poly1305_init,
|
||||
.update = poly1305_simd_update,
|
||||
.final = crypto_poly1305_final,
|
||||
.descsize = sizeof(struct poly1305_simd_desc_ctx),
|
||||
.descsize = sizeof(struct poly1305_desc_ctx),
|
||||
.base = {
|
||||
.cra_name = "poly1305",
|
||||
.cra_driver_name = "poly1305-simd",
|
||||
|
@ -175,16 +214,16 @@ static struct shash_alg alg = {
|
|||
static int __init poly1305_simd_mod_init(void)
|
||||
{
|
||||
if (!boot_cpu_has(X86_FEATURE_XMM2))
|
||||
return -ENODEV;
|
||||
return 0;
|
||||
|
||||
static_branch_enable(&poly1305_use_simd);
|
||||
|
||||
if (IS_ENABLED(CONFIG_AS_AVX2) &&
|
||||
boot_cpu_has(X86_FEATURE_AVX) &&
|
||||
boot_cpu_has(X86_FEATURE_AVX2) &&
|
||||
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
|
||||
static_branch_enable(&poly1305_use_avx2);
|
||||
|
||||
#ifdef CONFIG_AS_AVX2
|
||||
poly1305_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) &&
|
||||
boot_cpu_has(X86_FEATURE_AVX2) &&
|
||||
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
|
||||
alg.descsize = sizeof(struct poly1305_simd_desc_ctx);
|
||||
if (poly1305_use_avx2)
|
||||
alg.descsize += 10 * sizeof(u32);
|
||||
#endif
|
||||
return crypto_register_shash(&alg);
|
||||
}
|
||||
|
||||
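Similarly, a minimal sketch of the Poly1305 library interface these hooks implement (assuming the poly1305_init()/poly1305_update()/poly1305_final() wrappers in <crypto/poly1305.h>; illustrative only, not part of this diff):

#include <crypto/poly1305.h>

static void poly1305_mac_example(const u8 key[POLY1305_KEY_SIZE],
				 const u8 *msg, unsigned int len,
				 u8 mac[POLY1305_DIGEST_SIZE])
{
	struct poly1305_desc_ctx desc;

	poly1305_init(&desc, key);	/* absorbs the one-time 32-byte key */
	poly1305_update(&desc, msg, len);
	poly1305_final(&desc, mac);	/* writes the 16-byte tag */
}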
|
|
crypto/Kconfig
|
@ -52,12 +52,12 @@ config CRYPTO_AEAD2
|
|||
select CRYPTO_NULL2
|
||||
select CRYPTO_RNG2
|
||||
|
||||
config CRYPTO_BLKCIPHER
|
||||
config CRYPTO_SKCIPHER
|
||||
tristate
|
||||
select CRYPTO_BLKCIPHER2
|
||||
select CRYPTO_SKCIPHER2
|
||||
select CRYPTO_ALGAPI
|
||||
|
||||
config CRYPTO_BLKCIPHER2
|
||||
config CRYPTO_SKCIPHER2
|
||||
tristate
|
||||
select CRYPTO_ALGAPI2
|
||||
select CRYPTO_RNG2
|
||||
|
@ -123,7 +123,7 @@ config CRYPTO_MANAGER2
|
|||
def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
|
||||
select CRYPTO_AEAD2
|
||||
select CRYPTO_HASH2
|
||||
select CRYPTO_BLKCIPHER2
|
||||
select CRYPTO_SKCIPHER2
|
||||
select CRYPTO_AKCIPHER2
|
||||
select CRYPTO_KPP2
|
||||
select CRYPTO_ACOMP2
|
||||
|
@ -169,7 +169,7 @@ config CRYPTO_NULL
|
|||
config CRYPTO_NULL2
|
||||
tristate
|
||||
select CRYPTO_ALGAPI2
|
||||
select CRYPTO_BLKCIPHER2
|
||||
select CRYPTO_SKCIPHER2
|
||||
select CRYPTO_HASH2
|
||||
|
||||
config CRYPTO_PCRYPT
|
||||
|
@ -184,7 +184,7 @@ config CRYPTO_PCRYPT
|
|||
|
||||
config CRYPTO_CRYPTD
|
||||
tristate "Software async crypto daemon"
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_HASH
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
|
@ -195,7 +195,7 @@ config CRYPTO_CRYPTD
|
|||
config CRYPTO_AUTHENC
|
||||
tristate "Authenc support"
|
||||
select CRYPTO_AEAD
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_MANAGER
|
||||
select CRYPTO_HASH
|
||||
select CRYPTO_NULL
|
||||
|
@ -217,7 +217,7 @@ config CRYPTO_SIMD
|
|||
config CRYPTO_GLUE_HELPER_X86
|
||||
tristate
|
||||
depends on X86
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
|
||||
config CRYPTO_ENGINE
|
||||
tristate
|
||||
|
@ -264,6 +264,17 @@ config CRYPTO_ECRDSA
|
|||
standard algorithms (called GOST algorithms). Only signature verification
|
||||
is implemented.
|
||||
|
||||
config CRYPTO_CURVE25519
|
||||
tristate "Curve25519 algorithm"
|
||||
select CRYPTO_KPP
|
||||
select CRYPTO_LIB_CURVE25519_GENERIC
|
||||
|
||||
config CRYPTO_CURVE25519_X86
|
||||
tristate "x86_64 accelerated Curve25519 scalar multiplication library"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_LIB_CURVE25519_GENERIC
|
||||
select CRYPTO_ARCH_HAVE_LIB_CURVE25519
|
||||
|
||||
comment "Authenticated Encryption with Associated Data"
|
||||
|
||||
config CRYPTO_CCM
|
||||
|
@ -309,6 +320,7 @@ config CRYPTO_AEGIS128
|
|||
config CRYPTO_AEGIS128_SIMD
|
||||
bool "Support SIMD acceleration for AEGIS-128"
|
||||
depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON)
|
||||
depends on !ARM || CC_IS_CLANG || GCC_VERSION >= 40800
|
||||
default y
|
||||
|
||||
config CRYPTO_AEGIS128_AESNI_SSE2
|
||||
|
@ -322,7 +334,7 @@ config CRYPTO_AEGIS128_AESNI_SSE2
|
|||
config CRYPTO_SEQIV
|
||||
tristate "Sequence Number IV Generator"
|
||||
select CRYPTO_AEAD
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_NULL
|
||||
select CRYPTO_RNG_DEFAULT
|
||||
select CRYPTO_MANAGER
|
||||
|
@ -345,7 +357,7 @@ comment "Block modes"
|
|||
|
||||
config CRYPTO_CBC
|
||||
tristate "CBC support"
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
CBC: Cipher Block Chaining mode
|
||||
|
@ -353,7 +365,7 @@ config CRYPTO_CBC
|
|||
|
||||
config CRYPTO_CFB
|
||||
tristate "CFB support"
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
CFB: Cipher FeedBack mode
|
||||
|
@ -361,7 +373,7 @@ config CRYPTO_CFB
|
|||
|
||||
config CRYPTO_CTR
|
||||
tristate "CTR support"
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_SEQIV
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
|
@ -370,7 +382,7 @@ config CRYPTO_CTR
|
|||
|
||||
config CRYPTO_CTS
|
||||
tristate "CTS support"
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
CTS: Cipher Text Stealing
|
||||
|
@ -385,7 +397,7 @@ config CRYPTO_CTS
|
|||
|
||||
config CRYPTO_ECB
|
||||
tristate "ECB support"
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
ECB: Electronic CodeBook mode
|
||||
|
@ -394,7 +406,7 @@ config CRYPTO_ECB
|
|||
|
||||
config CRYPTO_LRW
|
||||
tristate "LRW support"
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_MANAGER
|
||||
select CRYPTO_GF128MUL
|
||||
help
|
||||
|
@ -406,7 +418,7 @@ config CRYPTO_LRW
|
|||
|
||||
config CRYPTO_OFB
|
||||
tristate "OFB support"
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
OFB: the Output Feedback mode makes a block cipher into a synchronous
|
||||
|
@ -418,7 +430,7 @@ config CRYPTO_OFB
|
|||
|
||||
config CRYPTO_PCBC
|
||||
tristate "PCBC support"
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
PCBC: Propagating Cipher Block Chaining mode
|
||||
|
@ -426,7 +438,7 @@ config CRYPTO_PCBC
|
|||
|
||||
config CRYPTO_XTS
|
||||
tristate "XTS support"
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_MANAGER
|
||||
select CRYPTO_ECB
|
||||
help
|
||||
|
@ -436,7 +448,7 @@ config CRYPTO_XTS
|
|||
|
||||
config CRYPTO_KEYWRAP
|
||||
tristate "Key wrapping support"
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
Support for key wrapping (NIST SP800-38F / RFC3394) without
|
||||
|
@ -445,7 +457,7 @@ config CRYPTO_KEYWRAP
|
|||
config CRYPTO_NHPOLY1305
|
||||
tristate
|
||||
select CRYPTO_HASH
|
||||
select CRYPTO_POLY1305
|
||||
select CRYPTO_LIB_POLY1305_GENERIC
|
||||
|
||||
config CRYPTO_NHPOLY1305_SSE2
|
||||
tristate "NHPoly1305 hash function (x86_64 SSE2 implementation)"
|
||||
|
@ -466,7 +478,7 @@ config CRYPTO_NHPOLY1305_AVX2
|
|||
config CRYPTO_ADIANTUM
|
||||
tristate "Adiantum support"
|
||||
select CRYPTO_CHACHA20
|
||||
select CRYPTO_POLY1305
|
||||
select CRYPTO_LIB_POLY1305_GENERIC
|
||||
select CRYPTO_NHPOLY1305
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
|
@ -638,6 +650,47 @@ config CRYPTO_XXHASH
|
|||
xxHash non-cryptographic hash algorithm. Extremely fast, working at
|
||||
speeds close to RAM limits.
|
||||
|
||||
config CRYPTO_BLAKE2B
|
||||
tristate "BLAKE2b digest algorithm"
|
||||
select CRYPTO_HASH
|
||||
help
|
||||
Implementation of the BLAKE2b cryptographic hash function (also known
simply as BLAKE2). It is optimized for 64-bit platforms and produces
digests of any size between 1 and 64 bytes. The keyed hash is also
implemented.
|
||||
|
||||
This module provides the following algorithms:
|
||||
|
||||
- blake2b-160
|
||||
- blake2b-256
|
||||
- blake2b-384
|
||||
- blake2b-512
|
||||
|
||||
See https://blake2.net for further information.
|
||||
|
||||
config CRYPTO_BLAKE2S
|
||||
tristate "BLAKE2s digest algorithm"
|
||||
select CRYPTO_LIB_BLAKE2S_GENERIC
|
||||
select CRYPTO_HASH
|
||||
help
|
||||
Implementation of cryptographic hash function BLAKE2s
|
||||
optimized for 8-32bit platforms and can produce digests of any size
|
||||
between 1 to 32. The keyed hash is also implemented.
|
||||
|
||||
This module provides the following algorithms:
|
||||
|
||||
- blake2s-128
|
||||
- blake2s-160
|
||||
- blake2s-224
|
||||
- blake2s-256
|
||||
|
||||
See https://blake2.net for further information.
|
||||
|
||||
config CRYPTO_BLAKE2S_X86
|
||||
tristate "BLAKE2s digest algorithm (x86 accelerated version)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_LIB_BLAKE2S_GENERIC
|
||||
select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
|
||||
|
||||
config CRYPTO_CRCT10DIF
|
||||
tristate "CRCT10DIF algorithm"
|
||||
select CRYPTO_HASH
|
||||
|
@ -685,6 +738,7 @@ config CRYPTO_GHASH
|
|||
config CRYPTO_POLY1305
|
||||
tristate "Poly1305 authenticator algorithm"
|
||||
select CRYPTO_HASH
|
||||
select CRYPTO_LIB_POLY1305_GENERIC
|
||||
help
|
||||
Poly1305 authenticator algorithm, RFC7539.
|
||||
|
||||
|
@ -695,7 +749,8 @@ config CRYPTO_POLY1305
|
|||
config CRYPTO_POLY1305_X86_64
|
||||
tristate "Poly1305 authenticator algorithm (x86_64/SSE2/AVX2)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_POLY1305
|
||||
select CRYPTO_LIB_POLY1305_GENERIC
|
||||
select CRYPTO_ARCH_HAVE_LIB_POLY1305
|
||||
help
|
||||
Poly1305 authenticator algorithm, RFC7539.
|
||||
|
||||
|
@ -704,6 +759,11 @@ config CRYPTO_POLY1305_X86_64
|
|||
in IETF protocols. This is the x86_64 assembler implementation using SIMD
|
||||
instructions.
|
||||
|
||||
config CRYPTO_POLY1305_MIPS
|
||||
tristate "Poly1305 authenticator algorithm (MIPS optimized)"
|
||||
depends on CPU_MIPS32 || (CPU_MIPS64 && 64BIT)
|
||||
select CRYPTO_ARCH_HAVE_LIB_POLY1305
|
||||
|
||||
config CRYPTO_MD4
|
||||
tristate "MD4 digest algorithm"
|
||||
select CRYPTO_HASH
|
||||
|
@ -877,9 +937,6 @@ config CRYPTO_SHA1_PPC_SPE
|
|||
SHA-1 secure hash standard (DFIPS 180-4) implemented
|
||||
using powerpc SPE SIMD instruction set.
|
||||
|
||||
config CRYPTO_LIB_SHA256
|
||||
tristate
|
||||
|
||||
config CRYPTO_SHA256
|
||||
tristate "SHA224 and SHA256 digest algorithm"
|
||||
select CRYPTO_HASH
|
||||
|
@ -1018,9 +1075,6 @@ config CRYPTO_GHASH_CLMUL_NI_INTEL
|
|||
|
||||
comment "Ciphers"
|
||||
|
||||
config CRYPTO_LIB_AES
|
||||
tristate
|
||||
|
||||
config CRYPTO_AES
|
||||
tristate "AES cipher algorithms"
|
||||
select CRYPTO_ALGAPI
|
||||
|
@ -1067,7 +1121,7 @@ config CRYPTO_AES_NI_INTEL
|
|||
select CRYPTO_AEAD
|
||||
select CRYPTO_LIB_AES
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_GLUE_HELPER_X86 if 64BIT
|
||||
select CRYPTO_SIMD
|
||||
help
|
||||
|
@ -1097,8 +1151,7 @@ config CRYPTO_AES_NI_INTEL
|
|||
config CRYPTO_AES_SPARC64
|
||||
tristate "AES cipher algorithms (SPARC64)"
|
||||
depends on SPARC64
|
||||
select CRYPTO_CRYPTD
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_SKCIPHER
|
||||
help
|
||||
Use SPARC64 crypto opcodes for AES algorithm.
|
||||
|
||||
|
@ -1125,6 +1178,7 @@ config CRYPTO_AES_SPARC64
|
|||
config CRYPTO_AES_PPC_SPE
|
||||
tristate "AES cipher algorithms (PPC SPE)"
|
||||
depends on PPC && SPE
|
||||
select CRYPTO_SKCIPHER
|
||||
help
|
||||
AES cipher algorithms (FIPS-197). Additionally the acceleration
|
||||
for popular block cipher modes ECB, CBC, CTR and XTS is supported.
|
||||
|
@ -1149,12 +1203,9 @@ config CRYPTO_ANUBIS
|
|||
<https://www.cosic.esat.kuleuven.be/nessie/reports/>
|
||||
<http://www.larc.usp.br/~pbarreto/AnubisPage.html>
|
||||
|
||||
config CRYPTO_LIB_ARC4
|
||||
tristate
|
||||
|
||||
config CRYPTO_ARC4
|
||||
tristate "ARC4 cipher algorithm"
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_LIB_ARC4
|
||||
help
|
||||
ARC4 cipher algorithm.
|
||||
|
@ -1190,7 +1241,7 @@ config CRYPTO_BLOWFISH_COMMON
|
|||
config CRYPTO_BLOWFISH_X86_64
|
||||
tristate "Blowfish cipher algorithm (x86_64)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_BLOWFISH_COMMON
|
||||
help
|
||||
Blowfish cipher algorithm (x86_64), by Bruce Schneier.
|
||||
|
@ -1221,7 +1272,7 @@ config CRYPTO_CAMELLIA_X86_64
|
|||
tristate "Camellia cipher algorithm (x86_64)"
|
||||
depends on X86 && 64BIT
|
||||
depends on CRYPTO
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
help
|
||||
Camellia cipher algorithm module (x86_64).
|
||||
|
@ -1238,7 +1289,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
|
|||
tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX)"
|
||||
depends on X86 && 64BIT
|
||||
depends on CRYPTO
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_CAMELLIA_X86_64
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_SIMD
|
||||
|
@ -1275,6 +1326,7 @@ config CRYPTO_CAMELLIA_SPARC64
|
|||
depends on SPARC64
|
||||
depends on CRYPTO
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_SKCIPHER
|
||||
help
|
||||
Camellia cipher algorithm module (SPARC64).
|
||||
|
||||
|
@ -1303,7 +1355,7 @@ config CRYPTO_CAST5
|
|||
config CRYPTO_CAST5_AVX_X86_64
|
||||
tristate "CAST5 (CAST-128) cipher algorithm (x86_64/AVX)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_CAST5
|
||||
select CRYPTO_CAST_COMMON
|
||||
select CRYPTO_SIMD
|
||||
|
@ -1325,7 +1377,7 @@ config CRYPTO_CAST6
|
|||
config CRYPTO_CAST6_AVX_X86_64
|
||||
tristate "CAST6 (CAST-256) cipher algorithm (x86_64/AVX)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_CAST6
|
||||
select CRYPTO_CAST_COMMON
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
|
@ -1338,9 +1390,6 @@ config CRYPTO_CAST6_AVX_X86_64
|
|||
This module provides the Cast6 cipher algorithm that processes
|
||||
eight blocks parallel using the AVX instruction set.
|
||||
|
||||
config CRYPTO_LIB_DES
|
||||
tristate
|
||||
|
||||
config CRYPTO_DES
|
||||
tristate "DES and Triple DES EDE cipher algorithms"
|
||||
select CRYPTO_ALGAPI
|
||||
|
@ -1353,6 +1402,7 @@ config CRYPTO_DES_SPARC64
|
|||
depends on SPARC64
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_LIB_DES
|
||||
select CRYPTO_SKCIPHER
|
||||
help
|
||||
DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3),
|
||||
optimized using SPARC64 crypto opcodes.
|
||||
|
@ -1360,7 +1410,7 @@ config CRYPTO_DES_SPARC64
|
|||
config CRYPTO_DES3_EDE_X86_64
|
||||
tristate "Triple DES EDE cipher algorithm (x86-64)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_LIB_DES
|
||||
help
|
||||
Triple DES EDE (FIPS 46-3) algorithm.
|
||||
|
@ -1373,7 +1423,7 @@ config CRYPTO_DES3_EDE_X86_64
|
|||
config CRYPTO_FCRYPT
|
||||
tristate "FCrypt cipher algorithm"
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
help
|
||||
FCrypt algorithm used by RxRPC.
|
||||
|
||||
|
@ -1392,7 +1442,7 @@ config CRYPTO_KHAZAD
|
|||
|
||||
config CRYPTO_SALSA20
|
||||
tristate "Salsa20 stream cipher algorithm"
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
help
|
||||
Salsa20 stream cipher algorithm.
|
||||
|
||||
|
@@ -1404,7 +1454,8 @@ config CRYPTO_SALSA20

config CRYPTO_CHACHA20
	tristate "ChaCha stream cipher algorithms"
	select CRYPTO_BLKCIPHER
	select CRYPTO_LIB_CHACHA_GENERIC
	select CRYPTO_SKCIPHER
	help
	  The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms.

@@ -1426,12 +1477,19 @@ config CRYPTO_CHACHA20
config CRYPTO_CHACHA20_X86_64
	tristate "ChaCha stream cipher algorithms (x86_64/SSSE3/AVX2/AVX-512VL)"
	depends on X86 && 64BIT
	select CRYPTO_BLKCIPHER
	select CRYPTO_CHACHA20
	select CRYPTO_SKCIPHER
	select CRYPTO_LIB_CHACHA_GENERIC
	select CRYPTO_ARCH_HAVE_LIB_CHACHA
	help
	  SSSE3, AVX2, and AVX-512VL optimized implementations of the ChaCha20,
	  XChaCha20, and XChaCha12 stream ciphers.

config CRYPTO_CHACHA_MIPS
	tristate "ChaCha stream cipher algorithms (MIPS 32r2 optimized)"
	depends on CPU_MIPS32_R2
	select CRYPTO_SKCIPHER
	select CRYPTO_ARCH_HAVE_LIB_CHACHA

config CRYPTO_SEED
	tristate "SEED cipher algorithm"
	select CRYPTO_ALGAPI
@ -1462,7 +1520,7 @@ config CRYPTO_SERPENT
|
|||
config CRYPTO_SERPENT_SSE2_X86_64
|
||||
tristate "Serpent cipher algorithm (x86_64/SSE2)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_SERPENT
|
||||
select CRYPTO_SIMD
|
||||
|
@ -1481,7 +1539,7 @@ config CRYPTO_SERPENT_SSE2_X86_64
|
|||
config CRYPTO_SERPENT_SSE2_586
|
||||
tristate "Serpent cipher algorithm (i586/SSE2)"
|
||||
depends on X86 && !64BIT
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_SERPENT
|
||||
select CRYPTO_SIMD
|
||||
|
@ -1500,7 +1558,7 @@ config CRYPTO_SERPENT_SSE2_586
|
|||
config CRYPTO_SERPENT_AVX_X86_64
|
||||
tristate "Serpent cipher algorithm (x86_64/AVX)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_SERPENT
|
||||
select CRYPTO_SIMD
|
||||
|
@ -1631,7 +1689,7 @@ config CRYPTO_TWOFISH_X86_64
|
|||
config CRYPTO_TWOFISH_X86_64_3WAY
|
||||
tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_TWOFISH_COMMON
|
||||
select CRYPTO_TWOFISH_X86_64
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
|
@ -1652,7 +1710,7 @@ config CRYPTO_TWOFISH_X86_64_3WAY
|
|||
config CRYPTO_TWOFISH_AVX_X86_64
|
||||
tristate "Twofish cipher algorithm (x86_64/AVX)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_SIMD
|
||||
select CRYPTO_TWOFISH_COMMON
|
||||
|
@ -1803,7 +1861,7 @@ config CRYPTO_USER_API_HASH
|
|||
config CRYPTO_USER_API_SKCIPHER
|
||||
tristate "User-space interface for symmetric key cipher algorithms"
|
||||
depends on NET
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_USER_API
|
||||
help
|
||||
This option enables the user-spaces interface for symmetric
|
||||
|
@ -1822,7 +1880,7 @@ config CRYPTO_USER_API_AEAD
|
|||
tristate "User-space interface for AEAD cipher algorithms"
|
||||
depends on NET
|
||||
select CRYPTO_AEAD
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_NULL
|
||||
select CRYPTO_USER_API
|
||||
help
|
||||
|
@@ -1844,6 +1902,7 @@ config CRYPTO_STATS

config CRYPTO_HASH_INFO
	bool

source "lib/crypto/Kconfig"
source "drivers/crypto/Kconfig"
source "crypto/asymmetric_keys/Kconfig"
source "certs/Kconfig"
@@ -14,11 +14,9 @@ crypto_algapi-y := algapi.o scatterwalk.o $(crypto_algapi-y)
obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o

obj-$(CONFIG_CRYPTO_AEAD2) += aead.o
obj-$(CONFIG_CRYPTO_AEAD2) += geniv.o

crypto_blkcipher-y := ablkcipher.o
crypto_blkcipher-y += blkcipher.o
crypto_blkcipher-y += skcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
obj-$(CONFIG_CRYPTO_SKCIPHER2) += skcipher.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o

@@ -74,6 +72,8 @@ obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b_generic.o
obj-$(CONFIG_CRYPTO_BLAKE2S) += blake2s_generic.o
obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
obj-$(CONFIG_CRYPTO_CBC) += cbc.o

@@ -93,7 +93,7 @@ obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o
aegis128-y := aegis128-core.o

ifeq ($(ARCH),arm)
CFLAGS_aegis128-neon-inner.o += -ffreestanding -march=armv7-a -mfloat-abi=softfp
CFLAGS_aegis128-neon-inner.o += -ffreestanding -march=armv8-a -mfloat-abi=softfp
CFLAGS_aegis128-neon-inner.o += -mfpu=crypto-neon-fp-armv8
aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o
endif

@@ -166,6 +166,7 @@ obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
obj-$(CONFIG_CRYPTO_OFB) += ofb.o
obj-$(CONFIG_CRYPTO_ECC) += ecc.o
obj-$(CONFIG_CRYPTO_ESSIV) += essiv.o
obj-$(CONFIG_CRYPTO_CURVE25519) += curve25519-generic.o

ecdh_generic-y += ecdh.o
ecdh_generic-y += ecdh_helper.o

@ -1,407 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Asynchronous block chaining cipher operations.
|
||||
*
|
||||
* This is the asynchronous version of blkcipher.c indicating completion
|
||||
* via a callback.
|
||||
*
|
||||
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
*/
|
||||
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/cryptouser.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <net/netlink.h>
|
||||
|
||||
#include <crypto/scatterwalk.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
struct ablkcipher_buffer {
|
||||
struct list_head entry;
|
||||
struct scatter_walk dst;
|
||||
unsigned int len;
|
||||
void *data;
|
||||
};
|
||||
|
||||
enum {
|
||||
ABLKCIPHER_WALK_SLOW = 1 << 0,
|
||||
};
|
||||
|
||||
static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
|
||||
{
|
||||
scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
|
||||
}
|
||||
|
||||
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
|
||||
{
|
||||
struct ablkcipher_buffer *p, *tmp;
|
||||
|
||||
list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
|
||||
ablkcipher_buffer_write(p);
|
||||
list_del(&p->entry);
|
||||
kfree(p);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
|
||||
|
||||
static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
|
||||
struct ablkcipher_buffer *p)
|
||||
{
|
||||
p->dst = walk->out;
|
||||
list_add_tail(&p->entry, &walk->buffers);
|
||||
}
|
||||
|
||||
/* Get a spot of the specified length that does not straddle a page.
|
||||
* The caller needs to ensure that there is enough space for this operation.
|
||||
*/
|
||||
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
|
||||
{
|
||||
u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
|
||||
|
||||
return max(start, end_page);
|
||||
}
|
||||
|
||||
static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
|
||||
unsigned int n)
|
||||
{
|
||||
for (;;) {
|
||||
unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
|
||||
|
||||
if (len_this_page > n)
|
||||
len_this_page = n;
|
||||
scatterwalk_advance(&walk->out, n);
|
||||
if (n == len_this_page)
|
||||
break;
|
||||
n -= len_this_page;
|
||||
scatterwalk_start(&walk->out, sg_next(walk->out.sg));
|
||||
}
|
||||
}
|
||||
|
||||
static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
|
||||
unsigned int n)
|
||||
{
|
||||
scatterwalk_advance(&walk->in, n);
|
||||
scatterwalk_advance(&walk->out, n);
|
||||
}
|
||||
|
||||
static int ablkcipher_walk_next(struct ablkcipher_request *req,
|
||||
struct ablkcipher_walk *walk);
|
||||
|
||||
int ablkcipher_walk_done(struct ablkcipher_request *req,
|
||||
struct ablkcipher_walk *walk, int err)
|
||||
{
|
||||
struct crypto_tfm *tfm = req->base.tfm;
|
||||
unsigned int n; /* bytes processed */
|
||||
bool more;
|
||||
|
||||
if (unlikely(err < 0))
|
||||
goto finish;
|
||||
|
||||
n = walk->nbytes - err;
|
||||
walk->total -= n;
|
||||
more = (walk->total != 0);
|
||||
|
||||
if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
|
||||
ablkcipher_done_fast(walk, n);
|
||||
} else {
|
||||
if (WARN_ON(err)) {
|
||||
/* unexpected case; didn't process all bytes */
|
||||
err = -EINVAL;
|
||||
goto finish;
|
||||
}
|
||||
ablkcipher_done_slow(walk, n);
|
||||
}
|
||||
|
||||
scatterwalk_done(&walk->in, 0, more);
|
||||
scatterwalk_done(&walk->out, 1, more);
|
||||
|
||||
if (more) {
|
||||
crypto_yield(req->base.flags);
|
||||
return ablkcipher_walk_next(req, walk);
|
||||
}
|
||||
err = 0;
|
||||
finish:
|
||||
walk->nbytes = 0;
|
||||
if (walk->iv != req->info)
|
||||
memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
|
||||
kfree(walk->iv_buffer);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
|
||||
|
||||
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
|
||||
struct ablkcipher_walk *walk,
|
||||
unsigned int bsize,
|
||||
unsigned int alignmask,
|
||||
void **src_p, void **dst_p)
|
||||
{
|
||||
unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
|
||||
struct ablkcipher_buffer *p;
|
||||
void *src, *dst, *base;
|
||||
unsigned int n;
|
||||
|
||||
n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
|
||||
n += (aligned_bsize * 3 - (alignmask + 1) +
|
||||
(alignmask & ~(crypto_tfm_ctx_alignment() - 1)));
|
||||
|
||||
p = kmalloc(n, GFP_ATOMIC);
|
||||
if (!p)
|
||||
return ablkcipher_walk_done(req, walk, -ENOMEM);
|
||||
|
||||
base = p + 1;
|
||||
|
||||
dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
|
||||
src = dst = ablkcipher_get_spot(dst, bsize);
|
||||
|
||||
p->len = bsize;
|
||||
p->data = dst;
|
||||
|
||||
scatterwalk_copychunks(src, &walk->in, bsize, 0);
|
||||
|
||||
ablkcipher_queue_write(walk, p);
|
||||
|
||||
walk->nbytes = bsize;
|
||||
walk->flags |= ABLKCIPHER_WALK_SLOW;
|
||||
|
||||
*src_p = src;
|
||||
*dst_p = dst;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
|
||||
struct crypto_tfm *tfm,
|
||||
unsigned int alignmask)
|
||||
{
|
||||
unsigned bs = walk->blocksize;
|
||||
unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
|
||||
unsigned aligned_bs = ALIGN(bs, alignmask + 1);
|
||||
unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
|
||||
(alignmask + 1);
|
||||
u8 *iv;
|
||||
|
||||
size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
|
||||
walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
|
||||
if (!walk->iv_buffer)
|
||||
return -ENOMEM;
|
||||
|
||||
iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
|
||||
iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
|
||||
iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
|
||||
iv = ablkcipher_get_spot(iv, ivsize);
|
||||
|
||||
walk->iv = memcpy(iv, walk->iv, ivsize);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
|
||||
struct ablkcipher_walk *walk)
|
||||
{
|
||||
walk->src.page = scatterwalk_page(&walk->in);
|
||||
walk->src.offset = offset_in_page(walk->in.offset);
|
||||
walk->dst.page = scatterwalk_page(&walk->out);
|
||||
walk->dst.offset = offset_in_page(walk->out.offset);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ablkcipher_walk_next(struct ablkcipher_request *req,
|
||||
struct ablkcipher_walk *walk)
|
||||
{
|
||||
struct crypto_tfm *tfm = req->base.tfm;
|
||||
unsigned int alignmask, bsize, n;
|
||||
void *src, *dst;
|
||||
int err;
|
||||
|
||||
alignmask = crypto_tfm_alg_alignmask(tfm);
|
||||
n = walk->total;
|
||||
if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
|
||||
req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
|
||||
return ablkcipher_walk_done(req, walk, -EINVAL);
|
||||
}
|
||||
|
||||
walk->flags &= ~ABLKCIPHER_WALK_SLOW;
|
||||
src = dst = NULL;
|
||||
|
||||
bsize = min(walk->blocksize, n);
|
||||
n = scatterwalk_clamp(&walk->in, n);
|
||||
n = scatterwalk_clamp(&walk->out, n);
|
||||
|
||||
if (n < bsize ||
|
||||
!scatterwalk_aligned(&walk->in, alignmask) ||
|
||||
!scatterwalk_aligned(&walk->out, alignmask)) {
|
||||
err = ablkcipher_next_slow(req, walk, bsize, alignmask,
|
||||
&src, &dst);
|
||||
goto set_phys_lowmem;
|
||||
}
|
||||
|
||||
walk->nbytes = n;
|
||||
|
||||
return ablkcipher_next_fast(req, walk);
|
||||
|
||||
set_phys_lowmem:
|
||||
if (err >= 0) {
|
||||
walk->src.page = virt_to_page(src);
|
||||
walk->dst.page = virt_to_page(dst);
|
||||
walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
|
||||
walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ablkcipher_walk_first(struct ablkcipher_request *req,
|
||||
struct ablkcipher_walk *walk)
|
||||
{
|
||||
struct crypto_tfm *tfm = req->base.tfm;
|
||||
unsigned int alignmask;
|
||||
|
||||
alignmask = crypto_tfm_alg_alignmask(tfm);
|
||||
if (WARN_ON_ONCE(in_irq()))
|
||||
return -EDEADLK;
|
||||
|
||||
walk->iv = req->info;
|
||||
walk->nbytes = walk->total;
|
||||
if (unlikely(!walk->total))
|
||||
return 0;
|
||||
|
||||
walk->iv_buffer = NULL;
|
||||
if (unlikely(((unsigned long)walk->iv & alignmask))) {
|
||||
int err = ablkcipher_copy_iv(walk, tfm, alignmask);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
scatterwalk_start(&walk->in, walk->in.sg);
|
||||
scatterwalk_start(&walk->out, walk->out.sg);
|
||||
|
||||
return ablkcipher_walk_next(req, walk);
|
||||
}
|
||||
|
||||
int ablkcipher_walk_phys(struct ablkcipher_request *req,
|
||||
struct ablkcipher_walk *walk)
|
||||
{
|
||||
walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
|
||||
return ablkcipher_walk_first(req, walk);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
|
||||
|
||||
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
|
||||
unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
|
||||
int ret;
|
||||
u8 *buffer, *alignbuffer;
|
||||
unsigned long absize;
|
||||
|
||||
absize = keylen + alignmask;
|
||||
buffer = kmalloc(absize, GFP_ATOMIC);
|
||||
if (!buffer)
|
||||
return -ENOMEM;
|
||||
|
||||
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
|
||||
memcpy(alignbuffer, key, keylen);
|
||||
ret = cipher->setkey(tfm, alignbuffer, keylen);
|
||||
memset(alignbuffer, 0, keylen);
|
||||
kfree(buffer);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
|
||||
unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
|
||||
|
||||
if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
|
||||
crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if ((unsigned long)key & alignmask)
|
||||
return setkey_unaligned(tfm, key, keylen);
|
||||
|
||||
return cipher->setkey(tfm, key, keylen);
|
||||
}
|
||||
|
||||
static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
|
||||
u32 mask)
|
||||
{
|
||||
return alg->cra_ctxsize;
|
||||
}
|
||||
|
||||
static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
|
||||
u32 mask)
|
||||
{
|
||||
struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
|
||||
struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
|
||||
|
||||
if (alg->ivsize > PAGE_SIZE / 8)
|
||||
return -EINVAL;
|
||||
|
||||
crt->setkey = setkey;
|
||||
crt->encrypt = alg->encrypt;
|
||||
crt->decrypt = alg->decrypt;
|
||||
crt->base = __crypto_ablkcipher_cast(tfm);
|
||||
crt->ivsize = alg->ivsize;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NET
|
||||
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
|
||||
{
|
||||
struct crypto_report_blkcipher rblkcipher;
|
||||
|
||||
memset(&rblkcipher, 0, sizeof(rblkcipher));
|
||||
|
||||
strscpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
|
||||
strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));
|
||||
|
||||
rblkcipher.blocksize = alg->cra_blocksize;
|
||||
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
|
||||
rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
|
||||
rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
|
||||
|
||||
return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
|
||||
sizeof(rblkcipher), &rblkcipher);
|
||||
}
|
||||
#else
|
||||
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
__maybe_unused;
|
||||
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
{
|
||||
struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;
|
||||
|
||||
seq_printf(m, "type : ablkcipher\n");
|
||||
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
|
||||
"yes" : "no");
|
||||
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
|
||||
seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize);
|
||||
seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize);
|
||||
seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize);
|
||||
seq_printf(m, "geniv : <default>\n");
|
||||
}
|
||||
|
||||
const struct crypto_type crypto_ablkcipher_type = {
|
||||
.ctxsize = crypto_ablkcipher_ctxsize,
|
||||
.init = crypto_init_ablkcipher_ops,
|
||||
#ifdef CONFIG_PROC_FS
|
||||
.show = crypto_ablkcipher_show,
|
||||
#endif
|
||||
.report = crypto_ablkcipher_report,
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
|
@@ -33,6 +33,7 @@
#include <crypto/b128ops.h>
#include <crypto/chacha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/internal/skcipher.h>
#include <crypto/nhpoly1305.h>
#include <crypto/scatterwalk.h>

@@ -242,11 +243,11 @@ static void adiantum_hash_header(struct skcipher_request *req)
	BUILD_BUG_ON(sizeof(header) % POLY1305_BLOCK_SIZE != 0);
	poly1305_core_blocks(&state, &tctx->header_hash_key,
			     &header, sizeof(header) / POLY1305_BLOCK_SIZE);
			     &header, sizeof(header) / POLY1305_BLOCK_SIZE, 1);

	BUILD_BUG_ON(TWEAK_SIZE % POLY1305_BLOCK_SIZE != 0);
	poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
			     TWEAK_SIZE / POLY1305_BLOCK_SIZE);
			     TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);

	poly1305_core_emit(&state, &rctx->header_hash);
}

crypto/aead.c (165 lines changed)
@@ -7,19 +7,14 @@
 * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/geniv.h>
#include <crypto/internal/rng.h>
#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <crypto/internal/aead.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

@@ -212,162 +207,6 @@ static const struct crypto_type crypto_aead_type = {
	.tfmsize = offsetof(struct crypto_aead, base),
};
|
||||
|
||||
static int aead_geniv_setkey(struct crypto_aead *tfm,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
|
||||
return crypto_aead_setkey(ctx->child, key, keylen);
|
||||
}
|
||||
|
||||
static int aead_geniv_setauthsize(struct crypto_aead *tfm,
|
||||
unsigned int authsize)
|
||||
{
|
||||
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
|
||||
return crypto_aead_setauthsize(ctx->child, authsize);
|
||||
}
|
||||
|
||||
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
|
||||
struct rtattr **tb, u32 type, u32 mask)
|
||||
{
|
||||
const char *name;
|
||||
struct crypto_aead_spawn *spawn;
|
||||
struct crypto_attr_type *algt;
|
||||
struct aead_instance *inst;
|
||||
struct aead_alg *alg;
|
||||
unsigned int ivsize;
|
||||
unsigned int maxauthsize;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_CAST(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(name))
|
||||
return ERR_CAST(name);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
spawn = aead_instance_ctx(inst);
|
||||
|
||||
/* Ignore async algorithms if necessary. */
|
||||
mask |= crypto_requires_sync(algt->type, algt->mask);
|
||||
|
||||
crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
|
||||
err = crypto_grab_aead(spawn, name, type, mask);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
alg = crypto_spawn_aead_alg(spawn);
|
||||
|
||||
ivsize = crypto_aead_alg_ivsize(alg);
|
||||
maxauthsize = crypto_aead_alg_maxauthsize(alg);
|
||||
|
||||
err = -EINVAL;
|
||||
if (ivsize < sizeof(u64))
|
||||
goto err_drop_alg;
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"%s(%s)", tmpl->name, alg->base.cra_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_alg;
|
||||
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"%s(%s)", tmpl->name, alg->base.cra_driver_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_alg;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
|
||||
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
|
||||
|
||||
inst->alg.setkey = aead_geniv_setkey;
|
||||
inst->alg.setauthsize = aead_geniv_setauthsize;
|
||||
|
||||
inst->alg.ivsize = ivsize;
|
||||
inst->alg.maxauthsize = maxauthsize;
|
||||
|
||||
out:
|
||||
return inst;
|
||||
|
||||
err_drop_alg:
|
||||
crypto_drop_aead(spawn);
|
||||
err_free_inst:
|
||||
kfree(inst);
|
||||
inst = ERR_PTR(err);
|
||||
goto out;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(aead_geniv_alloc);
|
||||
|
||||
void aead_geniv_free(struct aead_instance *inst)
|
||||
{
|
||||
crypto_drop_aead(aead_instance_ctx(inst));
|
||||
kfree(inst);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(aead_geniv_free);
|
||||
|
||||
int aead_init_geniv(struct crypto_aead *aead)
|
||||
{
|
||||
struct aead_geniv_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct aead_instance *inst = aead_alg_instance(aead);
|
||||
struct crypto_aead *child;
|
||||
int err;
|
||||
|
||||
spin_lock_init(&ctx->lock);
|
||||
|
||||
err = crypto_get_default_rng();
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
|
||||
crypto_aead_ivsize(aead));
|
||||
crypto_put_default_rng();
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
ctx->sknull = crypto_get_default_null_skcipher();
|
||||
err = PTR_ERR(ctx->sknull);
|
||||
if (IS_ERR(ctx->sknull))
|
||||
goto out;
|
||||
|
||||
child = crypto_spawn_aead(aead_instance_ctx(inst));
|
||||
err = PTR_ERR(child);
|
||||
if (IS_ERR(child))
|
||||
goto drop_null;
|
||||
|
||||
ctx->child = child;
|
||||
crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) +
|
||||
sizeof(struct aead_request));
|
||||
|
||||
err = 0;
|
||||
|
||||
out:
|
||||
return err;
|
||||
|
||||
drop_null:
|
||||
crypto_put_default_null_skcipher();
|
||||
goto out;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(aead_init_geniv);
|
||||
|
||||
void aead_exit_geniv(struct crypto_aead *tfm)
|
||||
{
|
||||
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
|
||||
crypto_free_aead(ctx->child);
|
||||
crypto_put_default_null_skcipher();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(aead_exit_geniv);
|
||||
|
||||
int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
@@ -35,15 +36,7 @@ struct aegis_ctx {
	union aegis_block key;
};

struct aegis128_ops {
	int (*skcipher_walk_init)(struct skcipher_walk *walk,
				  struct aead_request *req, bool atomic);

	void (*crypt_chunk)(struct aegis_state *state, u8 *dst,
			    const u8 *src, unsigned int size);
};

static bool have_simd;
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_simd);

static const union aegis_block crypto_aegis_const[2] = {
	{ .words64 = {

@@ -59,7 +52,7 @@ static const union aegis_block crypto_aegis_const[2] = {
static bool aegis128_do_simd(void)
{
#ifdef CONFIG_CRYPTO_AEGIS128_SIMD
	if (have_simd)
	if (static_branch_likely(&have_simd))
		return crypto_simd_usable();
#endif
	return false;
@ -67,10 +60,16 @@ static bool aegis128_do_simd(void)
|
|||
|
||||
bool crypto_aegis128_have_simd(void);
|
||||
void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg);
|
||||
void crypto_aegis128_init_simd(struct aegis_state *state,
|
||||
const union aegis_block *key,
|
||||
const u8 *iv);
|
||||
void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
|
||||
const u8 *src, unsigned int size);
|
||||
void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
|
||||
const u8 *src, unsigned int size);
|
||||
void crypto_aegis128_final_simd(struct aegis_state *state,
|
||||
union aegis_block *tag_xor,
|
||||
u64 assoclen, u64 cryptlen);
|
||||
|
||||
static void crypto_aegis128_update(struct aegis_state *state)
|
||||
{
|
||||
|
@ -323,25 +322,27 @@ static void crypto_aegis128_process_ad(struct aegis_state *state,
|
|||
}
|
||||
}
|
||||
|
||||
static void crypto_aegis128_process_crypt(struct aegis_state *state,
|
||||
struct aead_request *req,
|
||||
const struct aegis128_ops *ops)
|
||||
static __always_inline
|
||||
int crypto_aegis128_process_crypt(struct aegis_state *state,
|
||||
struct aead_request *req,
|
||||
struct skcipher_walk *walk,
|
||||
void (*crypt)(struct aegis_state *state,
|
||||
u8 *dst, const u8 *src,
|
||||
unsigned int size))
|
||||
{
|
||||
struct skcipher_walk walk;
|
||||
int err = 0;
|
||||
|
||||
ops->skcipher_walk_init(&walk, req, false);
|
||||
while (walk->nbytes) {
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
|
||||
while (walk.nbytes) {
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
if (nbytes < walk->total)
|
||||
nbytes = round_down(nbytes, walk->stride);
|
||||
|
||||
if (nbytes < walk.total)
|
||||
nbytes = round_down(nbytes, walk.stride);
|
||||
crypt(state, walk->dst.virt.addr, walk->src.virt.addr, nbytes);
|
||||
|
||||
ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
|
||||
nbytes);
|
||||
|
||||
skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
err = skcipher_walk_done(walk, walk->nbytes - nbytes);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static void crypto_aegis128_final(struct aegis_state *state,
|
||||
|
@ -390,39 +391,31 @@ static int crypto_aegis128_setauthsize(struct crypto_aead *tfm,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void crypto_aegis128_crypt(struct aead_request *req,
|
||||
union aegis_block *tag_xor,
|
||||
unsigned int cryptlen,
|
||||
const struct aegis128_ops *ops)
|
||||
{
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct aegis_state state;
|
||||
|
||||
crypto_aegis128_init(&state, &ctx->key, req->iv);
|
||||
crypto_aegis128_process_ad(&state, req->src, req->assoclen);
|
||||
crypto_aegis128_process_crypt(&state, req, ops);
|
||||
crypto_aegis128_final(&state, tag_xor, req->assoclen, cryptlen);
|
||||
}
|
||||
|
||||
static int crypto_aegis128_encrypt(struct aead_request *req)
|
||||
{
|
||||
const struct aegis128_ops *ops = &(struct aegis128_ops){
|
||||
.skcipher_walk_init = skcipher_walk_aead_encrypt,
|
||||
.crypt_chunk = crypto_aegis128_encrypt_chunk,
|
||||
};
|
||||
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
union aegis_block tag = {};
|
||||
unsigned int authsize = crypto_aead_authsize(tfm);
|
||||
struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
unsigned int cryptlen = req->cryptlen;
|
||||
struct skcipher_walk walk;
|
||||
struct aegis_state state;
|
||||
|
||||
if (aegis128_do_simd())
|
||||
ops = &(struct aegis128_ops){
|
||||
.skcipher_walk_init = skcipher_walk_aead_encrypt,
|
||||
.crypt_chunk = crypto_aegis128_encrypt_chunk_simd };
|
||||
|
||||
crypto_aegis128_crypt(req, &tag, cryptlen, ops);
|
||||
skcipher_walk_aead_encrypt(&walk, req, false);
|
||||
if (aegis128_do_simd()) {
|
||||
crypto_aegis128_init_simd(&state, &ctx->key, req->iv);
|
||||
crypto_aegis128_process_ad(&state, req->src, req->assoclen);
|
||||
crypto_aegis128_process_crypt(&state, req, &walk,
|
||||
crypto_aegis128_encrypt_chunk_simd);
|
||||
crypto_aegis128_final_simd(&state, &tag, req->assoclen,
|
||||
cryptlen);
|
||||
} else {
|
||||
crypto_aegis128_init(&state, &ctx->key, req->iv);
|
||||
crypto_aegis128_process_ad(&state, req->src, req->assoclen);
|
||||
crypto_aegis128_process_crypt(&state, req, &walk,
|
||||
crypto_aegis128_encrypt_chunk);
|
||||
crypto_aegis128_final(&state, &tag, req->assoclen, cryptlen);
|
||||
}
|
||||
|
||||
scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen,
|
||||
authsize, 1);
|
||||
|
@ -431,26 +424,33 @@ static int crypto_aegis128_encrypt(struct aead_request *req)
|
|||
|
||||
static int crypto_aegis128_decrypt(struct aead_request *req)
|
||||
{
|
||||
const struct aegis128_ops *ops = &(struct aegis128_ops){
|
||||
.skcipher_walk_init = skcipher_walk_aead_decrypt,
|
||||
.crypt_chunk = crypto_aegis128_decrypt_chunk,
|
||||
};
|
||||
static const u8 zeros[AEGIS128_MAX_AUTH_SIZE] = {};
|
||||
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
union aegis_block tag;
|
||||
unsigned int authsize = crypto_aead_authsize(tfm);
|
||||
unsigned int cryptlen = req->cryptlen - authsize;
|
||||
struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
struct aegis_state state;
|
||||
|
||||
scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen,
|
||||
authsize, 0);
|
||||
|
||||
if (aegis128_do_simd())
|
||||
ops = &(struct aegis128_ops){
|
||||
.skcipher_walk_init = skcipher_walk_aead_decrypt,
|
||||
.crypt_chunk = crypto_aegis128_decrypt_chunk_simd };
|
||||
|
||||
crypto_aegis128_crypt(req, &tag, cryptlen, ops);
|
||||
skcipher_walk_aead_decrypt(&walk, req, false);
|
||||
if (aegis128_do_simd()) {
|
||||
crypto_aegis128_init_simd(&state, &ctx->key, req->iv);
|
||||
crypto_aegis128_process_ad(&state, req->src, req->assoclen);
|
||||
crypto_aegis128_process_crypt(&state, req, &walk,
|
||||
crypto_aegis128_decrypt_chunk_simd);
|
||||
crypto_aegis128_final_simd(&state, &tag, req->assoclen,
|
||||
cryptlen);
|
||||
} else {
|
||||
crypto_aegis128_init(&state, &ctx->key, req->iv);
|
||||
crypto_aegis128_process_ad(&state, req->src, req->assoclen);
|
||||
crypto_aegis128_process_crypt(&state, req, &walk,
|
||||
crypto_aegis128_decrypt_chunk);
|
||||
crypto_aegis128_final(&state, &tag, req->assoclen, cryptlen);
|
||||
}
|
||||
|
||||
return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
|
||||
}
|
||||
|
@@ -481,8 +481,9 @@ static struct aead_alg crypto_aegis128_alg = {

static int __init crypto_aegis128_module_init(void)
{
	if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD))
		have_simd = crypto_aegis128_have_simd();
	if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) &&
	    crypto_aegis128_have_simd())
		static_branch_enable(&have_simd);

	return crypto_register_aead(&crypto_aegis128_alg);
}
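
The hunk above replaces the plain have_simd bool with a jump-label static key that is flipped once at module init. A minimal, self-contained sketch of the same idiom follows; every name in it (accel_usable, my_detect_accel, and so on) is hypothetical and only illustrates the pattern, it is not part of this patch.

#include <linux/jump_label.h>
#include <linux/module.h>

/* Hypothetical feature flag, mirroring the have_simd conversion above. */
static __ro_after_init DEFINE_STATIC_KEY_FALSE(accel_usable);

static bool my_detect_accel(void)
{
	return true;	/* stand-in for real capability detection */
}

static bool my_use_accel(void)
{
	/* Patched to a direct branch once the key has been enabled. */
	return static_branch_likely(&accel_usable);
}

static int __init my_init(void)
{
	if (my_detect_accel())
		static_branch_enable(&accel_usable);
	pr_info("accel usable: %d\n", my_use_accel());
	return 0;
}
module_init(my_init);
MODULE_LICENSE("GPL");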
|
||||
|
|
|
@ -132,6 +132,36 @@ void preload_sbox(void)
|
|||
:: "r"(crypto_aes_sbox));
|
||||
}
|
||||
|
||||
void crypto_aegis128_init_neon(void *state, const void *key, const void *iv)
|
||||
{
|
||||
static const uint8_t const0[] = {
|
||||
0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d,
|
||||
0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62,
|
||||
};
|
||||
static const uint8_t const1[] = {
|
||||
0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1,
|
||||
0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd,
|
||||
};
|
||||
uint8x16_t k = vld1q_u8(key);
|
||||
uint8x16_t kiv = k ^ vld1q_u8(iv);
|
||||
struct aegis128_state st = {{
|
||||
kiv,
|
||||
vld1q_u8(const1),
|
||||
vld1q_u8(const0),
|
||||
k ^ vld1q_u8(const0),
|
||||
k ^ vld1q_u8(const1),
|
||||
}};
|
||||
int i;
|
||||
|
||||
preload_sbox();
|
||||
|
||||
for (i = 0; i < 5; i++) {
|
||||
st = aegis128_update_neon(st, k);
|
||||
st = aegis128_update_neon(st, kiv);
|
||||
}
|
||||
aegis128_save_state_neon(st, state);
|
||||
}
|
||||
|
||||
void crypto_aegis128_update_neon(void *state, const void *msg)
|
||||
{
|
||||
struct aegis128_state st = aegis128_load_state_neon(state);
|
||||
|
@ -210,3 +240,23 @@ void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
|
|||
|
||||
aegis128_save_state_neon(st, state);
|
||||
}
|
||||
|
||||
void crypto_aegis128_final_neon(void *state, void *tag_xor, uint64_t assoclen,
|
||||
uint64_t cryptlen)
|
||||
{
|
||||
struct aegis128_state st = aegis128_load_state_neon(state);
|
||||
uint8x16_t v;
|
||||
int i;
|
||||
|
||||
preload_sbox();
|
||||
|
||||
v = st.v[3] ^ (uint8x16_t)vcombine_u64(vmov_n_u64(8 * assoclen),
|
||||
vmov_n_u64(8 * cryptlen));
|
||||
|
||||
for (i = 0; i < 7; i++)
|
||||
st = aegis128_update_neon(st, v);
|
||||
|
||||
v = vld1q_u8(tag_xor);
|
||||
v ^= st.v[0] ^ st.v[1] ^ st.v[2] ^ st.v[3] ^ st.v[4];
|
||||
vst1q_u8(tag_xor, v);
|
||||
}
|
||||
|
|
|
@ -8,11 +8,14 @@
|
|||
|
||||
#include "aegis.h"
|
||||
|
||||
void crypto_aegis128_init_neon(void *state, const void *key, const void *iv);
|
||||
void crypto_aegis128_update_neon(void *state, const void *msg);
|
||||
void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
|
||||
unsigned int size);
|
||||
void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
|
||||
unsigned int size);
|
||||
void crypto_aegis128_final_neon(void *state, void *tag_xor, uint64_t assoclen,
|
||||
uint64_t cryptlen);
|
||||
|
||||
int aegis128_have_aes_insn __ro_after_init;
|
||||
|
||||
|
@ -25,6 +28,15 @@ bool crypto_aegis128_have_simd(void)
|
|||
return IS_ENABLED(CONFIG_ARM64);
|
||||
}
|
||||
|
||||
void crypto_aegis128_init_simd(union aegis_block *state,
|
||||
const union aegis_block *key,
|
||||
const u8 *iv)
|
||||
{
|
||||
kernel_neon_begin();
|
||||
crypto_aegis128_init_neon(state, key, iv);
|
||||
kernel_neon_end();
|
||||
}
|
||||
|
||||
void crypto_aegis128_update_simd(union aegis_block *state, const void *msg)
|
||||
{
|
||||
kernel_neon_begin();
|
||||
|
@ -47,3 +59,12 @@ void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst,
|
|||
crypto_aegis128_decrypt_chunk_neon(state, dst, src, size);
|
||||
kernel_neon_end();
|
||||
}
|
||||
|
||||
void crypto_aegis128_final_simd(union aegis_block *state,
|
||||
union aegis_block *tag_xor,
|
||||
u64 assoclen, u64 cryptlen)
|
||||
{
|
||||
kernel_neon_begin();
|
||||
crypto_aegis128_final_neon(state, tag_xor, assoclen, cryptlen);
|
||||
kernel_neon_end();
|
||||
}
|
||||
|
|
@@ -1043,7 +1043,7 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
	af_alg_free_resources(areq);
	sock_put(sk);

	iocb->ki_complete(iocb, err ? err : resultlen, 0);
	iocb->ki_complete(iocb, err ? err : (int)resultlen, 0);
}
EXPORT_SYMBOL_GPL(af_alg_async_cb);

@@ -1052,32 +1052,6 @@ void crypto_stats_get(struct crypto_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_stats_get);

void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret,
				     struct crypto_alg *alg)
{
	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
		atomic64_inc(&alg->stats.cipher.err_cnt);
	} else {
		atomic64_inc(&alg->stats.cipher.encrypt_cnt);
		atomic64_add(nbytes, &alg->stats.cipher.encrypt_tlen);
	}
	crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_ablkcipher_encrypt);

void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret,
				     struct crypto_alg *alg)
{
	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
		atomic64_inc(&alg->stats.cipher.err_cnt);
	} else {
		atomic64_inc(&alg->stats.cipher.decrypt_cnt);
		atomic64_add(nbytes, &alg->stats.cipher.decrypt_tlen);
	}
	crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_stats_ablkcipher_decrypt);

void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg,
			       int ret)
{

@@ -56,7 +56,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct crypto_skcipher *tfm = pask->private;
	unsigned int bs = crypto_skcipher_blocksize(tfm);
	unsigned int bs = crypto_skcipher_chunksize(tfm);
	struct af_alg_async_req *areq;
	int err = 0;
	size_t len = 0;

@@ -406,7 +406,7 @@ EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);
 *
 * The returned transform is of a non-determinate type. Most people
 * should use one of the more specific allocation functions such as
 * crypto_alloc_blkcipher.
 * crypto_alloc_skcipher().
 *
 * In case of error the return value is an error pointer.
 */

@@ -608,3 +608,4 @@ EXPORT_SYMBOL_GPL(crypto_req_done);

MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: cryptomgr");

@ -0,0 +1,320 @@
|
|||
// SPDX-License-Identifier: (GPL-2.0-only OR Apache-2.0)
|
||||
/*
|
||||
* BLAKE2b reference source code package - reference C implementations
|
||||
*
|
||||
* Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
|
||||
* terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
|
||||
* your option. The terms of these licenses can be found at:
|
||||
*
|
||||
* - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
|
||||
* - OpenSSL license : https://www.openssl.org/source/license.html
|
||||
* - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* More information about the BLAKE2 hash function can be found at
|
||||
* https://blake2.net.
|
||||
*
|
||||
* Note: the original sources have been modified for inclusion in linux kernel
|
||||
* in terms of coding style, using generic helpers and simplifications of error
|
||||
* handling.
|
||||
*/
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
|
||||
#define BLAKE2B_160_DIGEST_SIZE (160 / 8)
|
||||
#define BLAKE2B_256_DIGEST_SIZE (256 / 8)
|
||||
#define BLAKE2B_384_DIGEST_SIZE (384 / 8)
|
||||
#define BLAKE2B_512_DIGEST_SIZE (512 / 8)
|
||||
|
||||
enum blake2b_constant {
|
||||
BLAKE2B_BLOCKBYTES = 128,
|
||||
BLAKE2B_KEYBYTES = 64,
|
||||
};
|
||||
|
||||
struct blake2b_state {
|
||||
u64 h[8];
|
||||
u64 t[2];
|
||||
u64 f[2];
|
||||
u8 buf[BLAKE2B_BLOCKBYTES];
|
||||
size_t buflen;
|
||||
};
|
||||
|
||||
static const u64 blake2b_IV[8] = {
|
||||
0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL,
|
||||
0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
|
||||
0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL,
|
||||
0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL
|
||||
};
|
||||
|
||||
static const u8 blake2b_sigma[12][16] = {
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
|
||||
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
|
||||
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
|
||||
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
|
||||
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
|
||||
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
|
||||
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
|
||||
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
|
||||
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
|
||||
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
|
||||
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
|
||||
};
|
||||
|
||||
static void blake2b_increment_counter(struct blake2b_state *S, const u64 inc)
|
||||
{
|
||||
S->t[0] += inc;
|
||||
S->t[1] += (S->t[0] < inc);
|
||||
}
|
||||
|
||||
#define G(r,i,a,b,c,d) \
|
||||
do { \
|
||||
a = a + b + m[blake2b_sigma[r][2*i+0]]; \
|
||||
d = ror64(d ^ a, 32); \
|
||||
c = c + d; \
|
||||
b = ror64(b ^ c, 24); \
|
||||
a = a + b + m[blake2b_sigma[r][2*i+1]]; \
|
||||
d = ror64(d ^ a, 16); \
|
||||
c = c + d; \
|
||||
b = ror64(b ^ c, 63); \
|
||||
} while (0)
|
||||
|
||||
#define ROUND(r) \
|
||||
do { \
|
||||
G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
|
||||
G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \
|
||||
G(r,2,v[ 2],v[ 6],v[10],v[14]); \
|
||||
G(r,3,v[ 3],v[ 7],v[11],v[15]); \
|
||||
G(r,4,v[ 0],v[ 5],v[10],v[15]); \
|
||||
G(r,5,v[ 1],v[ 6],v[11],v[12]); \
|
||||
G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
|
||||
G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \
|
||||
} while (0)
|
||||
|
||||
static void blake2b_compress(struct blake2b_state *S,
|
||||
const u8 block[BLAKE2B_BLOCKBYTES])
|
||||
{
|
||||
u64 m[16];
|
||||
u64 v[16];
|
||||
size_t i;
|
||||
|
||||
for (i = 0; i < 16; ++i)
|
||||
m[i] = get_unaligned_le64(block + i * sizeof(m[i]));
|
||||
|
||||
for (i = 0; i < 8; ++i)
|
||||
v[i] = S->h[i];
|
||||
|
||||
v[ 8] = blake2b_IV[0];
|
||||
v[ 9] = blake2b_IV[1];
|
||||
v[10] = blake2b_IV[2];
|
||||
v[11] = blake2b_IV[3];
|
||||
v[12] = blake2b_IV[4] ^ S->t[0];
|
||||
v[13] = blake2b_IV[5] ^ S->t[1];
|
||||
v[14] = blake2b_IV[6] ^ S->f[0];
|
||||
v[15] = blake2b_IV[7] ^ S->f[1];
|
||||
|
||||
ROUND(0);
|
||||
ROUND(1);
|
||||
ROUND(2);
|
||||
ROUND(3);
|
||||
ROUND(4);
|
||||
ROUND(5);
|
||||
ROUND(6);
|
||||
ROUND(7);
|
||||
ROUND(8);
|
||||
ROUND(9);
|
||||
ROUND(10);
|
||||
ROUND(11);
|
||||
|
||||
for (i = 0; i < 8; ++i)
|
||||
S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];
|
||||
}
|
||||
|
||||
#undef G
|
||||
#undef ROUND
|
||||
|
||||
struct blake2b_tfm_ctx {
|
||||
u8 key[BLAKE2B_KEYBYTES];
|
||||
unsigned int keylen;
|
||||
};
|
||||
|
||||
static int blake2b_setkey(struct crypto_shash *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
|
||||
|
||||
if (keylen == 0 || keylen > BLAKE2B_KEYBYTES) {
|
||||
crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memcpy(tctx->key, key, keylen);
|
||||
tctx->keylen = keylen;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int blake2b_init(struct shash_desc *desc)
|
||||
{
|
||||
struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
|
||||
struct blake2b_state *state = shash_desc_ctx(desc);
|
||||
const int digestsize = crypto_shash_digestsize(desc->tfm);
|
||||
|
||||
memset(state, 0, sizeof(*state));
|
||||
memcpy(state->h, blake2b_IV, sizeof(state->h));
|
||||
|
||||
/* Parameter block is all zeros except index 0, no xor for 1..7 */
|
||||
state->h[0] ^= 0x01010000 | tctx->keylen << 8 | digestsize;
|
||||
|
||||
if (tctx->keylen) {
|
||||
/*
|
||||
* Prefill the buffer with the key, next call to _update or
|
||||
* _final will process it
|
||||
*/
|
||||
memcpy(state->buf, tctx->key, tctx->keylen);
|
||||
state->buflen = BLAKE2B_BLOCKBYTES;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int blake2b_update(struct shash_desc *desc, const u8 *in,
|
||||
unsigned int inlen)
|
||||
{
|
||||
struct blake2b_state *state = shash_desc_ctx(desc);
|
||||
const size_t left = state->buflen;
|
||||
const size_t fill = BLAKE2B_BLOCKBYTES - left;
|
||||
|
||||
if (!inlen)
|
||||
return 0;
|
||||
|
||||
if (inlen > fill) {
|
||||
state->buflen = 0;
|
||||
/* Fill buffer */
|
||||
memcpy(state->buf + left, in, fill);
|
||||
blake2b_increment_counter(state, BLAKE2B_BLOCKBYTES);
|
||||
/* Compress */
|
||||
blake2b_compress(state, state->buf);
|
||||
in += fill;
|
||||
inlen -= fill;
|
||||
while (inlen > BLAKE2B_BLOCKBYTES) {
|
||||
blake2b_increment_counter(state, BLAKE2B_BLOCKBYTES);
|
||||
blake2b_compress(state, in);
|
||||
in += BLAKE2B_BLOCKBYTES;
|
||||
inlen -= BLAKE2B_BLOCKBYTES;
|
||||
}
|
||||
}
|
||||
memcpy(state->buf + state->buflen, in, inlen);
|
||||
state->buflen += inlen;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int blake2b_final(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
struct blake2b_state *state = shash_desc_ctx(desc);
|
||||
const int digestsize = crypto_shash_digestsize(desc->tfm);
|
||||
size_t i;
|
||||
|
||||
blake2b_increment_counter(state, state->buflen);
|
||||
/* Set last block */
|
||||
state->f[0] = (u64)-1;
|
||||
/* Padding */
|
||||
memset(state->buf + state->buflen, 0, BLAKE2B_BLOCKBYTES - state->buflen);
|
||||
blake2b_compress(state, state->buf);
|
||||
|
||||
/* Avoid temporary buffer and switch the internal output to LE order */
|
||||
for (i = 0; i < ARRAY_SIZE(state->h); i++)
|
||||
__cpu_to_le64s(&state->h[i]);
|
||||
|
||||
memcpy(out, state->h, digestsize);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct shash_alg blake2b_algs[] = {
|
||||
{
|
||||
.base.cra_name = "blake2b-160",
|
||||
.base.cra_driver_name = "blake2b-160-generic",
|
||||
.base.cra_priority = 100,
|
||||
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
|
||||
.base.cra_blocksize = BLAKE2B_BLOCKBYTES,
|
||||
.base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.digestsize = BLAKE2B_160_DIGEST_SIZE,
|
||||
.setkey = blake2b_setkey,
|
||||
.init = blake2b_init,
|
||||
.update = blake2b_update,
|
||||
.final = blake2b_final,
|
||||
.descsize = sizeof(struct blake2b_state),
|
||||
}, {
|
||||
.base.cra_name = "blake2b-256",
|
||||
.base.cra_driver_name = "blake2b-256-generic",
|
||||
.base.cra_priority = 100,
|
||||
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
|
||||
.base.cra_blocksize = BLAKE2B_BLOCKBYTES,
|
||||
.base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.digestsize = BLAKE2B_256_DIGEST_SIZE,
|
||||
.setkey = blake2b_setkey,
|
||||
.init = blake2b_init,
|
||||
.update = blake2b_update,
|
||||
.final = blake2b_final,
|
||||
.descsize = sizeof(struct blake2b_state),
|
||||
}, {
|
||||
.base.cra_name = "blake2b-384",
|
||||
.base.cra_driver_name = "blake2b-384-generic",
|
||||
.base.cra_priority = 100,
|
||||
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
|
||||
.base.cra_blocksize = BLAKE2B_BLOCKBYTES,
|
||||
.base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.digestsize = BLAKE2B_384_DIGEST_SIZE,
|
||||
.setkey = blake2b_setkey,
|
||||
.init = blake2b_init,
|
||||
.update = blake2b_update,
|
||||
.final = blake2b_final,
|
||||
.descsize = sizeof(struct blake2b_state),
|
||||
}, {
|
||||
.base.cra_name = "blake2b-512",
|
||||
.base.cra_driver_name = "blake2b-512-generic",
|
||||
.base.cra_priority = 100,
|
||||
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
|
||||
.base.cra_blocksize = BLAKE2B_BLOCKBYTES,
|
||||
.base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.digestsize = BLAKE2B_512_DIGEST_SIZE,
|
||||
.setkey = blake2b_setkey,
|
||||
.init = blake2b_init,
|
||||
.update = blake2b_update,
|
||||
.final = blake2b_final,
|
||||
.descsize = sizeof(struct blake2b_state),
|
||||
}
|
||||
};
|
||||
|
||||
static int __init blake2b_mod_init(void)
|
||||
{
|
||||
return crypto_register_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs));
|
||||
}
|
||||
|
||||
static void __exit blake2b_mod_fini(void)
|
||||
{
|
||||
crypto_unregister_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs));
|
||||
}
|
||||
|
||||
subsys_initcall(blake2b_mod_init);
|
||||
module_exit(blake2b_mod_fini);
|
||||
|
||||
MODULE_AUTHOR("David Sterba <kdave@kernel.org>");
|
||||
MODULE_DESCRIPTION("BLAKE2b generic implementation");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS_CRYPTO("blake2b-160");
|
||||
MODULE_ALIAS_CRYPTO("blake2b-160-generic");
|
||||
MODULE_ALIAS_CRYPTO("blake2b-256");
|
||||
MODULE_ALIAS_CRYPTO("blake2b-256-generic");
|
||||
MODULE_ALIAS_CRYPTO("blake2b-384");
|
||||
MODULE_ALIAS_CRYPTO("blake2b-384-generic");
|
||||
MODULE_ALIAS_CRYPTO("blake2b-512");
|
||||
MODULE_ALIAS_CRYPTO("blake2b-512-generic");
|
|
@ -0,0 +1,171 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 OR MIT
|
||||
/*
|
||||
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
|
||||
*/
|
||||
|
||||
#include <crypto/internal/blake2s.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
	struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm);

	if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) {
		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(tctx->key, key, keylen);
	tctx->keylen = keylen;

	return 0;
}

static int crypto_blake2s_init(struct shash_desc *desc)
{
	struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct blake2s_state *state = shash_desc_ctx(desc);
	const int outlen = crypto_shash_digestsize(desc->tfm);

	if (tctx->keylen)
		blake2s_init_key(state, outlen, tctx->key, tctx->keylen);
	else
		blake2s_init(state, outlen);

	return 0;
}

static int crypto_blake2s_update(struct shash_desc *desc, const u8 *in,
				 unsigned int inlen)
{
	struct blake2s_state *state = shash_desc_ctx(desc);
	const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;

	if (unlikely(!inlen))
		return 0;
	if (inlen > fill) {
		memcpy(state->buf + state->buflen, in, fill);
		blake2s_compress_generic(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
		state->buflen = 0;
		in += fill;
		inlen -= fill;
	}
	if (inlen > BLAKE2S_BLOCK_SIZE) {
		const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
		/* Hash one less (full) block than strictly possible */
		blake2s_compress_generic(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
		in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
		inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
	}
	memcpy(state->buf + state->buflen, in, inlen);
	state->buflen += inlen;

	return 0;
}

static int crypto_blake2s_final(struct shash_desc *desc, u8 *out)
{
	struct blake2s_state *state = shash_desc_ctx(desc);

	blake2s_set_lastblock(state);
	memset(state->buf + state->buflen, 0,
	       BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
	blake2s_compress_generic(state, state->buf, 1, state->buflen);
	cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
	memcpy(out, state->h, state->outlen);
	memzero_explicit(state, sizeof(*state));

	return 0;
}

static struct shash_alg blake2s_algs[] = {{
	.base.cra_name = "blake2s-128",
	.base.cra_driver_name = "blake2s-128-generic",
	.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
	.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
	.base.cra_priority = 200,
	.base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
	.base.cra_module = THIS_MODULE,

	.digestsize = BLAKE2S_128_HASH_SIZE,
	.setkey = crypto_blake2s_setkey,
	.init = crypto_blake2s_init,
	.update = crypto_blake2s_update,
	.final = crypto_blake2s_final,
	.descsize = sizeof(struct blake2s_state),
}, {
	.base.cra_name = "blake2s-160",
	.base.cra_driver_name = "blake2s-160-generic",
	.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
	.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
	.base.cra_priority = 200,
	.base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
	.base.cra_module = THIS_MODULE,

	.digestsize = BLAKE2S_160_HASH_SIZE,
	.setkey = crypto_blake2s_setkey,
	.init = crypto_blake2s_init,
	.update = crypto_blake2s_update,
	.final = crypto_blake2s_final,
	.descsize = sizeof(struct blake2s_state),
}, {
	.base.cra_name = "blake2s-224",
	.base.cra_driver_name = "blake2s-224-generic",
	.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
	.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
	.base.cra_priority = 200,
	.base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
	.base.cra_module = THIS_MODULE,

	.digestsize = BLAKE2S_224_HASH_SIZE,
	.setkey = crypto_blake2s_setkey,
	.init = crypto_blake2s_init,
	.update = crypto_blake2s_update,
	.final = crypto_blake2s_final,
	.descsize = sizeof(struct blake2s_state),
}, {
	.base.cra_name = "blake2s-256",
	.base.cra_driver_name = "blake2s-256-generic",
	.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
	.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
	.base.cra_priority = 200,
	.base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
	.base.cra_module = THIS_MODULE,

	.digestsize = BLAKE2S_256_HASH_SIZE,
	.setkey = crypto_blake2s_setkey,
	.init = crypto_blake2s_init,
	.update = crypto_blake2s_update,
	.final = crypto_blake2s_final,
	.descsize = sizeof(struct blake2s_state),
}};

static int __init blake2s_mod_init(void)
{
	return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
}

static void __exit blake2s_mod_exit(void)
{
	crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
}

subsys_initcall(blake2s_mod_init);
module_exit(blake2s_mod_exit);

MODULE_ALIAS_CRYPTO("blake2s-128");
MODULE_ALIAS_CRYPTO("blake2s-128-generic");
MODULE_ALIAS_CRYPTO("blake2s-160");
MODULE_ALIAS_CRYPTO("blake2s-160-generic");
MODULE_ALIAS_CRYPTO("blake2s-224");
MODULE_ALIAS_CRYPTO("blake2s-224-generic");
MODULE_ALIAS_CRYPTO("blake2s-256");
MODULE_ALIAS_CRYPTO("blake2s-256-generic");
MODULE_LICENSE("GPL v2");
@ -1,548 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Block chaining cipher operations.
|
||||
*
|
||||
* Generic encrypt/decrypt wrapper for ciphers, handles operations across
|
||||
* multiple page boundaries by using temporary blocks. In user context,
|
||||
* the kernel is given a chance to schedule us once per page.
|
||||
*
|
||||
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
*/
|
||||
|
||||
#include <crypto/aead.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/cryptouser.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <net/netlink.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
enum {
|
||||
BLKCIPHER_WALK_PHYS = 1 << 0,
|
||||
BLKCIPHER_WALK_SLOW = 1 << 1,
|
||||
BLKCIPHER_WALK_COPY = 1 << 2,
|
||||
BLKCIPHER_WALK_DIFF = 1 << 3,
|
||||
};
|
||||
|
||||
static int blkcipher_walk_next(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk);
|
||||
static int blkcipher_walk_first(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk);
|
||||
|
||||
static inline void blkcipher_map_src(struct blkcipher_walk *walk)
|
||||
{
|
||||
walk->src.virt.addr = scatterwalk_map(&walk->in);
|
||||
}
|
||||
|
||||
static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
|
||||
{
|
||||
walk->dst.virt.addr = scatterwalk_map(&walk->out);
|
||||
}
|
||||
|
||||
static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
|
||||
{
|
||||
scatterwalk_unmap(walk->src.virt.addr);
|
||||
}
|
||||
|
||||
static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
|
||||
{
|
||||
scatterwalk_unmap(walk->dst.virt.addr);
|
||||
}
|
||||
|
||||
/* Get a spot of the specified length that does not straddle a page.
|
||||
* The caller needs to ensure that there is enough space for this operation.
|
||||
*/
|
||||
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
|
||||
{
|
||||
u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
|
||||
return max(start, end_page);
|
||||
}
|
||||
|
||||
static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
|
||||
unsigned int bsize)
|
||||
{
|
||||
u8 *addr;
|
||||
|
||||
addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
|
||||
addr = blkcipher_get_spot(addr, bsize);
|
||||
scatterwalk_copychunks(addr, &walk->out, bsize, 1);
|
||||
}
|
||||
|
||||
static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
|
||||
unsigned int n)
|
||||
{
|
||||
if (walk->flags & BLKCIPHER_WALK_COPY) {
|
||||
blkcipher_map_dst(walk);
|
||||
memcpy(walk->dst.virt.addr, walk->page, n);
|
||||
blkcipher_unmap_dst(walk);
|
||||
} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
|
||||
if (walk->flags & BLKCIPHER_WALK_DIFF)
|
||||
blkcipher_unmap_dst(walk);
|
||||
blkcipher_unmap_src(walk);
|
||||
}
|
||||
|
||||
scatterwalk_advance(&walk->in, n);
|
||||
scatterwalk_advance(&walk->out, n);
|
||||
}
|
||||
|
||||
int blkcipher_walk_done(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk, int err)
|
||||
{
|
||||
unsigned int n; /* bytes processed */
|
||||
bool more;
|
||||
|
||||
if (unlikely(err < 0))
|
||||
goto finish;
|
||||
|
||||
n = walk->nbytes - err;
|
||||
walk->total -= n;
|
||||
more = (walk->total != 0);
|
||||
|
||||
if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
|
||||
blkcipher_done_fast(walk, n);
|
||||
} else {
|
||||
if (WARN_ON(err)) {
|
||||
/* unexpected case; didn't process all bytes */
|
||||
err = -EINVAL;
|
||||
goto finish;
|
||||
}
|
||||
blkcipher_done_slow(walk, n);
|
||||
}
|
||||
|
||||
scatterwalk_done(&walk->in, 0, more);
|
||||
scatterwalk_done(&walk->out, 1, more);
|
||||
|
||||
if (more) {
|
||||
crypto_yield(desc->flags);
|
||||
return blkcipher_walk_next(desc, walk);
|
||||
}
|
||||
err = 0;
|
||||
finish:
|
||||
walk->nbytes = 0;
|
||||
if (walk->iv != desc->info)
|
||||
memcpy(desc->info, walk->iv, walk->ivsize);
|
||||
if (walk->buffer != walk->page)
|
||||
kfree(walk->buffer);
|
||||
if (walk->page)
|
||||
free_page((unsigned long)walk->page);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
|
||||
|
||||
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk,
|
||||
unsigned int bsize,
|
||||
unsigned int alignmask)
|
||||
{
|
||||
unsigned int n;
|
||||
unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
|
||||
|
||||
if (walk->buffer)
|
||||
goto ok;
|
||||
|
||||
walk->buffer = walk->page;
|
||||
if (walk->buffer)
|
||||
goto ok;
|
||||
|
||||
n = aligned_bsize * 3 - (alignmask + 1) +
|
||||
(alignmask & ~(crypto_tfm_ctx_alignment() - 1));
|
||||
walk->buffer = kmalloc(n, GFP_ATOMIC);
|
||||
if (!walk->buffer)
|
||||
return blkcipher_walk_done(desc, walk, -ENOMEM);
|
||||
|
||||
ok:
|
||||
walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
|
||||
alignmask + 1);
|
||||
walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
|
||||
walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
|
||||
aligned_bsize, bsize);
|
||||
|
||||
scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
|
||||
|
||||
walk->nbytes = bsize;
|
||||
walk->flags |= BLKCIPHER_WALK_SLOW;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
|
||||
{
|
||||
u8 *tmp = walk->page;
|
||||
|
||||
blkcipher_map_src(walk);
|
||||
memcpy(tmp, walk->src.virt.addr, walk->nbytes);
|
||||
blkcipher_unmap_src(walk);
|
||||
|
||||
walk->src.virt.addr = tmp;
|
||||
walk->dst.virt.addr = tmp;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
{
|
||||
unsigned long diff;
|
||||
|
||||
walk->src.phys.page = scatterwalk_page(&walk->in);
|
||||
walk->src.phys.offset = offset_in_page(walk->in.offset);
|
||||
walk->dst.phys.page = scatterwalk_page(&walk->out);
|
||||
walk->dst.phys.offset = offset_in_page(walk->out.offset);
|
||||
|
||||
if (walk->flags & BLKCIPHER_WALK_PHYS)
|
||||
return 0;
|
||||
|
||||
diff = walk->src.phys.offset - walk->dst.phys.offset;
|
||||
diff |= walk->src.virt.page - walk->dst.virt.page;
|
||||
|
||||
blkcipher_map_src(walk);
|
||||
walk->dst.virt.addr = walk->src.virt.addr;
|
||||
|
||||
if (diff) {
|
||||
walk->flags |= BLKCIPHER_WALK_DIFF;
|
||||
blkcipher_map_dst(walk);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int blkcipher_walk_next(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
{
|
||||
unsigned int bsize;
|
||||
unsigned int n;
|
||||
int err;
|
||||
|
||||
n = walk->total;
|
||||
if (unlikely(n < walk->cipher_blocksize)) {
|
||||
desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
|
||||
return blkcipher_walk_done(desc, walk, -EINVAL);
|
||||
}
|
||||
|
||||
bsize = min(walk->walk_blocksize, n);
|
||||
|
||||
walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
|
||||
BLKCIPHER_WALK_DIFF);
|
||||
if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
|
||||
!scatterwalk_aligned(&walk->out, walk->alignmask)) {
|
||||
walk->flags |= BLKCIPHER_WALK_COPY;
|
||||
if (!walk->page) {
|
||||
walk->page = (void *)__get_free_page(GFP_ATOMIC);
|
||||
if (!walk->page)
|
||||
n = 0;
|
||||
}
|
||||
}
|
||||
|
||||
n = scatterwalk_clamp(&walk->in, n);
|
||||
n = scatterwalk_clamp(&walk->out, n);
|
||||
|
||||
if (unlikely(n < bsize)) {
|
||||
err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
|
||||
goto set_phys_lowmem;
|
||||
}
|
||||
|
||||
walk->nbytes = n;
|
||||
if (walk->flags & BLKCIPHER_WALK_COPY) {
|
||||
err = blkcipher_next_copy(walk);
|
||||
goto set_phys_lowmem;
|
||||
}
|
||||
|
||||
return blkcipher_next_fast(desc, walk);
|
||||
|
||||
set_phys_lowmem:
|
||||
if (walk->flags & BLKCIPHER_WALK_PHYS) {
|
||||
walk->src.phys.page = virt_to_page(walk->src.virt.addr);
|
||||
walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
|
||||
walk->src.phys.offset &= PAGE_SIZE - 1;
|
||||
walk->dst.phys.offset &= PAGE_SIZE - 1;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
|
||||
{
|
||||
unsigned bs = walk->walk_blocksize;
|
||||
unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
|
||||
unsigned int size = aligned_bs * 2 +
|
||||
walk->ivsize + max(aligned_bs, walk->ivsize) -
|
||||
(walk->alignmask + 1);
|
||||
u8 *iv;
|
||||
|
||||
size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
|
||||
walk->buffer = kmalloc(size, GFP_ATOMIC);
|
||||
if (!walk->buffer)
|
||||
return -ENOMEM;
|
||||
|
||||
iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
|
||||
iv = blkcipher_get_spot(iv, bs) + aligned_bs;
|
||||
iv = blkcipher_get_spot(iv, bs) + aligned_bs;
|
||||
iv = blkcipher_get_spot(iv, walk->ivsize);
|
||||
|
||||
walk->iv = memcpy(iv, walk->iv, walk->ivsize);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int blkcipher_walk_virt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
{
|
||||
walk->flags &= ~BLKCIPHER_WALK_PHYS;
|
||||
walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
|
||||
walk->cipher_blocksize = walk->walk_blocksize;
|
||||
walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
|
||||
walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
|
||||
return blkcipher_walk_first(desc, walk);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
|
||||
|
||||
int blkcipher_walk_phys(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
{
|
||||
walk->flags |= BLKCIPHER_WALK_PHYS;
|
||||
walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
|
||||
walk->cipher_blocksize = walk->walk_blocksize;
|
||||
walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
|
||||
walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
|
||||
return blkcipher_walk_first(desc, walk);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
|
||||
|
||||
static int blkcipher_walk_first(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
{
|
||||
if (WARN_ON_ONCE(in_irq()))
|
||||
return -EDEADLK;
|
||||
|
||||
walk->iv = desc->info;
|
||||
walk->nbytes = walk->total;
|
||||
if (unlikely(!walk->total))
|
||||
return 0;
|
||||
|
||||
walk->buffer = NULL;
|
||||
if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
|
||||
int err = blkcipher_copy_iv(walk);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
scatterwalk_start(&walk->in, walk->in.sg);
|
||||
scatterwalk_start(&walk->out, walk->out.sg);
|
||||
walk->page = NULL;
|
||||
|
||||
return blkcipher_walk_next(desc, walk);
|
||||
}
|
||||
|
||||
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk,
|
||||
unsigned int blocksize)
|
||||
{
|
||||
walk->flags &= ~BLKCIPHER_WALK_PHYS;
|
||||
walk->walk_blocksize = blocksize;
|
||||
walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
|
||||
walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
|
||||
walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
|
||||
return blkcipher_walk_first(desc, walk);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
|
||||
|
||||
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk,
|
||||
struct crypto_aead *tfm,
|
||||
unsigned int blocksize)
|
||||
{
|
||||
walk->flags &= ~BLKCIPHER_WALK_PHYS;
|
||||
walk->walk_blocksize = blocksize;
|
||||
walk->cipher_blocksize = crypto_aead_blocksize(tfm);
|
||||
walk->ivsize = crypto_aead_ivsize(tfm);
|
||||
walk->alignmask = crypto_aead_alignmask(tfm);
|
||||
return blkcipher_walk_first(desc, walk);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);
|
||||
|
||||
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
|
||||
unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
|
||||
int ret;
|
||||
u8 *buffer, *alignbuffer;
|
||||
unsigned long absize;
|
||||
|
||||
absize = keylen + alignmask;
|
||||
buffer = kmalloc(absize, GFP_ATOMIC);
|
||||
if (!buffer)
|
||||
return -ENOMEM;
|
||||
|
||||
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
|
||||
memcpy(alignbuffer, key, keylen);
|
||||
ret = cipher->setkey(tfm, alignbuffer, keylen);
|
||||
memset(alignbuffer, 0, keylen);
|
||||
kfree(buffer);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
|
||||
{
|
||||
struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
|
||||
unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
|
||||
|
||||
if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
|
||||
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if ((unsigned long)key & alignmask)
|
||||
return setkey_unaligned(tfm, key, keylen);
|
||||
|
||||
return cipher->setkey(tfm, key, keylen);
|
||||
}
|
||||
|
||||
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
|
||||
}
|
||||
|
||||
static int async_encrypt(struct ablkcipher_request *req)
|
||||
{
|
||||
struct crypto_tfm *tfm = req->base.tfm;
|
||||
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
|
||||
struct blkcipher_desc desc = {
|
||||
.tfm = __crypto_blkcipher_cast(tfm),
|
||||
.info = req->info,
|
||||
.flags = req->base.flags,
|
||||
};
|
||||
|
||||
|
||||
return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
|
||||
}
|
||||
|
||||
static int async_decrypt(struct ablkcipher_request *req)
|
||||
{
|
||||
struct crypto_tfm *tfm = req->base.tfm;
|
||||
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
|
||||
struct blkcipher_desc desc = {
|
||||
.tfm = __crypto_blkcipher_cast(tfm),
|
||||
.info = req->info,
|
||||
.flags = req->base.flags,
|
||||
};
|
||||
|
||||
return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
|
||||
}
|
||||
|
||||
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
|
||||
u32 mask)
|
||||
{
|
||||
struct blkcipher_alg *cipher = &alg->cra_blkcipher;
|
||||
unsigned int len = alg->cra_ctxsize;
|
||||
|
||||
if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
|
||||
cipher->ivsize) {
|
||||
len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
|
||||
len += cipher->ivsize;
|
||||
}
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
|
||||
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
|
||||
|
||||
crt->setkey = async_setkey;
|
||||
crt->encrypt = async_encrypt;
|
||||
crt->decrypt = async_decrypt;
|
||||
crt->base = __crypto_ablkcipher_cast(tfm);
|
||||
crt->ivsize = alg->ivsize;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
|
||||
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
|
||||
unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
|
||||
unsigned long addr;
|
||||
|
||||
crt->setkey = setkey;
|
||||
crt->encrypt = alg->encrypt;
|
||||
crt->decrypt = alg->decrypt;
|
||||
|
||||
addr = (unsigned long)crypto_tfm_ctx(tfm);
|
||||
addr = ALIGN(addr, align);
|
||||
addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
|
||||
crt->iv = (void *)addr;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
|
||||
{
|
||||
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
|
||||
|
||||
if (alg->ivsize > PAGE_SIZE / 8)
|
||||
return -EINVAL;
|
||||
|
||||
if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
|
||||
return crypto_init_blkcipher_ops_sync(tfm);
|
||||
else
|
||||
return crypto_init_blkcipher_ops_async(tfm);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NET
|
||||
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
|
||||
{
|
||||
struct crypto_report_blkcipher rblkcipher;
|
||||
|
||||
memset(&rblkcipher, 0, sizeof(rblkcipher));
|
||||
|
||||
strscpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
|
||||
strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));
|
||||
|
||||
rblkcipher.blocksize = alg->cra_blocksize;
|
||||
rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
|
||||
rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
|
||||
rblkcipher.ivsize = alg->cra_blkcipher.ivsize;
|
||||
|
||||
return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
|
||||
sizeof(rblkcipher), &rblkcipher);
|
||||
}
|
||||
#else
|
||||
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
__maybe_unused;
|
||||
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
{
|
||||
seq_printf(m, "type : blkcipher\n");
|
||||
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
|
||||
seq_printf(m, "min keysize : %u\n", alg->cra_blkcipher.min_keysize);
|
||||
seq_printf(m, "max keysize : %u\n", alg->cra_blkcipher.max_keysize);
|
||||
seq_printf(m, "ivsize : %u\n", alg->cra_blkcipher.ivsize);
|
||||
seq_printf(m, "geniv : <default>\n");
|
||||
}
|
||||
|
||||
const struct crypto_type crypto_blkcipher_type = {
|
||||
.ctxsize = crypto_blkcipher_ctxsize,
|
||||
.init = crypto_init_blkcipher_ops,
|
||||
#ifdef CONFIG_PROC_FS
|
||||
.show = crypto_blkcipher_show,
|
||||
#endif
|
||||
.report = crypto_blkcipher_report,
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Generic block chaining cipher type");
|
|
@ -8,29 +8,10 @@
|
|||
|
||||
#include <asm/unaligned.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/chacha.h>
|
||||
#include <crypto/internal/chacha.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
static void chacha_docrypt(u32 *state, u8 *dst, const u8 *src,
|
||||
unsigned int bytes, int nrounds)
|
||||
{
|
||||
/* aligned to potentially speed up crypto_xor() */
|
||||
u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));
|
||||
|
||||
while (bytes >= CHACHA_BLOCK_SIZE) {
|
||||
chacha_block(state, stream, nrounds);
|
||||
crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE);
|
||||
bytes -= CHACHA_BLOCK_SIZE;
|
||||
dst += CHACHA_BLOCK_SIZE;
|
||||
src += CHACHA_BLOCK_SIZE;
|
||||
}
|
||||
if (bytes) {
|
||||
chacha_block(state, stream, nrounds);
|
||||
crypto_xor_cpy(dst, src, stream, bytes);
|
||||
}
|
||||
}
|
||||
|
||||
static int chacha_stream_xor(struct skcipher_request *req,
|
||||
const struct chacha_ctx *ctx, const u8 *iv)
|
||||
{
|
||||
|
@ -40,7 +21,7 @@ static int chacha_stream_xor(struct skcipher_request *req,
|
|||
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
crypto_chacha_init(state, ctx, iv);
|
||||
chacha_init_generic(state, ctx->key, iv);
|
||||
|
||||
while (walk.nbytes > 0) {
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
|
@ -48,75 +29,23 @@ static int chacha_stream_xor(struct skcipher_request *req,
|
|||
if (nbytes < walk.total)
|
||||
nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
|
||||
|
||||
chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
|
||||
nbytes, ctx->nrounds);
|
||||
chacha_crypt_generic(state, walk.dst.virt.addr,
|
||||
walk.src.virt.addr, nbytes, ctx->nrounds);
|
||||
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv)
|
||||
{
|
||||
state[0] = 0x61707865; /* "expa" */
|
||||
state[1] = 0x3320646e; /* "nd 3" */
|
||||
state[2] = 0x79622d32; /* "2-by" */
|
||||
state[3] = 0x6b206574; /* "te k" */
|
||||
state[4] = ctx->key[0];
|
||||
state[5] = ctx->key[1];
|
||||
state[6] = ctx->key[2];
|
||||
state[7] = ctx->key[3];
|
||||
state[8] = ctx->key[4];
|
||||
state[9] = ctx->key[5];
|
||||
state[10] = ctx->key[6];
|
||||
state[11] = ctx->key[7];
|
||||
state[12] = get_unaligned_le32(iv + 0);
|
||||
state[13] = get_unaligned_le32(iv + 4);
|
||||
state[14] = get_unaligned_le32(iv + 8);
|
||||
state[15] = get_unaligned_le32(iv + 12);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_chacha_init);
|
||||
|
||||
static int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keysize, int nrounds)
|
||||
{
|
||||
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
int i;
|
||||
|
||||
if (keysize != CHACHA_KEY_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
|
||||
ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
|
||||
|
||||
ctx->nrounds = nrounds;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keysize)
|
||||
{
|
||||
return chacha_setkey(tfm, key, keysize, 20);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_chacha20_setkey);
|
||||
|
||||
int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keysize)
|
||||
{
|
||||
return chacha_setkey(tfm, key, keysize, 12);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_chacha12_setkey);
|
||||
|
||||
int crypto_chacha_crypt(struct skcipher_request *req)
|
||||
static int crypto_chacha_crypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
return chacha_stream_xor(req, ctx, req->iv);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_chacha_crypt);
|
||||
|
||||
int crypto_xchacha_crypt(struct skcipher_request *req)
|
||||
static int crypto_xchacha_crypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
@ -125,8 +54,8 @@ int crypto_xchacha_crypt(struct skcipher_request *req)
|
|||
u8 real_iv[16];
|
||||
|
||||
/* Compute the subkey given the original key and first 128 nonce bits */
|
||||
crypto_chacha_init(state, ctx, req->iv);
|
||||
hchacha_block(state, subctx.key, ctx->nrounds);
|
||||
chacha_init_generic(state, ctx->key, req->iv);
|
||||
hchacha_block_generic(state, subctx.key, ctx->nrounds);
|
||||
subctx.nrounds = ctx->nrounds;
|
||||
|
||||
/* Build the real IV */
|
||||
|
@ -136,7 +65,6 @@ int crypto_xchacha_crypt(struct skcipher_request *req)
|
|||
/* Generate the stream and XOR it with the data */
|
||||
return chacha_stream_xor(req, &subctx, real_iv);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_xchacha_crypt);
|
||||
|
||||
static struct skcipher_alg algs[] = {
|
||||
{
|
||||
|
@ -151,7 +79,7 @@ static struct skcipher_alg algs[] = {
|
|||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = CHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.setkey = crypto_chacha20_setkey,
|
||||
.setkey = chacha20_setkey,
|
||||
.encrypt = crypto_chacha_crypt,
|
||||
.decrypt = crypto_chacha_crypt,
|
||||
}, {
|
||||
|
@ -166,7 +94,7 @@ static struct skcipher_alg algs[] = {
|
|||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = XCHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.setkey = crypto_chacha20_setkey,
|
||||
.setkey = chacha20_setkey,
|
||||
.encrypt = crypto_xchacha_crypt,
|
||||
.decrypt = crypto_xchacha_crypt,
|
||||
}, {
|
||||
|
@ -181,7 +109,7 @@ static struct skcipher_alg algs[] = {
|
|||
.max_keysize = CHACHA_KEY_SIZE,
|
||||
.ivsize = XCHACHA_IV_SIZE,
|
||||
.chunksize = CHACHA_BLOCK_SIZE,
|
||||
.setkey = crypto_chacha12_setkey,
|
||||
.setkey = chacha12_setkey,
|
||||
.encrypt = crypto_xchacha_crypt,
|
||||
.decrypt = crypto_xchacha_crypt,
|
||||
}
|
||||
|
|
|
@@ -919,7 +919,7 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_HASH:
		return cryptd_create_hash(tmpl, tb, &queue);
@@ -213,20 +213,6 @@ static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_ablkcipher_request_to_engine - transfer one ablkcipher_request
 * to list into the engine queue
 * @engine: the hardware engine
 * @req: the request need to be listed into the engine queue
 * TODO: Remove this function when skcipher conversion is finished
 */
int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
						 struct ablkcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * to list into the engine queue
@@ -279,21 +265,6 @@ int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);

/**
 * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request need to be finalized
 * @err: error number
 * TODO: Remove this function when skcipher conversion is finished
 */
void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
					struct ablkcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
@@ -213,8 +213,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
drop_alg:
	crypto_mod_put(alg);

	if (err)
	if (err) {
		kfree_skb(skb);
		return err;
	}

	return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
}
@@ -213,10 +213,6 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
		if (crypto_report_cipher(skb, alg))
			goto nla_put_failure;
		break;
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		if (crypto_report_cipher(skb, alg))
			goto nla_put_failure;
		break;
	case CRYPTO_ALG_TYPE_CIPHER:
		if (crypto_report_cipher(skb, alg))
			goto nla_put_failure;
@@ -328,8 +324,10 @@ int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
drop_alg:
	crypto_mod_put(alg);

	if (err)
	if (err) {
		kfree_skb(skb);
		return err;
	}

	return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
}
@@ -0,0 +1,90 @@
// SPDX-License-Identifier: GPL-2.0-or-later

#include <crypto/curve25519.h>
#include <crypto/internal/kpp.h>
#include <crypto/kpp.h>
#include <linux/module.h>
#include <linux/scatterlist.h>

static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
				 unsigned int len)
{
	u8 *secret = kpp_tfm_ctx(tfm);

	if (!len)
		curve25519_generate_secret(secret);
	else if (len == CURVE25519_KEY_SIZE &&
		 crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE))
		memcpy(secret, buf, CURVE25519_KEY_SIZE);
	else
		return -EINVAL;
	return 0;
}

static int curve25519_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	const u8 *secret = kpp_tfm_ctx(tfm);
	u8 public_key[CURVE25519_KEY_SIZE];
	u8 buf[CURVE25519_KEY_SIZE];
	int copied, nbytes;
	u8 const *bp;

	if (req->src) {
		copied = sg_copy_to_buffer(req->src,
					   sg_nents_for_len(req->src,
							    CURVE25519_KEY_SIZE),
					   public_key, CURVE25519_KEY_SIZE);
		if (copied != CURVE25519_KEY_SIZE)
			return -EINVAL;
		bp = public_key;
	} else {
		bp = curve25519_base_point;
	}

	curve25519_generic(buf, secret, bp);

	/* might want less than we've got */
	nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len);
	copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
								nbytes),
				     buf, nbytes);
	if (copied != nbytes)
		return -EINVAL;
	return 0;
}

static unsigned int curve25519_max_size(struct crypto_kpp *tfm)
{
	return CURVE25519_KEY_SIZE;
}

static struct kpp_alg curve25519_alg = {
	.base.cra_name = "curve25519",
	.base.cra_driver_name = "curve25519-generic",
	.base.cra_priority = 100,
	.base.cra_module = THIS_MODULE,
	.base.cra_ctxsize = CURVE25519_KEY_SIZE,

	.set_secret = curve25519_set_secret,
	.generate_public_key = curve25519_compute_value,
	.compute_shared_secret = curve25519_compute_value,
	.max_size = curve25519_max_size,
};

static int curve25519_init(void)
{
	return crypto_register_kpp(&curve25519_alg);
}

static void curve25519_exit(void)
{
	crypto_unregister_kpp(&curve25519_alg);
}

subsys_initcall(curve25519_init);
module_exit(curve25519_exit);

MODULE_ALIAS_CRYPTO("curve25519");
MODULE_ALIAS_CRYPTO("curve25519-generic");
MODULE_LICENSE("GPL");
@@ -336,7 +336,7 @@ static u64 vli_usub(u64 *result, const u64 *left, u64 right,
static uint128_t mul_64_64(u64 left, u64 right)
{
	uint128_t result;
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
#if defined(CONFIG_ARCH_SUPPORTS_INT128)
	unsigned __int128 m = (unsigned __int128)left * right;

	result.m_low = m;
@@ -1284,10 +1284,11 @@ EXPORT_SYMBOL(ecc_point_mult_shamir);
static inline void ecc_swap_digits(const u64 *in, u64 *out,
				   unsigned int ndigits)
{
	const __be64 *src = (__force __be64 *)in;
	int i;

	for (i = 0; i < ndigits; i++)
		out[i] = __swab64(in[ndigits - 1 - i]);
		out[i] = be64_to_cpu(src[ndigits - 1 - i]);
}

static int __ecc_is_key_valid(const struct ecc_curve *curve,
@@ -188,8 +188,7 @@ static void essiv_aead_done(struct crypto_async_request *areq, int err)
	struct aead_request *req = areq->data;
	struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);

	if (rctx->assoc)
		kfree(rctx->assoc);
	kfree(rctx->assoc);
	aead_request_complete(req, err);
}

@@ -486,7 +485,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
	type = algt->type & algt->mask;

	switch (type) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
	case CRYPTO_ALG_TYPE_SKCIPHER:
		skcipher_inst = kzalloc(sizeof(*skcipher_inst) +
					sizeof(*ictx), GFP_KERNEL);
		if (!skcipher_inst)
@@ -586,7 +585,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
	base->cra_alignmask = block_base->cra_alignmask;
	base->cra_priority = block_base->cra_priority;

	if (type == CRYPTO_ALG_TYPE_BLKCIPHER) {
	if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
		skcipher_inst->alg.setkey = essiv_skcipher_setkey;
		skcipher_inst->alg.encrypt = essiv_skcipher_encrypt;
		skcipher_inst->alg.decrypt = essiv_skcipher_decrypt;
@@ -628,7 +627,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
out_free_hash:
	crypto_mod_put(_hash_alg);
out_drop_skcipher:
	if (type == CRYPTO_ALG_TYPE_BLKCIPHER)
	if (type == CRYPTO_ALG_TYPE_SKCIPHER)
		crypto_drop_skcipher(&ictx->u.skcipher_spawn);
	else
		crypto_drop_aead(&ictx->u.aead_spawn);
@ -0,0 +1,176 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* geniv: Shared IV generator code
|
||||
*
|
||||
* This file provides common code to IV generators such as seqiv.
|
||||
*
|
||||
* Copyright (c) 2007-2019 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
*/
|
||||
|
||||
#include <crypto/internal/geniv.h>
|
||||
#include <crypto/internal/rng.h>
|
||||
#include <crypto/null.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
static int aead_geniv_setkey(struct crypto_aead *tfm,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
|
||||
return crypto_aead_setkey(ctx->child, key, keylen);
|
||||
}
|
||||
|
||||
static int aead_geniv_setauthsize(struct crypto_aead *tfm,
|
||||
unsigned int authsize)
|
||||
{
|
||||
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
|
||||
return crypto_aead_setauthsize(ctx->child, authsize);
|
||||
}
|
||||
|
||||
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
|
||||
struct rtattr **tb, u32 type, u32 mask)
|
||||
{
|
||||
const char *name;
|
||||
struct crypto_aead_spawn *spawn;
|
||||
struct crypto_attr_type *algt;
|
||||
struct aead_instance *inst;
|
||||
struct aead_alg *alg;
|
||||
unsigned int ivsize;
|
||||
unsigned int maxauthsize;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_CAST(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(name))
|
||||
return ERR_CAST(name);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
spawn = aead_instance_ctx(inst);
|
||||
|
||||
/* Ignore async algorithms if necessary. */
|
||||
mask |= crypto_requires_sync(algt->type, algt->mask);
|
||||
|
||||
crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
|
||||
err = crypto_grab_aead(spawn, name, type, mask);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
alg = crypto_spawn_aead_alg(spawn);
|
||||
|
||||
ivsize = crypto_aead_alg_ivsize(alg);
|
||||
maxauthsize = crypto_aead_alg_maxauthsize(alg);
|
||||
|
||||
err = -EINVAL;
|
||||
if (ivsize < sizeof(u64))
|
||||
goto err_drop_alg;
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"%s(%s)", tmpl->name, alg->base.cra_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_alg;
|
||||
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"%s(%s)", tmpl->name, alg->base.cra_driver_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_alg;
|
||||
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
|
||||
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
|
||||
|
||||
inst->alg.setkey = aead_geniv_setkey;
|
||||
inst->alg.setauthsize = aead_geniv_setauthsize;
|
||||
|
||||
inst->alg.ivsize = ivsize;
|
||||
inst->alg.maxauthsize = maxauthsize;
|
||||
|
||||
out:
|
||||
return inst;
|
||||
|
||||
err_drop_alg:
|
||||
crypto_drop_aead(spawn);
|
||||
err_free_inst:
|
||||
kfree(inst);
|
||||
inst = ERR_PTR(err);
|
||||
goto out;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(aead_geniv_alloc);
|
||||
|
||||
void aead_geniv_free(struct aead_instance *inst)
|
||||
{
|
||||
crypto_drop_aead(aead_instance_ctx(inst));
|
||||
kfree(inst);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(aead_geniv_free);
|
||||
|
||||
int aead_init_geniv(struct crypto_aead *aead)
|
||||
{
|
||||
struct aead_geniv_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct aead_instance *inst = aead_alg_instance(aead);
|
||||
struct crypto_aead *child;
|
||||
int err;
|
||||
|
||||
spin_lock_init(&ctx->lock);
|
||||
|
||||
err = crypto_get_default_rng();
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
|
||||
crypto_aead_ivsize(aead));
|
||||
crypto_put_default_rng();
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
ctx->sknull = crypto_get_default_null_skcipher();
|
||||
err = PTR_ERR(ctx->sknull);
|
||||
if (IS_ERR(ctx->sknull))
|
||||
goto out;
|
||||
|
||||
child = crypto_spawn_aead(aead_instance_ctx(inst));
|
||||
err = PTR_ERR(child);
|
||||
if (IS_ERR(child))
|
||||
goto drop_null;
|
||||
|
||||
ctx->child = child;
|
||||
crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) +
|
||||
sizeof(struct aead_request));
|
||||
|
||||
err = 0;
|
||||
|
||||
out:
|
||||
return err;
|
||||
|
||||
drop_null:
|
||||
crypto_put_default_null_skcipher();
|
||||
goto out;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(aead_init_geniv);
|
||||
|
||||
void aead_exit_geniv(struct crypto_aead *tfm)
|
||||
{
|
||||
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
|
||||
crypto_free_aead(ctx->child);
|
||||
crypto_put_default_null_skcipher();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(aead_exit_geniv);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Shared IV generator code");
|
|
@@ -44,13 +44,7 @@
#include <linux/crypto.h>
#include <crypto/internal/rng.h>

struct rand_data;
int jent_read_entropy(struct rand_data *ec, unsigned char *data,
		      unsigned int len);
int jent_entropy_init(void);
struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
					       unsigned int flags);
void jent_entropy_collector_free(struct rand_data *entropy_collector);
#include "jitterentropy.h"

/***************************************************************************
 * Helper function
@@ -103,12 +103,7 @@ struct rand_data {
 * Helper functions
 ***************************************************************************/

void jent_get_nstime(__u64 *out);
void *jent_zalloc(unsigned int len);
void jent_zfree(void *ptr);
int jent_fips_enabled(void);
void jent_panic(char *s);
void jent_memcpy(void *dest, const void *src, unsigned int n);
#include "jitterentropy.h"

/**
 * Update of the loop count used for the next round of
@@ -172,7 +167,7 @@ static __u64 jent_loop_shuffle(struct rand_data *ec,
 * implies that careful retesting must be done.
 *
 * Input:
 * @ec entropy collector struct -- may be NULL
 * @ec entropy collector struct
 * @time time stamp to be injected
 * @loop_cnt if a value not equal to 0 is set, use the given value as number of
 *           loops to perform the folding
@@ -400,8 +395,8 @@ static void jent_gen_entropy(struct rand_data *ec)
 * primes the test if needed.
 *
 * Return:
 * 0 if FIPS test passed
 * < 0 if FIPS test failed
 * returns normally if FIPS test passed
 * panics the kernel if FIPS test failed
 */
static void jent_fips_test(struct rand_data *ec)
{
@@ -0,0 +1,17 @@
// SPDX-License-Identifier: GPL-2.0-or-later

extern void *jent_zalloc(unsigned int len);
extern void jent_zfree(void *ptr);
extern int jent_fips_enabled(void);
extern void jent_panic(char *s);
extern void jent_memcpy(void *dest, const void *src, unsigned int n);
extern void jent_get_nstime(__u64 *out);

struct rand_data;
extern int jent_entropy_init(void);
extern int jent_read_entropy(struct rand_data *ec, unsigned char *data,
			     unsigned int len);

extern struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
						       unsigned int flags);
extern void jent_entropy_collector_free(struct rand_data *entropy_collector);
@@ -33,6 +33,7 @@
#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/nhpoly1305.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
@@ -78,7 +79,7 @@ static void process_nh_hash_value(struct nhpoly1305_state *state,
	BUILD_BUG_ON(NH_HASH_BYTES % POLY1305_BLOCK_SIZE != 0);

	poly1305_core_blocks(&state->poly_state, &key->poly_key, state->nh_hash,
			     NH_HASH_BYTES / POLY1305_BLOCK_SIZE);
			     NH_HASH_BYTES / POLY1305_BLOCK_SIZE, 1);
}

/*
@ -13,158 +13,26 @@
|
|||
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/poly1305.h>
|
||||
#include <crypto/internal/poly1305.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
static inline u64 mlt(u64 a, u64 b)
|
||||
{
|
||||
return a * b;
|
||||
}
|
||||
|
||||
static inline u32 sr(u64 v, u_char n)
|
||||
{
|
||||
return v >> n;
|
||||
}
|
||||
|
||||
static inline u32 and(u32 v, u32 mask)
|
||||
{
|
||||
return v & mask;
|
||||
}
|
||||
|
||||
int crypto_poly1305_init(struct shash_desc *desc)
|
||||
static int crypto_poly1305_init(struct shash_desc *desc)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
poly1305_core_init(&dctx->h);
|
||||
dctx->buflen = 0;
|
||||
dctx->rset = false;
|
||||
dctx->rset = 0;
|
||||
dctx->sset = false;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_poly1305_init);
|
||||
|
||||
void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key)
|
||||
{
|
||||
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
|
||||
key->r[0] = (get_unaligned_le32(raw_key + 0) >> 0) & 0x3ffffff;
|
||||
key->r[1] = (get_unaligned_le32(raw_key + 3) >> 2) & 0x3ffff03;
|
||||
key->r[2] = (get_unaligned_le32(raw_key + 6) >> 4) & 0x3ffc0ff;
|
||||
key->r[3] = (get_unaligned_le32(raw_key + 9) >> 6) & 0x3f03fff;
|
||||
key->r[4] = (get_unaligned_le32(raw_key + 12) >> 8) & 0x00fffff;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(poly1305_core_setkey);
|
||||
|
||||
/*
|
||||
* Poly1305 requires a unique key for each tag, which implies that we can't set
|
||||
* it on the tfm that gets accessed by multiple users simultaneously. Instead we
|
||||
* expect the key as the first 32 bytes in the update() call.
|
||||
*/
|
||||
unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
if (!dctx->sset) {
|
||||
if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
|
||||
poly1305_core_setkey(&dctx->r, src);
|
||||
src += POLY1305_BLOCK_SIZE;
|
||||
srclen -= POLY1305_BLOCK_SIZE;
|
||||
dctx->rset = true;
|
||||
}
|
||||
if (srclen >= POLY1305_BLOCK_SIZE) {
|
||||
dctx->s[0] = get_unaligned_le32(src + 0);
|
||||
dctx->s[1] = get_unaligned_le32(src + 4);
|
||||
dctx->s[2] = get_unaligned_le32(src + 8);
|
||||
dctx->s[3] = get_unaligned_le32(src + 12);
|
||||
src += POLY1305_BLOCK_SIZE;
|
||||
srclen -= POLY1305_BLOCK_SIZE;
|
||||
dctx->sset = true;
|
||||
}
|
||||
}
|
||||
return srclen;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_poly1305_setdesckey);
|
||||
|
||||
static void poly1305_blocks_internal(struct poly1305_state *state,
|
||||
const struct poly1305_key *key,
|
||||
const void *src, unsigned int nblocks,
|
||||
u32 hibit)
|
||||
{
|
||||
u32 r0, r1, r2, r3, r4;
|
||||
u32 s1, s2, s3, s4;
|
||||
u32 h0, h1, h2, h3, h4;
|
||||
u64 d0, d1, d2, d3, d4;
|
||||
|
||||
if (!nblocks)
|
||||
return;
|
||||
|
||||
r0 = key->r[0];
|
||||
r1 = key->r[1];
|
||||
r2 = key->r[2];
|
||||
r3 = key->r[3];
|
||||
r4 = key->r[4];
|
||||
|
||||
s1 = r1 * 5;
|
||||
s2 = r2 * 5;
|
||||
s3 = r3 * 5;
|
||||
s4 = r4 * 5;
|
||||
|
||||
h0 = state->h[0];
|
||||
h1 = state->h[1];
|
||||
h2 = state->h[2];
|
||||
h3 = state->h[3];
|
||||
h4 = state->h[4];
|
||||
|
||||
do {
|
||||
/* h += m[i] */
|
||||
h0 += (get_unaligned_le32(src + 0) >> 0) & 0x3ffffff;
|
||||
h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff;
|
||||
h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff;
|
||||
h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff;
|
||||
h4 += (get_unaligned_le32(src + 12) >> 8) | hibit;
|
||||
|
||||
/* h *= r */
|
||||
d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) +
|
||||
mlt(h3, s2) + mlt(h4, s1);
|
||||
d1 = mlt(h0, r1) + mlt(h1, r0) + mlt(h2, s4) +
|
||||
mlt(h3, s3) + mlt(h4, s2);
|
||||
d2 = mlt(h0, r2) + mlt(h1, r1) + mlt(h2, r0) +
|
||||
mlt(h3, s4) + mlt(h4, s3);
|
||||
d3 = mlt(h0, r3) + mlt(h1, r2) + mlt(h2, r1) +
|
||||
mlt(h3, r0) + mlt(h4, s4);
|
||||
d4 = mlt(h0, r4) + mlt(h1, r3) + mlt(h2, r2) +
|
||||
mlt(h3, r1) + mlt(h4, r0);
|
||||
|
||||
/* (partial) h %= p */
|
||||
d1 += sr(d0, 26); h0 = and(d0, 0x3ffffff);
|
||||
d2 += sr(d1, 26); h1 = and(d1, 0x3ffffff);
|
||||
d3 += sr(d2, 26); h2 = and(d2, 0x3ffffff);
|
||||
d4 += sr(d3, 26); h3 = and(d3, 0x3ffffff);
|
||||
h0 += sr(d4, 26) * 5; h4 = and(d4, 0x3ffffff);
|
||||
h1 += h0 >> 26; h0 = h0 & 0x3ffffff;
|
||||
|
||||
src += POLY1305_BLOCK_SIZE;
|
||||
} while (--nblocks);
|
||||
|
||||
state->h[0] = h0;
|
||||
state->h[1] = h1;
|
||||
state->h[2] = h2;
|
||||
state->h[3] = h3;
|
||||
state->h[4] = h4;
|
||||
}
|
||||
|
||||
void poly1305_core_blocks(struct poly1305_state *state,
|
||||
const struct poly1305_key *key,
|
||||
const void *src, unsigned int nblocks)
|
||||
{
|
||||
poly1305_blocks_internal(state, key, src, nblocks, 1 << 24);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(poly1305_core_blocks);
|
||||
|
||||
static void poly1305_blocks(struct poly1305_desc_ctx *dctx,
|
||||
const u8 *src, unsigned int srclen, u32 hibit)
|
||||
static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
|
||||
unsigned int srclen)
|
||||
{
|
||||
unsigned int datalen;
|
||||
|
||||
|
@ -174,12 +42,12 @@ static void poly1305_blocks(struct poly1305_desc_ctx *dctx,
|
|||
srclen = datalen;
|
||||
}
|
||||
|
||||
poly1305_blocks_internal(&dctx->h, &dctx->r,
|
||||
src, srclen / POLY1305_BLOCK_SIZE, hibit);
|
||||
poly1305_core_blocks(&dctx->h, dctx->r, src,
|
||||
srclen / POLY1305_BLOCK_SIZE, 1);
|
||||
}
|
||||
|
||||
int crypto_poly1305_update(struct shash_desc *desc,
|
||||
const u8 *src, unsigned int srclen)
|
||||
static int crypto_poly1305_update(struct shash_desc *desc,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
unsigned int bytes;
|
||||
|
@ -193,13 +61,13 @@ int crypto_poly1305_update(struct shash_desc *desc,
|
|||
|
||||
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
|
||||
poly1305_blocks(dctx, dctx->buf,
|
||||
POLY1305_BLOCK_SIZE, 1 << 24);
|
||||
POLY1305_BLOCK_SIZE);
|
||||
dctx->buflen = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
|
||||
poly1305_blocks(dctx, src, srclen, 1 << 24);
|
||||
poly1305_blocks(dctx, src, srclen);
|
||||
src += srclen - (srclen % POLY1305_BLOCK_SIZE);
|
||||
srclen %= POLY1305_BLOCK_SIZE;
|
||||
}
|
||||
|
@ -211,87 +79,17 @@ int crypto_poly1305_update(struct shash_desc *desc,
|
|||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_poly1305_update);
|
||||
|
||||
void poly1305_core_emit(const struct poly1305_state *state, void *dst)
|
||||
{
|
||||
u32 h0, h1, h2, h3, h4;
|
||||
u32 g0, g1, g2, g3, g4;
|
||||
u32 mask;
|
||||
|
||||
/* fully carry h */
|
||||
h0 = state->h[0];
|
||||
h1 = state->h[1];
|
||||
h2 = state->h[2];
|
||||
h3 = state->h[3];
|
||||
h4 = state->h[4];
|
||||
|
||||
h2 += (h1 >> 26); h1 = h1 & 0x3ffffff;
|
||||
h3 += (h2 >> 26); h2 = h2 & 0x3ffffff;
|
||||
h4 += (h3 >> 26); h3 = h3 & 0x3ffffff;
|
||||
h0 += (h4 >> 26) * 5; h4 = h4 & 0x3ffffff;
|
||||
h1 += (h0 >> 26); h0 = h0 & 0x3ffffff;
|
||||
|
||||
/* compute h + -p */
|
||||
g0 = h0 + 5;
|
||||
g1 = h1 + (g0 >> 26); g0 &= 0x3ffffff;
|
||||
g2 = h2 + (g1 >> 26); g1 &= 0x3ffffff;
|
||||
g3 = h3 + (g2 >> 26); g2 &= 0x3ffffff;
|
||||
g4 = h4 + (g3 >> 26) - (1 << 26); g3 &= 0x3ffffff;
|
||||
|
||||
/* select h if h < p, or h + -p if h >= p */
|
||||
mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1;
|
||||
g0 &= mask;
|
||||
g1 &= mask;
|
||||
g2 &= mask;
|
||||
g3 &= mask;
|
||||
g4 &= mask;
|
||||
mask = ~mask;
|
||||
h0 = (h0 & mask) | g0;
|
||||
h1 = (h1 & mask) | g1;
|
||||
h2 = (h2 & mask) | g2;
|
||||
h3 = (h3 & mask) | g3;
|
||||
h4 = (h4 & mask) | g4;
|
||||
|
||||
/* h = h % (2^128) */
|
||||
put_unaligned_le32((h0 >> 0) | (h1 << 26), dst + 0);
|
||||
put_unaligned_le32((h1 >> 6) | (h2 << 20), dst + 4);
|
||||
put_unaligned_le32((h2 >> 12) | (h3 << 14), dst + 8);
|
||||
put_unaligned_le32((h3 >> 18) | (h4 << 8), dst + 12);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(poly1305_core_emit);
|
||||
|
||||
int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
|
||||
static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
__le32 digest[4];
|
||||
u64 f = 0;
|
||||
|
||||
if (unlikely(!dctx->sset))
|
||||
return -ENOKEY;
|
||||
|
||||
if (unlikely(dctx->buflen)) {
|
||||
dctx->buf[dctx->buflen++] = 1;
|
||||
memset(dctx->buf + dctx->buflen, 0,
|
||||
POLY1305_BLOCK_SIZE - dctx->buflen);
|
||||
poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 0);
|
||||
}
|
||||
|
||||
poly1305_core_emit(&dctx->h, digest);
|
||||
|
||||
/* mac = (h + s) % (2^128) */
|
||||
f = (f >> 32) + le32_to_cpu(digest[0]) + dctx->s[0];
|
||||
put_unaligned_le32(f, dst + 0);
|
||||
f = (f >> 32) + le32_to_cpu(digest[1]) + dctx->s[1];
|
||||
put_unaligned_le32(f, dst + 4);
|
||||
f = (f >> 32) + le32_to_cpu(digest[2]) + dctx->s[2];
|
||||
put_unaligned_le32(f, dst + 8);
|
||||
f = (f >> 32) + le32_to_cpu(digest[3]) + dctx->s[3];
|
||||
put_unaligned_le32(f, dst + 12);
|
||||
|
||||
poly1305_final_generic(dctx, dst);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_poly1305_final);
|
||||
|
||||
static struct shash_alg poly1305_alg = {
|
||||
.digestsize = POLY1305_DIGEST_SIZE,
|
||||
|
|
|
@ -580,12 +580,6 @@ EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
|
|||
|
||||
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
|
||||
{
|
||||
if (alg->cra_type == &crypto_blkcipher_type)
|
||||
return sizeof(struct crypto_blkcipher *);
|
||||
|
||||
if (alg->cra_type == &crypto_ablkcipher_type)
|
||||
return sizeof(struct crypto_ablkcipher *);
|
||||
|
||||
return crypto_alg_extsize(alg);
|
||||
}
|
||||
|
||||
|
@@ -595,205 +589,6 @@ static void skcipher_set_needkey(struct crypto_skcipher *tfm)
 	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
 }
 
-static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
-				     const u8 *key, unsigned int keylen)
-{
-	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_blkcipher *blkcipher = *ctx;
-	int err;
-
-	crypto_blkcipher_clear_flags(blkcipher, ~0);
-	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
-					      CRYPTO_TFM_REQ_MASK);
-	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
-	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
-				       CRYPTO_TFM_RES_MASK);
-	if (unlikely(err)) {
-		skcipher_set_needkey(tfm);
-		return err;
-	}
-
-	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
-	return 0;
-}
-
-static int skcipher_crypt_blkcipher(struct skcipher_request *req,
-				    int (*crypt)(struct blkcipher_desc *,
-						 struct scatterlist *,
-						 struct scatterlist *,
-						 unsigned int))
-{
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
-	struct blkcipher_desc desc = {
-		.tfm = *ctx,
-		.info = req->iv,
-		.flags = req->base.flags,
-	};
-
-
-	return crypt(&desc, req->dst, req->src, req->cryptlen);
-}
-
-static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
-{
-	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
-	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
-
-	return skcipher_crypt_blkcipher(req, alg->encrypt);
-}
-
-static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
-{
-	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
-	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
-
-	return skcipher_crypt_blkcipher(req, alg->decrypt);
-}
-
-static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
-{
-	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_blkcipher(*ctx);
-}
-
-static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
-{
-	struct crypto_alg *calg = tfm->__crt_alg;
-	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
-	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
-	struct crypto_blkcipher *blkcipher;
-	struct crypto_tfm *btfm;
-
-	if (!crypto_mod_get(calg))
-		return -EAGAIN;
-
-	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
-					CRYPTO_ALG_TYPE_MASK);
-	if (IS_ERR(btfm)) {
-		crypto_mod_put(calg);
-		return PTR_ERR(btfm);
-	}
-
-	blkcipher = __crypto_blkcipher_cast(btfm);
-	*ctx = blkcipher;
-	tfm->exit = crypto_exit_skcipher_ops_blkcipher;
-
-	skcipher->setkey = skcipher_setkey_blkcipher;
-	skcipher->encrypt = skcipher_encrypt_blkcipher;
-	skcipher->decrypt = skcipher_decrypt_blkcipher;
-
-	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
-	skcipher->keysize = calg->cra_blkcipher.max_keysize;
-
-	skcipher_set_needkey(skcipher);
-
-	return 0;
-}
-
-static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
-				      const u8 *key, unsigned int keylen)
-{
-	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_ablkcipher *ablkcipher = *ctx;
-	int err;
-
-	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
-	crypto_ablkcipher_set_flags(ablkcipher,
-				    crypto_skcipher_get_flags(tfm) &
-				    CRYPTO_TFM_REQ_MASK);
-	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
-	crypto_skcipher_set_flags(tfm,
-				  crypto_ablkcipher_get_flags(ablkcipher) &
-				  CRYPTO_TFM_RES_MASK);
-	if (unlikely(err)) {
-		skcipher_set_needkey(tfm);
-		return err;
-	}
-
-	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
-	return 0;
-}
-
-static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
-				     int (*crypt)(struct ablkcipher_request *))
-{
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
-	struct ablkcipher_request *subreq = skcipher_request_ctx(req);
-
-	ablkcipher_request_set_tfm(subreq, *ctx);
-	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
-				     req->iv);
-
-	return crypt(subreq);
-}
-
-static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
-{
-	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
-	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
-
-	return skcipher_crypt_ablkcipher(req, alg->encrypt);
-}
-
-static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
-{
-	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
-	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
-
-	return skcipher_crypt_ablkcipher(req, alg->decrypt);
-}
-
-static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
-{
-	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_ablkcipher(*ctx);
-}
-
-static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
-{
-	struct crypto_alg *calg = tfm->__crt_alg;
-	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
-	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
-	struct crypto_ablkcipher *ablkcipher;
-	struct crypto_tfm *abtfm;
-
-	if (!crypto_mod_get(calg))
-		return -EAGAIN;
-
-	abtfm = __crypto_alloc_tfm(calg, 0, 0);
-	if (IS_ERR(abtfm)) {
-		crypto_mod_put(calg);
-		return PTR_ERR(abtfm);
-	}
-
-	ablkcipher = __crypto_ablkcipher_cast(abtfm);
-	*ctx = ablkcipher;
-	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;
-
-	skcipher->setkey = skcipher_setkey_ablkcipher;
-	skcipher->encrypt = skcipher_encrypt_ablkcipher;
-	skcipher->decrypt = skcipher_decrypt_ablkcipher;
-
-	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
-			    sizeof(struct ablkcipher_request);
-	skcipher->keysize = calg->cra_ablkcipher.max_keysize;
-
-	skcipher_set_needkey(skcipher);
-
-	return 0;
-}
-
 static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
 				     const u8 *key, unsigned int keylen)
 {
@@ -888,12 +683,6 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
 	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
 	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
 
-	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
-		return crypto_init_skcipher_ops_blkcipher(tfm);
-
-	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type)
-		return crypto_init_skcipher_ops_ablkcipher(tfm);
-
 	skcipher->setkey = skcipher_setkey;
 	skcipher->encrypt = alg->encrypt;
 	skcipher->decrypt = alg->decrypt;
@@ -964,7 +753,7 @@ static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
 }
 #endif
 
-static const struct crypto_type crypto_skcipher_type2 = {
+static const struct crypto_type crypto_skcipher_type = {
 	.extsize = crypto_skcipher_extsize,
 	.init_tfm = crypto_skcipher_init_tfm,
 	.free = crypto_skcipher_free_instance,
@@ -973,7 +762,7 @@ static const struct crypto_type crypto_skcipher_type2 = {
 #endif
 	.report = crypto_skcipher_report,
 	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
-	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
+	.maskset = CRYPTO_ALG_TYPE_MASK,
 	.type = CRYPTO_ALG_TYPE_SKCIPHER,
 	.tfmsize = offsetof(struct crypto_skcipher, base),
 };
@@ -981,7 +770,7 @@ static const struct crypto_type crypto_skcipher_type2 = {
 int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
 			 const char *name, u32 type, u32 mask)
 {
-	spawn->base.frontend = &crypto_skcipher_type2;
+	spawn->base.frontend = &crypto_skcipher_type;
 	return crypto_grab_spawn(&spawn->base, name, type, mask);
 }
 EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
@@ -989,7 +778,7 @@ EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
 struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
 					      u32 type, u32 mask)
 {
-	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
+	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
 
@@ -1001,7 +790,7 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
 	/* Only sync algorithms allowed. */
 	mask |= CRYPTO_ALG_ASYNC;
 
-	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
+	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
 
 	/*
 	 * Make sure we do not allocate something that might get used with
@@ -1017,12 +806,11 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
 
-int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
+int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
 {
-	return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
-				   type, mask);
+	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
 }
-EXPORT_SYMBOL_GPL(crypto_has_skcipher2);
+EXPORT_SYMBOL_GPL(crypto_has_skcipher);
 
 static int skcipher_prepare_alg(struct skcipher_alg *alg)
 {
@@ -1037,7 +825,7 @@ static int skcipher_prepare_alg(struct skcipher_alg *alg)
 	if (!alg->walksize)
 		alg->walksize = alg->chunksize;
 
-	base->cra_type = &crypto_skcipher_type2;
+	base->cra_type = &crypto_skcipher_type;
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
 
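[Editor's note] The crypto/skcipher.c hunks above drop the blkcipher/ablkcipher compatibility glue, so every in-kernel user now reaches a cipher through the skcipher front end directly. A minimal sketch of that call path follows, assuming synchronous completion and a "cbc(aes)" implementation being available; the helper name do_one_encrypt() is illustrative and not part of this series.

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int do_one_encrypt(u8 *buf, unsigned int len,
			  const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* Allocate the transform by algorithm name; no legacy type flags. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* One in-place scatterlist entry; wait synchronously for completion. */
	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

The same request can be reused for crypto_skcipher_decrypt(); only the removed legacy entry points change in this series, not this user-facing flow.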
@@ -65,7 +65,7 @@ static int mode;
 static u32 num_mb = 8;
 static char *tvmem[TVMEMSIZE];
 
-static char *check[] = {
+static const char *check[] = {
 	"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3",
 	"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
 	"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
@@ -1634,7 +1634,7 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
 
 static void test_available(void)
 {
-	char **name = check;
+	const char **name = check;
 
 	while (*name) {
 		printk("alg %s ", *name);
@@ -4022,6 +4022,58 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.alg = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
 		.test = alg_test_null,
 		.fips_allowed = 1,
+	}, {
+		.alg = "blake2b-160",
+		.test = alg_test_hash,
+		.fips_allowed = 0,
+		.suite = {
+			.hash = __VECS(blake2b_160_tv_template)
+		}
+	}, {
+		.alg = "blake2b-256",
+		.test = alg_test_hash,
+		.fips_allowed = 0,
+		.suite = {
+			.hash = __VECS(blake2b_256_tv_template)
+		}
+	}, {
+		.alg = "blake2b-384",
+		.test = alg_test_hash,
+		.fips_allowed = 0,
+		.suite = {
+			.hash = __VECS(blake2b_384_tv_template)
+		}
+	}, {
+		.alg = "blake2b-512",
+		.test = alg_test_hash,
+		.fips_allowed = 0,
+		.suite = {
+			.hash = __VECS(blake2b_512_tv_template)
+		}
+	}, {
+		.alg = "blake2s-128",
+		.test = alg_test_hash,
+		.suite = {
+			.hash = __VECS(blakes2s_128_tv_template)
+		}
+	}, {
+		.alg = "blake2s-160",
+		.test = alg_test_hash,
+		.suite = {
+			.hash = __VECS(blakes2s_160_tv_template)
+		}
+	}, {
+		.alg = "blake2s-224",
+		.test = alg_test_hash,
+		.suite = {
+			.hash = __VECS(blakes2s_224_tv_template)
+		}
+	}, {
+		.alg = "blake2s-256",
+		.test = alg_test_hash,
+		.suite = {
+			.hash = __VECS(blakes2s_256_tv_template)
+		}
 	}, {
 		.alg = "cbc(aes)",
 		.test = alg_test_skcipher,
@@ -4125,6 +4177,12 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.suite = {
 			.cipher = __VECS(aes_cfb_tv_template)
 		},
+	}, {
+		.alg = "cfb(sm4)",
+		.test = alg_test_skcipher,
+		.suite = {
+			.cipher = __VECS(sm4_cfb_tv_template)
+		}
 	}, {
 		.alg = "chacha20",
 		.test = alg_test_skcipher,
@@ -4259,6 +4317,12 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.alg = "cts(cbc(paes))",
 		.test = alg_test_null,
 		.fips_allowed = 1,
+	}, {
+		.alg = "curve25519",
+		.test = alg_test_kpp,
+		.suite = {
+			.kpp = __VECS(curve25519_tv_template)
+		}
 	}, {
 		.alg = "deflate",
 		.test = alg_test_comp,
@@ -4654,6 +4718,12 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.suite = {
 			.hash = __VECS(hmac_sha512_tv_template)
 		}
+	}, {
+		.alg = "hmac(sm3)",
+		.test = alg_test_hash,
+		.suite = {
+			.hash = __VECS(hmac_sm3_tv_template)
+		}
 	}, {
 		.alg = "hmac(streebog256)",
 		.test = alg_test_hash,
@@ -4790,6 +4860,12 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.alg = "ofb(paes)",
 		.test = alg_test_null,
 		.fips_allowed = 1,
+	}, {
+		.alg = "ofb(sm4)",
+		.test = alg_test_skcipher,
+		.suite = {
+			.cipher = __VECS(sm4_ofb_tv_template)
+		}
 	}, {
 		.alg = "pcbc(fcrypt)",
 		.test = alg_test_skcipher,
@@ -4828,6 +4904,12 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.suite = {
 			.cipher = __VECS(aes_ctr_rfc3686_tv_template)
 		}
+	}, {
+		.alg = "rfc3686(ctr(sm4))",
+		.test = alg_test_skcipher,
+		.suite = {
+			.cipher = __VECS(sm4_ctr_rfc3686_tv_template)
+		}
 	}, {
 		.alg = "rfc4106(gcm(aes))",
 		.generic_driver = "rfc4106(gcm_base(ctr(aes-generic),ghash-generic))",
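[Editor's note] The new blake2b-*/blake2s-* entries above are exercised through the generic shash interface, so a caller computes a digest the same way as for any other hash. A minimal sketch, assuming the blake2s generic implementation is built in; the helper name blake2s256_digest() is illustrative, and the output buffer is assumed to be 32 bytes.

#include <crypto/hash.h>
#include <linux/err.h>

static int blake2s256_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("blake2s-256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		/* One-shot digest over a contiguous buffer. */
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return err;
}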
 crypto/testmgr.h | 2124 lines changed (diff not shown because of its size)
@@ -555,7 +555,7 @@ static int tgr192_final(struct shash_desc *desc, u8 * out)
 	__le32 *le32p;
 	u32 t, msb, lsb;
 
-	tgr192_update(desc, NULL, 0); /* flush */ ;
+	tgr192_update(desc, NULL, 0); /* flush */
 
 	msb = 0;
 	t = tctx->nblocks;
@@ -583,7 +583,7 @@ static int tgr192_final(struct shash_desc *desc, u8 * out)
 	while (tctx->count < 64) {
 		tctx->hash[tctx->count++] = 0;
 	}
-	tgr192_update(desc, NULL, 0); /* flush */ ;
+	tgr192_update(desc, NULL, 0); /* flush */
 	memset(tctx->hash, 0, 56); /* fill next block with zeroes */
 	}
 	/* append the 64 bit count */
@@ -308,6 +308,19 @@ config HW_RANDOM_HISI
 
 	  If unsure, say Y.
 
+config HW_RANDOM_HISI_V2
+	tristate "HiSilicon True Random Number Generator V2 support"
+	depends on HW_RANDOM && ARM64 && ACPI
+	default HW_RANDOM
+	help
+	  This driver provides kernel-side support for the True Random Number
+	  Generator V2 hardware found on HiSilicon Hi1620 SoC.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called hisi-trng-v2.
+
+	  If unsure, say Y.
+
 config HW_RANDOM_ST
 	tristate "ST Microelectronics HW Random Number Generator support"
 	depends on HW_RANDOM && ARCH_STI
@@ -440,6 +453,19 @@ config HW_RANDOM_OPTEE
 
 	  If unsure, say Y.
 
+config HW_RANDOM_NPCM
+	tristate "NPCM Random Number Generator support"
+	depends on ARCH_NPCM || COMPILE_TEST
+	default HW_RANDOM
+	help
+	  This driver provides support for the Random Number
+	  Generator hardware available in Nuvoton NPCM SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called npcm-rng.
+
+	  If unsure, say Y.
+
 endif # HW_RANDOM
 
 config UML_RANDOM
@@ -458,7 +484,7 @@ config UML_RANDOM
 	  /dev/hwrng and injects the entropy into /dev/random.
 
 config HW_RANDOM_KEYSTONE
-	depends on ARCH_KEYSTONE
+	depends on ARCH_KEYSTONE || COMPILE_TEST
 	default HW_RANDOM
 	tristate "TI Keystone NETCP SA Hardware random number generator"
 	help
@@ -27,6 +27,7 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
 obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
 obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
 obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
+obj-$(CONFIG_HW_RANDOM_HISI_V2) += hisi-trng-v2.o
 obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
 obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
 obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
@@ -39,3 +40,4 @@ obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o
 obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
 obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
 obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
+obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o
@@ -14,14 +14,22 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/hw_random.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 
 #define TRNG_CR 0x00
+#define TRNG_MR 0x04
 #define TRNG_ISR 0x1c
 #define TRNG_ODATA 0x50
 
 #define TRNG_KEY 0x524e4700 /* RNG */
 
+#define TRNG_HALFR BIT(0) /* generate RN every 168 cycles */
+
+struct atmel_trng_data {
+	bool has_half_rate;
+};
+
 struct atmel_trng {
 	struct clk *clk;
 	void __iomem *base;
@@ -62,21 +70,31 @@ static void atmel_trng_disable(struct atmel_trng *trng)
 static int atmel_trng_probe(struct platform_device *pdev)
 {
 	struct atmel_trng *trng;
-	struct resource *res;
+	const struct atmel_trng_data *data;
 	int ret;
 
 	trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
 	if (!trng)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	trng->base = devm_ioremap_resource(&pdev->dev, res);
+	trng->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(trng->base))
 		return PTR_ERR(trng->base);
 
 	trng->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(trng->clk))
 		return PTR_ERR(trng->clk);
+	data = of_device_get_match_data(&pdev->dev);
+	if (!data)
+		return -ENODEV;
+
+	if (data->has_half_rate) {
+		unsigned long rate = clk_get_rate(trng->clk);
+
+		/* if peripheral clk is above 100MHz, set HALFR */
+		if (rate > 100000000)
+			writel(TRNG_HALFR, trng->base + TRNG_MR);
+	}
 
 	ret = clk_prepare_enable(trng->clk);
 	if (ret)
@@ -141,9 +159,24 @@ static const struct dev_pm_ops atmel_trng_pm_ops = {
 };
 #endif /* CONFIG_PM */
 
+static const struct atmel_trng_data at91sam9g45_config = {
+	.has_half_rate = false,
+};
+
+static const struct atmel_trng_data sam9x60_config = {
+	.has_half_rate = true,
+};
+
 static const struct of_device_id atmel_trng_dt_ids[] = {
-	{ .compatible = "atmel,at91sam9g45-trng" },
-	{ /* sentinel */ }
+	{
+		.compatible = "atmel,at91sam9g45-trng",
+		.data = &at91sam9g45_config,
+	}, {
+		.compatible = "microchip,sam9x60-trng",
+		.data = &sam9x60_config,
+	}, {
+		/* sentinel */
+	}
 };
 MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids);
 
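[Editor's note] In the atmel-rng hunks above, per-compatible capabilities now ride on the .data pointer of the of_device_id table and are fetched in probe with of_device_get_match_data(). A condensed sketch of that general pattern, independent of this driver; the structure, flag, and "vendor,foo" names are purely illustrative.

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_variant {
	bool has_fast_clock;	/* illustrative capability flag */
};

static const struct foo_variant foo_v1 = { .has_fast_clock = false };
static const struct foo_variant foo_v2 = { .has_fast_clock = true };

static const struct of_device_id foo_dt_ids[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1 },
	{ .compatible = "vendor,foo-v2", .data = &foo_v2 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_dt_ids);

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_variant *variant;

	/* Returns the .data of the matching table entry, or NULL. */
	variant = of_device_get_match_data(&pdev->dev);
	if (!variant)
		return -ENODEV;

	if (variant->has_fast_clock) {
		/* enable variant-specific behaviour here */
	}

	return 0;
}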
@@ -142,7 +142,6 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
 	struct device_node *np = dev->of_node;
 	const struct of_device_id *rng_id;
 	struct bcm2835_rng_priv *priv;
-	struct resource *r;
 	int err;
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -151,10 +150,8 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, priv);
 
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
 	/* map peripheral */
-	priv->base = devm_ioremap_resource(dev, r);
+	priv->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(priv->base))
 		return PTR_ERR(priv->base);
 
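[Editor's note] This hunk, like the atmel conversion above and the exynos one at the end of this diff, folds platform_get_resource() plus devm_ioremap_resource() into the devm_platform_ioremap_resource() helper. A minimal sketch of the resulting probe shape; bar_probe() is a made-up name for illustration.

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int bar_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/*
	 * Equivalent to:
	 *	struct resource *r;
	 *	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *	base = devm_ioremap_resource(&pdev->dev, r);
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* base is mapped here and unmapped automatically on driver unbind. */
	return 0;
}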
@@ -111,6 +111,14 @@ static void drop_current_rng(void)
 }
 
 /* Returns ERR_PTR(), NULL or refcounted hwrng */
+static struct hwrng *get_current_rng_nolock(void)
+{
+	if (current_rng)
+		kref_get(&current_rng->ref);
+
+	return current_rng;
+}
+
 static struct hwrng *get_current_rng(void)
 {
 	struct hwrng *rng;
@@ -118,9 +126,7 @@ static struct hwrng *get_current_rng(void)
 	if (mutex_lock_interruptible(&rng_mutex))
 		return ERR_PTR(-ERESTARTSYS);
 
-	rng = current_rng;
-	if (rng)
-		kref_get(&rng->ref);
+	rng = get_current_rng_nolock();
 
 	mutex_unlock(&rng_mutex);
 	return rng;
@@ -155,8 +161,6 @@ static int hwrng_init(struct hwrng *rng)
 	reinit_completion(&rng->cleanup_done);
 
 skip_init:
-	add_early_randomness(rng);
-
 	current_quality = rng->quality ? : default_quality;
 	if (current_quality > 1024)
 		current_quality = 1024;
@@ -320,12 +324,13 @@ static ssize_t hwrng_attr_current_store(struct device *dev,
 					const char *buf, size_t len)
 {
 	int err = -ENODEV;
-	struct hwrng *rng;
+	struct hwrng *rng, *old_rng, *new_rng;
 
 	err = mutex_lock_interruptible(&rng_mutex);
 	if (err)
 		return -ERESTARTSYS;
 
+	old_rng = current_rng;
 	if (sysfs_streq(buf, "")) {
 		err = enable_best_rng();
 	} else {
@@ -337,9 +342,15 @@ static ssize_t hwrng_attr_current_store(struct device *dev,
 			}
 		}
 	}
 
+	new_rng = get_current_rng_nolock();
 	mutex_unlock(&rng_mutex);
 
+	if (new_rng) {
+		if (new_rng != old_rng)
+			add_early_randomness(new_rng);
+		put_rng(new_rng);
+	}
+
 	return err ? : len;
 }
@@ -457,13 +468,15 @@ static void start_khwrngd(void)
 int hwrng_register(struct hwrng *rng)
 {
 	int err = -EINVAL;
-	struct hwrng *old_rng, *tmp;
+	struct hwrng *tmp;
 	struct list_head *rng_list_ptr;
+	bool is_new_current = false;
 
 	if (!rng->name || (!rng->data_read && !rng->read))
 		goto out;
 
 	mutex_lock(&rng_mutex);
+
 	/* Must not register two RNGs with the same name. */
 	err = -EEXIST;
 	list_for_each_entry(tmp, &rng_list, list) {
@@ -482,10 +495,8 @@ int hwrng_register(struct hwrng *rng)
 	}
 	list_add_tail(&rng->list, rng_list_ptr);
 
-
-	old_rng = current_rng;
 	err = 0;
-	if (!old_rng ||
-	    (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
+	if (!current_rng ||
+	    (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
 		/*
 		 * Set new rng as current as the new rng source
 		 * provides better entropy quality and was not
@@ -494,19 +505,26 @@ int hwrng_register(struct hwrng *rng)
 		err = set_current_rng(rng);
 		if (err)
 			goto out_unlock;
+		/* to use current_rng in add_early_randomness() we need
+		 * to take a ref
+		 */
+		is_new_current = true;
+		kref_get(&rng->ref);
 	}
-
-	if (old_rng && !rng->init) {
+	mutex_unlock(&rng_mutex);
+	if (is_new_current || !rng->init) {
 		/*
 		 * Use a new device's input to add some randomness to
 		 * the system. If this rng device isn't going to be
 		 * used right away, its init function hasn't been
-		 * called yet; so only use the randomness from devices
-		 * that don't need an init callback.
+		 * called yet by set_current_rng(); so only use the
+		 * randomness from devices that don't need an init callback
 		 */
 		add_early_randomness(rng);
 	}
-
+	if (is_new_current)
+		put_rng(rng);
+	return 0;
 out_unlock:
 	mutex_unlock(&rng_mutex);
 out:
@@ -516,10 +534,12 @@ EXPORT_SYMBOL_GPL(hwrng_register);
 
 void hwrng_unregister(struct hwrng *rng)
 {
+	struct hwrng *old_rng, *new_rng;
 	int err;
 
 	mutex_lock(&rng_mutex);
 
+	old_rng = current_rng;
 	list_del(&rng->list);
 	if (current_rng == rng) {
 		err = enable_best_rng();
@@ -529,6 +549,7 @@ void hwrng_unregister(struct hwrng *rng)
 		}
 	}
 
+	new_rng = get_current_rng_nolock();
 	if (list_empty(&rng_list)) {
 		mutex_unlock(&rng_mutex);
 		if (hwrng_fill)
@@ -536,6 +557,12 @@ void hwrng_unregister(struct hwrng *rng)
 	} else
 		mutex_unlock(&rng_mutex);
 
+	if (new_rng) {
+		if (old_rng != new_rng)
+			add_early_randomness(new_rng);
+		put_rng(new_rng);
+	}
+
 	wait_for_completion(&rng->cleanup_done);
 }
 EXPORT_SYMBOL_GPL(hwrng_unregister);
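[Editor's note] The hw_random core hunks above move add_early_randomness() out from under rng_mutex: a reference to the selected device is taken while the lock is held, and the device is only fed to the entropy pool after the lock is dropped. The interface a driver sees is unchanged. A minimal sketch of that driver side follows; every name here (baz_rng, the single 32-bit data register, the quality value) is illustrative, not taken from this series.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/platform_device.h>

struct baz_rng {
	void __iomem *base;
	struct hwrng rng;
};

static int baz_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	struct baz_rng *priv = container_of(rng, struct baz_rng, rng);

	/* Hardware detail is made up: one 32-bit word per read. */
	if (max < sizeof(u32))
		return 0;

	*(u32 *)buf = readl(priv->base);
	return sizeof(u32);
}

static int baz_rng_probe(struct platform_device *pdev)
{
	struct baz_rng *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->rng.name = "baz-rng";
	priv->rng.read = baz_rng_read;
	priv->rng.quality = 1000;

	/* Unregistered automatically on driver unbind. */
	return devm_hwrng_register(&pdev->dev, &priv->rng);
}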
@@ -109,7 +109,6 @@ static int exynos_trng_init(struct hwrng *rng)
 static int exynos_trng_probe(struct platform_device *pdev)
 {
 	struct exynos_trng_dev *trng;
-	struct resource *res;
 	int ret = -ENOMEM;
 
 	trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
@@ -128,8 +127,7 @@ static int exynos_trng_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, trng);
 	trng->dev = &pdev->dev;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	trng->mem = devm_ioremap_resource(&pdev->dev, res);
+	trng->mem = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(trng->mem))
 		return PTR_ERR(trng->mem);
 
 Some files were not shown because too many files changed in this diff.