Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "API:
   - Fix dcache flushing crash in skcipher.
   - Add hash finup self-tests.
   - Reschedule during speed tests.

  Algorithms:
   - Remove insecure vmac and replace it with vmac64.
   - Add public key verification for DH/ECDH.

  Drivers:
   - Decrease priority of sha-mb on x86.
   - Improve NEON latency/throughput on ARM64.
   - Add md5/sha384/sha512/des/3des to inside-secure.
   - Support eip197d in inside-secure.
   - Only register algorithms supported by the host in virtio.
   - Add cts and remove incompatible cts1 from ccree.
   - Add hisilicon SEC security accelerator driver.
   - Replace msm hwrng driver with qcom pseudo rng driver.

  Misc:
   - Centralize CRC polynomials"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (121 commits)
  crypto: arm64/ghash-ce - implement 4-way aggregation
  crypto: arm64/ghash-ce - replace NEON yield check with block limit
  crypto: hisilicon - sec_send_request() can be static
  lib/mpi: remove redundant variable esign
  crypto: arm64/aes-ce-gcm - don't reload key schedule if avoidable
  crypto: arm64/aes-ce-gcm - implement 2-way aggregation
  crypto: arm64/aes-ce-gcm - operate on two input blocks at a time
  crypto: dh - make crypto_dh_encode_key() more robust
  crypto: dh - fix calculating encoded key size
  crypto: ccp - Check for NULL PSP pointer at module unload
  crypto: arm/chacha20 - always use vrev for 16-bit rotates
  crypto: ccree - allow bigger than sector XTS op
  crypto: ccree - zero all of request ctx before use
  crypto: ccree - remove cipher ivgen left overs
  crypto: ccree - drop useless type flag during reg
  crypto: ablkcipher - fix crash flushing dcache in error path
  crypto: blkcipher - fix crash flushing dcache in error path
  crypto: skcipher - fix crash flushing dcache in error path
  crypto: skcipher - remove unnecessary setting of walk->nbytes
  crypto: scatterwalk - remove scatterwalk_samebuf()
  ...
Linus Torvalds 2018-08-15 16:01:47 -07:00
commit dafa5f6577
174 changed files with 6690 additions and 2013 deletions


@ -162,7 +162,7 @@ Code Example For Use of Operational State Memory With SHASH
char *hash_alg_name = "sha1-padlock-nano";
int ret;
alg = crypto_alloc_shash(hash_alg_name, CRYPTO_ALG_TYPE_SHASH, 0);
alg = crypto_alloc_shash(hash_alg_name, 0, 0);
if (IS_ERR(alg)) {
pr_info("can't alloc alg %s\n", hash_alg_name);
return PTR_ERR(alg);
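
For context, the corrected allocation pattern from this hunk (the type argument drops from CRYPTO_ALG_TYPE_SHASH to 0) can be exercised end to end. The sketch below is illustrative rather than taken from the tree: it uses the generic "sha1" name instead of the PadLock driver name from the documentation, and trims error handling and era-specific fields (e.g. the old desc->flags) to the essentials.

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>

static int sha1_digest_example(const u8 *data, unsigned int len,
			       u8 out[SHA1_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int ret;

	/* type and mask are now 0; the shash API constrains the type itself */
	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		ret = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return ret;
}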


@ -0,0 +1,67 @@
* Hisilicon hip07 Security Accelerator (SEC)
Required properties:
- compatible: Must contain one of
- "hisilicon,hip06-sec"
- "hisilicon,hip07-sec"
- reg: Memory addresses and lengths of the memory regions through which
this device is controlled.
Region 0 has registers to control the backend processing engines.
Region 1 has registers for functionality common to all queues.
Regions 2-18 have registers for the 16 individual queues which are isolated
both in hardware and within the driver.
- interrupts: Interrupt specifiers.
Refer to interrupt-controller/interrupts.txt for generic interrupt client node
bindings.
Interrupt 0 is for the SEC unit error queue.
Interrupt 2N + 1 is the completion interrupt for queue N.
Interrupt 2N + 2 is the error interrupt for queue N.
- dma-coherent: The driver assumes coherent dma is possible.
Optional properties:
- iommus: The SEC units are behind smmu-v3 iommus.
Refer to iommu/arm,smmu-v3.txt for more information.
Example:
p1_sec_a: crypto@400,d2000000 {
compatible = "hisilicon,hip07-sec";
reg = <0x400 0xd0000000 0x0 0x10000
0x400 0xd2000000 0x0 0x10000
0x400 0xd2010000 0x0 0x10000
0x400 0xd2020000 0x0 0x10000
0x400 0xd2030000 0x0 0x10000
0x400 0xd2040000 0x0 0x10000
0x400 0xd2050000 0x0 0x10000
0x400 0xd2060000 0x0 0x10000
0x400 0xd2070000 0x0 0x10000
0x400 0xd2080000 0x0 0x10000
0x400 0xd2090000 0x0 0x10000
0x400 0xd20a0000 0x0 0x10000
0x400 0xd20b0000 0x0 0x10000
0x400 0xd20c0000 0x0 0x10000
0x400 0xd20d0000 0x0 0x10000
0x400 0xd20e0000 0x0 0x10000
0x400 0xd20f0000 0x0 0x10000
0x400 0xd2100000 0x0 0x10000>;
interrupt-parent = <&p1_mbigen_sec_a>;
iommus = <&p1_smmu_alg_a 0x600>;
dma-coherent;
interrupts = <576 4>,
<577 1>, <578 4>,
<579 1>, <580 4>,
<581 1>, <582 4>,
<583 1>, <584 4>,
<585 1>, <586 4>,
<587 1>, <588 4>,
<589 1>, <590 4>,
<591 1>, <592 4>,
<593 1>, <594 4>,
<595 1>, <596 4>,
<597 1>, <598 4>,
<599 1>, <600 4>,
<601 1>, <602 4>,
<603 1>, <604 4>,
<605 1>, <606 4>,
<607 1>, <608 4>;
};
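
Tying the example to the numbering rule above: entry 0 of the interrupts list (<576 4>) is the SEC unit error queue, and queue N's completion and error lines sit at entries 2N + 1 and 2N + 2. A small helper (names invented here, not from the driver) makes the index math explicit:

/* Position of queue q's interrupts within the "interrupts" list. */
static inline unsigned int sec_queue_done_irq(unsigned int q)
{
	return 2 * q + 1;	/* completion interrupt for queue q */
}

static inline unsigned int sec_queue_err_irq(unsigned int q)
{
	return 2 * q + 2;	/* error interrupt for queue q */
}

/* e.g. queue 0 maps to entries 1 and 2, i.e. <577 1> and <578 4> above. */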


@ -1,8 +1,9 @@
Inside Secure SafeXcel cryptographic engine
Required properties:
- compatible: Should be "inside-secure,safexcel-eip197" or
"inside-secure,safexcel-eip97".
- compatible: Should be "inside-secure,safexcel-eip197b",
"inside-secure,safexcel-eip197d" or
"inside-secure,safexcel-eip97ies".
- reg: Base physical address of the engine and length of memory mapped region.
- interrupts: Interrupt numbers for the rings and engine.
- interrupt-names: Should be "ring0", "ring1", "ring2", "ring3", "eip", "mem".
@ -14,10 +15,18 @@ Optional properties:
name must be "core" for the first clock and "reg" for
the second one.
Backward compatibility:
Two compatibles are kept for backward compatibility, but shouldn't be used for
new submissions:
- "inside-secure,safexcel-eip197" is equivalent to
"inside-secure,safexcel-eip197b".
- "inside-secure,safexcel-eip97" is equivalent to
"inside-secure,safexcel-eip97ies".
Example:
crypto: crypto@800000 {
compatible = "inside-secure,safexcel-eip197";
compatible = "inside-secure,safexcel-eip197b";
reg = <0x800000 0x200000>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,


@ -2,7 +2,9 @@ Qualcomm MSM pseudo random number generator.
Required properties:
- compatible : should be "qcom,prng"
- compatible : should be "qcom,prng" for 8916 etc
: should be "qcom,prng-ee" for 8996 and later using EE
(Execution Environment) slice of prng
- reg : specifies base physical address and size of the registers map
- clocks : phandle to clock-controller plus clock-specifier pair
- clock-names : "core" clocks all registers, FIFO and circuits in PRNG IP block


@ -7364,7 +7364,7 @@ M: Megha Dey <megha.dey@linux.intel.com>
R: Tim Chen <tim.c.chen@linux.intel.com>
L: linux-crypto@vger.kernel.org
S: Supported
F: arch/x86/crypto/sha*-mb
F: arch/x86/crypto/sha*-mb/
F: crypto/mcryptd.c
INTEL TELEMETRY DRIVER


@ -51,9 +51,8 @@ ENTRY(chacha20_block_xor_neon)
.Ldoubleround:
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
vadd.i32 q0, q0, q1
veor q4, q3, q0
vshl.u32 q3, q4, #16
vsri.u32 q3, q4, #16
veor q3, q3, q0
vrev32.16 q3, q3
// x2 += x3, x1 = rotl32(x1 ^ x2, 12)
vadd.i32 q2, q2, q3
@ -82,9 +81,8 @@ ENTRY(chacha20_block_xor_neon)
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
vadd.i32 q0, q0, q1
veor q4, q3, q0
vshl.u32 q3, q4, #16
vsri.u32 q3, q4, #16
veor q3, q3, q0
vrev32.16 q3, q3
// x2 += x3, x1 = rotl32(x1 ^ x2, 12)
vadd.i32 q2, q2, q3
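
The trick both hunks rely on: a 32-bit rotate by 16 is just a swap of the two 16-bit halves, which NEON's vrev32.16 performs in a single instruction, replacing the vshl.u32/vsri.u32 pair (and freeing q4 as a temporary). A minimal C restatement of the identity:

#include <stdint.h>

static inline uint32_t rotl32(uint32_t x, unsigned int r)
{
	return (x << r) | (x >> (32 - r));
}

/* rotl32(x, 16) swaps the 16-bit halves: exactly what vrev32.16 does */
static inline uint32_t swap_halves(uint32_t x)
{
	return (x >> 16) | (x << 16);
}

/* For any x: rotl32(x, 16) == swap_halves(x). */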


@ -152,7 +152,7 @@ static struct shash_alg ghash_alg = {
.cra_name = "__ghash",
.cra_driver_name = "__driver-ghash-ce",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_INTERNAL,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_key),
.cra_module = THIS_MODULE,
@ -308,9 +308,8 @@ static struct ahash_alg ghash_async_alg = {
.cra_name = "ghash",
.cra_driver_name = "ghash-ce",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_type = &crypto_ahash_type,
.cra_ctxsize = sizeof(struct ghash_async_ctx),
.cra_module = THIS_MODULE,
.cra_init = ghash_async_init_tfm,


@ -75,7 +75,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ce",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -67,7 +67,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-asm",
.cra_priority = 150,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -83,7 +83,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-neon",
.cra_priority = 250,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -78,7 +78,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-ce",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -93,7 +92,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-ce",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -71,7 +71,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-asm",
.cra_priority = 150,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -86,7 +85,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-asm",
.cra_priority = 150,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -79,7 +79,6 @@ struct shash_alg sha256_neon_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-neon",
.cra_priority = 250,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -94,7 +93,6 @@ struct shash_alg sha256_neon_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-neon",
.cra_priority = 250,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -63,7 +63,6 @@ static struct shash_alg sha512_arm_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-arm",
.cra_priority = 250,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -78,7 +77,6 @@ static struct shash_alg sha512_arm_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-arm",
.cra_priority = 250,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -75,7 +75,6 @@ struct shash_alg sha512_neon_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-neon",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
@ -91,7 +90,6 @@ struct shash_alg sha512_neon_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-neon",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -1049,7 +1049,74 @@ mbigen_usb: intc_usb {
num-pins = <2>;
};
};
p0_mbigen_alg_a:interrupt-controller@d0080000 {
compatible = "hisilicon,mbigen-v2";
reg = <0x0 0xd0080000 0x0 0x10000>;
p0_mbigen_sec_a: intc_sec {
msi-parent = <&p0_its_dsa_a 0x40400>;
interrupt-controller;
#interrupt-cells = <2>;
num-pins = <33>;
};
p0_mbigen_smmu_alg_a: intc_smmu_alg {
msi-parent = <&p0_its_dsa_a 0x40b1b>;
interrupt-controller;
#interrupt-cells = <2>;
num-pins = <3>;
};
};
p0_mbigen_alg_b:interrupt-controller@8,d0080000 {
compatible = "hisilicon,mbigen-v2";
reg = <0x8 0xd0080000 0x0 0x10000>;
p0_mbigen_sec_b: intc_sec {
msi-parent = <&p0_its_dsa_b 0x42400>;
interrupt-controller;
#interrupt-cells = <2>;
num-pins = <33>;
};
p0_mbigen_smmu_alg_b: intc_smmu_alg {
msi-parent = <&p0_its_dsa_b 0x42b1b>;
interrupt-controller;
#interrupt-cells = <2>;
num-pins = <3>;
};
};
p1_mbigen_alg_a:interrupt-controller@400,d0080000 {
compatible = "hisilicon,mbigen-v2";
reg = <0x400 0xd0080000 0x0 0x10000>;
p1_mbigen_sec_a: intc_sec {
msi-parent = <&p1_its_dsa_a 0x44400>;
interrupt-controller;
#interrupt-cells = <2>;
num-pins = <33>;
};
p1_mbigen_smmu_alg_a: intc_smmu_alg {
msi-parent = <&p1_its_dsa_a 0x44b1b>;
interrupt-controller;
#interrupt-cells = <2>;
num-pins = <3>;
};
};
p1_mbigen_alg_b:interrupt-controller@408,d0080000 {
compatible = "hisilicon,mbigen-v2";
reg = <0x408 0xd0080000 0x0 0x10000>;
p1_mbigen_sec_b: intc_sec {
msi-parent = <&p1_its_dsa_b 0x46400>;
interrupt-controller;
#interrupt-cells = <2>;
num-pins = <33>;
};
p1_mbigen_smmu_alg_b: intc_smmu_alg {
msi-parent = <&p1_its_dsa_b 0x46b1b>;
interrupt-controller;
#interrupt-cells = <2>;
num-pins = <3>;
};
};
p0_mbigen_dsa_a: interrupt-controller@c0080000 {
compatible = "hisilicon,mbigen-v2";
reg = <0x0 0xc0080000 0x0 0x10000>;
@ -1107,6 +1174,58 @@ smmu0: smmu_pcie {
hisilicon,broken-prefetch-cmd;
status = "disabled";
};
p0_smmu_alg_a: smmu_alg@d0040000 {
compatible = "arm,smmu-v3";
reg = <0x0 0xd0040000 0x0 0x20000>;
interrupt-parent = <&p0_mbigen_smmu_alg_a>;
interrupts = <733 1>,
<734 1>,
<735 1>;
interrupt-names = "eventq", "gerror", "priq";
#iommu-cells = <1>;
dma-coherent;
hisilicon,broken-prefetch-cmd;
/* smmu-cb-memtype = <0x0 0x1>;*/
};
p0_smmu_alg_b: smmu_alg@8,d0040000 {
compatible = "arm,smmu-v3";
reg = <0x8 0xd0040000 0x0 0x20000>;
interrupt-parent = <&p0_mbigen_smmu_alg_b>;
interrupts = <733 1>,
<734 1>,
<735 1>;
interrupt-names = "eventq", "gerror", "priq";
#iommu-cells = <1>;
dma-coherent;
hisilicon,broken-prefetch-cmd;
/* smmu-cb-memtype = <0x0 0x1>;*/
};
p1_smmu_alg_a: smmu_alg@400,d0040000 {
compatible = "arm,smmu-v3";
reg = <0x400 0xd0040000 0x0 0x20000>;
interrupt-parent = <&p1_mbigen_smmu_alg_a>;
interrupts = <733 1>,
<734 1>,
<735 1>;
interrupt-names = "eventq", "gerror", "priq";
#iommu-cells = <1>;
dma-coherent;
hisilicon,broken-prefetch-cmd;
/* smmu-cb-memtype = <0x0 0x1>;*/
};
p1_smmu_alg_b: smmu_alg@408,d0040000 {
compatible = "arm,smmu-v3";
reg = <0x408 0xd0040000 0x0 0x20000>;
interrupt-parent = <&p1_mbigen_smmu_alg_b>;
interrupts = <733 1>,
<734 1>,
<735 1>;
interrupt-names = "eventq", "gerror", "priq";
#iommu-cells = <1>;
dma-coherent;
hisilicon,broken-prefetch-cmd;
/* smmu-cb-memtype = <0x0 0x1>;*/
};
soc {
compatible = "simple-bus";
@ -1603,5 +1722,170 @@ p0_pcie2_a: pcie@a00a0000 {
0x0 0 0 4 &mbigen_pcie2_a 671 4>;
status = "disabled";
};
p0_sec_a: crypto@d2000000 {
compatible = "hisilicon,hip07-sec";
reg = <0x0 0xd0000000 0x0 0x10000
0x0 0xd2000000 0x0 0x10000
0x0 0xd2010000 0x0 0x10000
0x0 0xd2020000 0x0 0x10000
0x0 0xd2030000 0x0 0x10000
0x0 0xd2040000 0x0 0x10000
0x0 0xd2050000 0x0 0x10000
0x0 0xd2060000 0x0 0x10000
0x0 0xd2070000 0x0 0x10000
0x0 0xd2080000 0x0 0x10000
0x0 0xd2090000 0x0 0x10000
0x0 0xd20a0000 0x0 0x10000
0x0 0xd20b0000 0x0 0x10000
0x0 0xd20c0000 0x0 0x10000
0x0 0xd20d0000 0x0 0x10000
0x0 0xd20e0000 0x0 0x10000
0x0 0xd20f0000 0x0 0x10000
0x0 0xd2100000 0x0 0x10000>;
interrupt-parent = <&p0_mbigen_sec_a>;
iommus = <&p0_smmu_alg_a 0x600>;
dma-coherent;
interrupts = <576 4>,
<577 1>, <578 4>,
<579 1>, <580 4>,
<581 1>, <582 4>,
<583 1>, <584 4>,
<585 1>, <586 4>,
<587 1>, <588 4>,
<589 1>, <590 4>,
<591 1>, <592 4>,
<593 1>, <594 4>,
<595 1>, <596 4>,
<597 1>, <598 4>,
<599 1>, <600 4>,
<601 1>, <602 4>,
<603 1>, <604 4>,
<605 1>, <606 4>,
<607 1>, <608 4>;
};
p0_sec_b: crypto@8,d2000000 {
compatible = "hisilicon,hip07-sec";
reg = <0x8 0xd0000000 0x0 0x10000
0x8 0xd2000000 0x0 0x10000
0x8 0xd2010000 0x0 0x10000
0x8 0xd2020000 0x0 0x10000
0x8 0xd2030000 0x0 0x10000
0x8 0xd2040000 0x0 0x10000
0x8 0xd2050000 0x0 0x10000
0x8 0xd2060000 0x0 0x10000
0x8 0xd2070000 0x0 0x10000
0x8 0xd2080000 0x0 0x10000
0x8 0xd2090000 0x0 0x10000
0x8 0xd20a0000 0x0 0x10000
0x8 0xd20b0000 0x0 0x10000
0x8 0xd20c0000 0x0 0x10000
0x8 0xd20d0000 0x0 0x10000
0x8 0xd20e0000 0x0 0x10000
0x8 0xd20f0000 0x0 0x10000
0x8 0xd2100000 0x0 0x10000>;
interrupt-parent = <&p0_mbigen_sec_b>;
iommus = <&p0_smmu_alg_b 0x600>;
dma-coherent;
interrupts = <576 4>,
<577 1>, <578 4>,
<579 1>, <580 4>,
<581 1>, <582 4>,
<583 1>, <584 4>,
<585 1>, <586 4>,
<587 1>, <588 4>,
<589 1>, <590 4>,
<591 1>, <592 4>,
<593 1>, <594 4>,
<595 1>, <596 4>,
<597 1>, <598 4>,
<599 1>, <600 4>,
<601 1>, <602 4>,
<603 1>, <604 4>,
<605 1>, <606 4>,
<607 1>, <608 4>;
};
p1_sec_a: crypto@400,d2000000 {
compatible = "hisilicon,hip07-sec";
reg = <0x400 0xd0000000 0x0 0x10000
0x400 0xd2000000 0x0 0x10000
0x400 0xd2010000 0x0 0x10000
0x400 0xd2020000 0x0 0x10000
0x400 0xd2030000 0x0 0x10000
0x400 0xd2040000 0x0 0x10000
0x400 0xd2050000 0x0 0x10000
0x400 0xd2060000 0x0 0x10000
0x400 0xd2070000 0x0 0x10000
0x400 0xd2080000 0x0 0x10000
0x400 0xd2090000 0x0 0x10000
0x400 0xd20a0000 0x0 0x10000
0x400 0xd20b0000 0x0 0x10000
0x400 0xd20c0000 0x0 0x10000
0x400 0xd20d0000 0x0 0x10000
0x400 0xd20e0000 0x0 0x10000
0x400 0xd20f0000 0x0 0x10000
0x400 0xd2100000 0x0 0x10000>;
interrupt-parent = <&p1_mbigen_sec_a>;
iommus = <&p1_smmu_alg_a 0x600>;
dma-coherent;
interrupts = <576 4>,
<577 1>, <578 4>,
<579 1>, <580 4>,
<581 1>, <582 4>,
<583 1>, <584 4>,
<585 1>, <586 4>,
<587 1>, <588 4>,
<589 1>, <590 4>,
<591 1>, <592 4>,
<593 1>, <594 4>,
<595 1>, <596 4>,
<597 1>, <598 4>,
<599 1>, <600 4>,
<601 1>, <602 4>,
<603 1>, <604 4>,
<605 1>, <606 4>,
<607 1>, <608 4>;
};
p1_sec_b: crypto@408,d2000000 {
compatible = "hisilicon,hip07-sec";
reg = <0x408 0xd0000000 0x0 0x10000
0x408 0xd2000000 0x0 0x10000
0x408 0xd2010000 0x0 0x10000
0x408 0xd2020000 0x0 0x10000
0x408 0xd2030000 0x0 0x10000
0x408 0xd2040000 0x0 0x10000
0x408 0xd2050000 0x0 0x10000
0x408 0xd2060000 0x0 0x10000
0x408 0xd2070000 0x0 0x10000
0x408 0xd2080000 0x0 0x10000
0x408 0xd2090000 0x0 0x10000
0x408 0xd20a0000 0x0 0x10000
0x408 0xd20b0000 0x0 0x10000
0x408 0xd20c0000 0x0 0x10000
0x408 0xd20d0000 0x0 0x10000
0x408 0xd20e0000 0x0 0x10000
0x408 0xd20f0000 0x0 0x10000
0x408 0xd2100000 0x0 0x10000>;
interrupt-parent = <&p1_mbigen_sec_b>;
iommus = <&p1_smmu_alg_b 0x600>;
dma-coherent;
interrupts = <576 4>,
<577 1>, <578 4>,
<579 1>, <580 4>,
<581 1>, <582 4>,
<583 1>, <584 4>,
<585 1>, <586 4>,
<587 1>, <588 4>,
<589 1>, <590 4>,
<591 1>, <592 4>,
<593 1>, <594 4>,
<595 1>, <596 4>,
<597 1>, <598 4>,
<599 1>, <600 4>,
<601 1>, <602 4>,
<603 1>, <604 4>,
<605 1>, <606 4>,
<607 1>, <608 4>;
};
};
};


@ -567,7 +567,6 @@ static struct shash_alg mac_algs[] = { {
.base.cra_name = "cmac(aes)",
.base.cra_driver_name = "cmac-aes-" MODE,
.base.cra_priority = PRIO,
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct mac_tfm_ctx) +
2 * AES_BLOCK_SIZE,
@ -583,7 +582,6 @@ static struct shash_alg mac_algs[] = { {
.base.cra_name = "xcbc(aes)",
.base.cra_driver_name = "xcbc-aes-" MODE,
.base.cra_priority = PRIO,
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct mac_tfm_ctx) +
2 * AES_BLOCK_SIZE,
@ -599,7 +597,6 @@ static struct shash_alg mac_algs[] = { {
.base.cra_name = "cbcmac(aes)",
.base.cra_driver_name = "cbcmac-aes-" MODE,
.base.cra_priority = PRIO,
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct mac_tfm_ctx),
.base.cra_module = THIS_MODULE,


@ -1,7 +1,7 @@
/*
* Accelerated GHASH implementation with ARMv8 PMULL instructions.
*
* Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
* Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@ -46,6 +46,19 @@
ss3 .req v26
ss4 .req v27
XL2 .req v8
XM2 .req v9
XH2 .req v10
XL3 .req v11
XM3 .req v12
XH3 .req v13
TT3 .req v14
TT4 .req v15
HH .req v16
HH3 .req v17
HH4 .req v18
HH34 .req v19
.text
.arch armv8-a+crypto
@ -134,11 +147,25 @@
.endm
.macro __pmull_pre_p64
add x8, x3, #16
ld1 {HH.2d-HH4.2d}, [x8]
trn1 SHASH2.2d, SHASH.2d, HH.2d
trn2 T1.2d, SHASH.2d, HH.2d
eor SHASH2.16b, SHASH2.16b, T1.16b
trn1 HH34.2d, HH3.2d, HH4.2d
trn2 T1.2d, HH3.2d, HH4.2d
eor HH34.16b, HH34.16b, T1.16b
movi MASK.16b, #0xe1
shl MASK.2d, MASK.2d, #57
.endm
.macro __pmull_pre_p8
ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
eor SHASH2.16b, SHASH2.16b, SHASH.16b
// k00_16 := 0x0000000000000000_000000000000ffff
// k32_48 := 0x00000000ffffffff_0000ffffffffffff
movi k32_48.2d, #0xffffffff
@ -213,31 +240,88 @@
.endm
.macro __pmull_ghash, pn
frame_push 5
mov x19, x0
mov x20, x1
mov x21, x2
mov x22, x3
mov x23, x4
0: ld1 {SHASH.2d}, [x22]
ld1 {XL.2d}, [x20]
ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
eor SHASH2.16b, SHASH2.16b, SHASH.16b
ld1 {SHASH.2d}, [x3]
ld1 {XL.2d}, [x1]
__pmull_pre_\pn
/* do the head block first, if supplied */
cbz x23, 1f
ld1 {T1.2d}, [x23]
mov x23, xzr
b 2f
cbz x4, 0f
ld1 {T1.2d}, [x4]
mov x4, xzr
b 3f
1: ld1 {T1.2d}, [x21], #16
sub w19, w19, #1
0: .ifc \pn, p64
tbnz w0, #0, 2f // skip until #blocks is a
tbnz w0, #1, 2f // round multiple of 4
2: /* multiply XL by SHASH in GF(2^128) */
1: ld1 {XM3.16b-TT4.16b}, [x2], #64
sub w0, w0, #4
rev64 T1.16b, XM3.16b
rev64 T2.16b, XH3.16b
rev64 TT4.16b, TT4.16b
rev64 TT3.16b, TT3.16b
ext IN1.16b, TT4.16b, TT4.16b, #8
ext XL3.16b, TT3.16b, TT3.16b, #8
eor TT4.16b, TT4.16b, IN1.16b
pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1
pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0
pmull XM2.1q, SHASH2.1d, TT4.1d // (a1 + a0)(b1 + b0)
eor TT3.16b, TT3.16b, XL3.16b
pmull2 XH3.1q, HH.2d, XL3.2d // a1 * b1
pmull XL3.1q, HH.1d, XL3.1d // a0 * b0
pmull2 XM3.1q, SHASH2.2d, TT3.2d // (a1 + a0)(b1 + b0)
ext IN1.16b, T2.16b, T2.16b, #8
eor XL2.16b, XL2.16b, XL3.16b
eor XH2.16b, XH2.16b, XH3.16b
eor XM2.16b, XM2.16b, XM3.16b
eor T2.16b, T2.16b, IN1.16b
pmull2 XH3.1q, HH3.2d, IN1.2d // a1 * b1
pmull XL3.1q, HH3.1d, IN1.1d // a0 * b0
pmull XM3.1q, HH34.1d, T2.1d // (a1 + a0)(b1 + b0)
eor XL2.16b, XL2.16b, XL3.16b
eor XH2.16b, XH2.16b, XH3.16b
eor XM2.16b, XM2.16b, XM3.16b
ext IN1.16b, T1.16b, T1.16b, #8
ext TT3.16b, XL.16b, XL.16b, #8
eor XL.16b, XL.16b, IN1.16b
eor T1.16b, T1.16b, TT3.16b
pmull2 XH.1q, HH4.2d, XL.2d // a1 * b1
eor T1.16b, T1.16b, XL.16b
pmull XL.1q, HH4.1d, XL.1d // a0 * b0
pmull2 XM.1q, HH34.2d, T1.2d // (a1 + a0)(b1 + b0)
eor XL.16b, XL.16b, XL2.16b
eor XH.16b, XH.16b, XH2.16b
eor XM.16b, XM.16b, XM2.16b
eor T2.16b, XL.16b, XH.16b
ext T1.16b, XL.16b, XH.16b, #8
eor XM.16b, XM.16b, T2.16b
__pmull_reduce_p64
eor T2.16b, T2.16b, XH.16b
eor XL.16b, XL.16b, T2.16b
cbz w0, 5f
b 1b
.endif
2: ld1 {T1.2d}, [x2], #16
sub w0, w0, #1
3: /* multiply XL by SHASH in GF(2^128) */
CPU_LE( rev64 T1.16b, T1.16b )
ext T2.16b, XL.16b, XL.16b, #8
@ -250,7 +334,7 @@ CPU_LE( rev64 T1.16b, T1.16b )
__pmull_\pn XL, XL, SHASH // a0 * b0
__pmull_\pn XM, T1, SHASH2 // (a1 + a0)(b1 + b0)
eor T2.16b, XL.16b, XH.16b
4: eor T2.16b, XL.16b, XH.16b
ext T1.16b, XL.16b, XH.16b, #8
eor XM.16b, XM.16b, T2.16b
@ -259,18 +343,9 @@ CPU_LE( rev64 T1.16b, T1.16b )
eor T2.16b, T2.16b, XH.16b
eor XL.16b, XL.16b, T2.16b
cbz w19, 3f
cbnz w0, 0b
if_will_cond_yield_neon
st1 {XL.2d}, [x20]
do_cond_yield_neon
b 0b
endif_yield_neon
b 1b
3: st1 {XL.2d}, [x20]
frame_pop
5: st1 {XL.2d}, [x1]
ret
.endm
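
What the 4-way p64 path above computes is the aggregated form of the GHASH recurrence, a standard identity stated here for reference (XOR written as +, multiplication in GF(2^128), matching the comment notation in the code):

X' = ((((X + B1)*H + B2)*H + B3)*H + B4)*H
   = (X + B1)*H^4 + B2*H^3 + B3*H^2 + B4*H

SHASH holds H, while HH, HH3 and HH4 hold the precomputed H^2, H^3 and H^4 (loaded by __pmull_pre_p64), so four input blocks are folded with a single modular reduction instead of four.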
@ -286,9 +361,10 @@ ENTRY(pmull_ghash_update_p8)
__pmull_ghash p8
ENDPROC(pmull_ghash_update_p8)
KS .req v8
CTR .req v9
INP .req v10
KS0 .req v12
KS1 .req v13
INP0 .req v14
INP1 .req v15
.macro load_round_keys, rounds, rk
cmp \rounds, #12
@ -322,98 +398,128 @@ ENDPROC(pmull_ghash_update_p8)
.endm
.macro pmull_gcm_do_crypt, enc
ld1 {SHASH.2d}, [x4]
ld1 {SHASH.2d}, [x4], #16
ld1 {HH.2d}, [x4]
ld1 {XL.2d}, [x1]
ldr x8, [x5, #8] // load lower counter
load_round_keys w7, x6
movi MASK.16b, #0xe1
ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
trn1 SHASH2.2d, SHASH.2d, HH.2d
trn2 T1.2d, SHASH.2d, HH.2d
CPU_LE( rev x8, x8 )
shl MASK.2d, MASK.2d, #57
eor SHASH2.16b, SHASH2.16b, SHASH.16b
eor SHASH2.16b, SHASH2.16b, T1.16b
.if \enc == 1
ldr x10, [sp]
ld1 {KS.16b}, [x10]
ld1 {KS0.16b-KS1.16b}, [x10]
.endif
0: ld1 {CTR.8b}, [x5] // load upper counter
ld1 {INP.16b}, [x3], #16
cbnz x6, 4f
0: ld1 {INP0.16b-INP1.16b}, [x3], #32
rev x9, x8
add x8, x8, #1
sub w0, w0, #1
ins CTR.d[1], x9 // set lower counter
add x11, x8, #1
add x8, x8, #2
.if \enc == 1
eor INP.16b, INP.16b, KS.16b // encrypt input
st1 {INP.16b}, [x2], #16
eor INP0.16b, INP0.16b, KS0.16b // encrypt input
eor INP1.16b, INP1.16b, KS1.16b
.endif
rev64 T1.16b, INP.16b
ld1 {KS0.8b}, [x5] // load upper counter
rev x11, x11
sub w0, w0, #2
mov KS1.8b, KS0.8b
ins KS0.d[1], x9 // set lower counter
ins KS1.d[1], x11
rev64 T1.16b, INP1.16b
cmp w7, #12
b.ge 2f // AES-192/256?
1: enc_round CTR, v21
ext T2.16b, XL.16b, XL.16b, #8
1: enc_round KS0, v21
ext IN1.16b, T1.16b, T1.16b, #8
enc_round CTR, v22
enc_round KS1, v21
pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1
enc_round KS0, v22
eor T1.16b, T1.16b, IN1.16b
enc_round KS1, v22
pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0
enc_round KS0, v23
pmull XM2.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
enc_round KS1, v23
rev64 T1.16b, INP0.16b
ext T2.16b, XL.16b, XL.16b, #8
enc_round KS0, v24
ext IN1.16b, T1.16b, T1.16b, #8
eor T1.16b, T1.16b, T2.16b
enc_round KS1, v24
eor XL.16b, XL.16b, IN1.16b
enc_round CTR, v23
pmull2 XH.1q, SHASH.2d, XL.2d // a1 * b1
enc_round KS0, v25
eor T1.16b, T1.16b, XL.16b
enc_round CTR, v24
enc_round KS1, v25
pmull2 XH.1q, HH.2d, XL.2d // a1 * b1
pmull XL.1q, SHASH.1d, XL.1d // a0 * b0
pmull XM.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
enc_round KS0, v26
pmull XL.1q, HH.1d, XL.1d // a0 * b0
enc_round CTR, v25
enc_round KS1, v26
pmull2 XM.1q, SHASH2.2d, T1.2d // (a1 + a0)(b1 + b0)
enc_round KS0, v27
eor XL.16b, XL.16b, XL2.16b
eor XH.16b, XH.16b, XH2.16b
enc_round KS1, v27
eor XM.16b, XM.16b, XM2.16b
ext T1.16b, XL.16b, XH.16b, #8
enc_round KS0, v28
eor T2.16b, XL.16b, XH.16b
eor XM.16b, XM.16b, T1.16b
enc_round CTR, v26
enc_round KS1, v28
eor XM.16b, XM.16b, T2.16b
enc_round KS0, v29
pmull T2.1q, XL.1d, MASK.1d
enc_round CTR, v27
enc_round KS1, v29
mov XH.d[0], XM.d[1]
mov XM.d[1], XL.d[0]
enc_round CTR, v28
aese KS0.16b, v30.16b
eor XL.16b, XM.16b, T2.16b
enc_round CTR, v29
aese KS1.16b, v30.16b
ext T2.16b, XL.16b, XL.16b, #8
aese CTR.16b, v30.16b
eor KS0.16b, KS0.16b, v31.16b
pmull XL.1q, XL.1d, MASK.1d
eor T2.16b, T2.16b, XH.16b
eor KS.16b, CTR.16b, v31.16b
eor KS1.16b, KS1.16b, v31.16b
eor XL.16b, XL.16b, T2.16b
.if \enc == 0
eor INP.16b, INP.16b, KS.16b
st1 {INP.16b}, [x2], #16
eor INP0.16b, INP0.16b, KS0.16b
eor INP1.16b, INP1.16b, KS1.16b
.endif
st1 {INP0.16b-INP1.16b}, [x2], #32
cbnz w0, 0b
CPU_LE( rev x8, x8 )
@ -421,17 +527,24 @@ CPU_LE( rev x8, x8 )
str x8, [x5, #8] // store lower counter
.if \enc == 1
st1 {KS.16b}, [x10]
st1 {KS0.16b-KS1.16b}, [x10]
.endif
ret
2: b.eq 3f // AES-192?
enc_round CTR, v17
enc_round CTR, v18
3: enc_round CTR, v19
enc_round CTR, v20
enc_round KS0, v17
enc_round KS1, v17
enc_round KS0, v18
enc_round KS1, v18
3: enc_round KS0, v19
enc_round KS1, v19
enc_round KS0, v20
enc_round KS1, v20
b 1b
4: load_round_keys w7, x6
b 0b
.endm
/*


@ -1,7 +1,7 @@
/*
* Accelerated GHASH implementation with ARMv8 PMULL instructions.
*
* Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
* Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@ -33,9 +33,12 @@ MODULE_ALIAS_CRYPTO("ghash");
#define GCM_IV_SIZE 12
struct ghash_key {
u64 a;
u64 b;
be128 k;
u64 h[2];
u64 h2[2];
u64 h3[2];
u64 h4[2];
be128 k;
};
struct ghash_desc_ctx {
@ -113,6 +116,9 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
}
}
/* avoid hogging the CPU for too long */
#define MAX_BLOCKS (SZ_64K / GHASH_BLOCK_SIZE)
static int ghash_update(struct shash_desc *desc, const u8 *src,
unsigned int len)
{
@ -136,11 +142,16 @@ static int ghash_update(struct shash_desc *desc, const u8 *src,
blocks = len / GHASH_BLOCK_SIZE;
len %= GHASH_BLOCK_SIZE;
ghash_do_update(blocks, ctx->digest, src, key,
partial ? ctx->buf : NULL);
do {
int chunk = min(blocks, MAX_BLOCKS);
src += blocks * GHASH_BLOCK_SIZE;
partial = 0;
ghash_do_update(chunk, ctx->digest, src, key,
partial ? ctx->buf : NULL);
blocks -= chunk;
src += chunk * GHASH_BLOCK_SIZE;
partial = 0;
} while (unlikely(blocks > 0));
}
if (len)
memcpy(ctx->buf + partial, src, len);
@ -166,23 +177,36 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
return 0;
}
static void ghash_reflect(u64 h[], const be128 *k)
{
u64 carry = be64_to_cpu(k->a) & BIT(63) ? 1 : 0;
h[0] = (be64_to_cpu(k->b) << 1) | carry;
h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63);
if (carry)
h[1] ^= 0xc200000000000000UL;
}
static int __ghash_setkey(struct ghash_key *key,
const u8 *inkey, unsigned int keylen)
{
u64 a, b;
be128 h;
/* needed for the fallback */
memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);
/* perform multiplication by 'x' in GF(2^128) */
b = get_unaligned_be64(inkey);
a = get_unaligned_be64(inkey + 8);
ghash_reflect(key->h, &key->k);
key->a = (a << 1) | (b >> 63);
key->b = (b << 1) | (a >> 63);
h = key->k;
gf128mul_lle(&h, &key->k);
ghash_reflect(key->h2, &h);
if (b >> 63)
key->b ^= 0xc200000000000000UL;
gf128mul_lle(&h, &key->k);
ghash_reflect(key->h3, &h);
gf128mul_lle(&h, &key->k);
ghash_reflect(key->h4, &h);
return 0;
}
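
For reference, ghash_reflect() performs the driver's old "multiplication by 'x' in GF(2^128)" conversion on each power: the 128-bit value is shifted left by one and, when a bit carries out of the top, reduced with 0xc200000000000000, which encodes the reduction polynomial

p(x) = x^128 + x^7 + x^2 + x + 1

in the bit-reflected form the PMULL code expects. The powers H^2, H^3 and H^4 are generated with successive gf128mul_lle() multiplications and cached in the key, so the 4-way aggregated assembly never has to recompute them.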
@ -204,7 +228,6 @@ static struct shash_alg ghash_alg = {
.base.cra_name = "ghash",
.base.cra_driver_name = "ghash-ce",
.base.cra_priority = 200,
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = GHASH_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct ghash_key),
.base.cra_module = THIS_MODULE,
@ -245,7 +268,7 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
__aes_arm64_encrypt(ctx->aes_key.key_enc, key, (u8[AES_BLOCK_SIZE]){},
num_rounds(&ctx->aes_key));
return __ghash_setkey(&ctx->ghash_key, key, sizeof(key));
return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128));
}
static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
@ -349,9 +372,10 @@ static int gcm_encrypt(struct aead_request *req)
struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
struct skcipher_walk walk;
u8 iv[AES_BLOCK_SIZE];
u8 ks[AES_BLOCK_SIZE];
u8 ks[2 * AES_BLOCK_SIZE];
u8 tag[AES_BLOCK_SIZE];
u64 dg[2] = {};
int nrounds = num_rounds(&ctx->aes_key);
int err;
if (req->assoclen)
@ -360,38 +384,38 @@ static int gcm_encrypt(struct aead_request *req)
memcpy(iv, req->iv, GCM_IV_SIZE);
put_unaligned_be32(1, iv + GCM_IV_SIZE);
if (likely(may_use_simd())) {
err = skcipher_walk_aead_encrypt(&walk, req, false);
if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
u32 const *rk = NULL;
kernel_neon_begin();
pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc,
num_rounds(&ctx->aes_key));
pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
pmull_gcm_encrypt_block(ks, iv, NULL,
num_rounds(&ctx->aes_key));
pmull_gcm_encrypt_block(ks, iv, NULL, nrounds);
put_unaligned_be32(3, iv + GCM_IV_SIZE);
kernel_neon_end();
pmull_gcm_encrypt_block(ks + AES_BLOCK_SIZE, iv, NULL, nrounds);
put_unaligned_be32(4, iv + GCM_IV_SIZE);
err = skcipher_walk_aead_encrypt(&walk, req, false);
do {
int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
while (walk.nbytes >= AES_BLOCK_SIZE) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
if (rk)
kernel_neon_begin();
kernel_neon_begin();
pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
walk.src.virt.addr, &ctx->ghash_key,
iv, ctx->aes_key.key_enc,
num_rounds(&ctx->aes_key), ks);
iv, rk, nrounds, ks);
kernel_neon_end();
err = skcipher_walk_done(&walk,
walk.nbytes % AES_BLOCK_SIZE);
}
} else {
__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv,
num_rounds(&ctx->aes_key));
put_unaligned_be32(2, iv + GCM_IV_SIZE);
walk.nbytes % (2 * AES_BLOCK_SIZE));
err = skcipher_walk_aead_encrypt(&walk, req, false);
rk = ctx->aes_key.key_enc;
} while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
} else {
__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
while (walk.nbytes >= AES_BLOCK_SIZE) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
@ -400,8 +424,7 @@ static int gcm_encrypt(struct aead_request *req)
do {
__aes_arm64_encrypt(ctx->aes_key.key_enc,
ks, iv,
num_rounds(&ctx->aes_key));
ks, iv, nrounds);
crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE);
crypto_inc(iv, AES_BLOCK_SIZE);
@ -418,19 +441,28 @@ static int gcm_encrypt(struct aead_request *req)
}
if (walk.nbytes)
__aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
num_rounds(&ctx->aes_key));
nrounds);
}
/* handle the tail */
if (walk.nbytes) {
u8 buf[GHASH_BLOCK_SIZE];
unsigned int nbytes = walk.nbytes;
u8 *dst = walk.dst.virt.addr;
u8 *head = NULL;
crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, ks,
walk.nbytes);
memcpy(buf, walk.dst.virt.addr, walk.nbytes);
memset(buf + walk.nbytes, 0, GHASH_BLOCK_SIZE - walk.nbytes);
ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL);
if (walk.nbytes > GHASH_BLOCK_SIZE) {
head = dst;
dst += GHASH_BLOCK_SIZE;
nbytes %= GHASH_BLOCK_SIZE;
}
memcpy(buf, dst, nbytes);
memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head);
err = skcipher_walk_done(&walk, 0);
}
@ -453,10 +485,11 @@ static int gcm_decrypt(struct aead_request *req)
struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
unsigned int authsize = crypto_aead_authsize(aead);
struct skcipher_walk walk;
u8 iv[AES_BLOCK_SIZE];
u8 iv[2 * AES_BLOCK_SIZE];
u8 tag[AES_BLOCK_SIZE];
u8 buf[GHASH_BLOCK_SIZE];
u8 buf[2 * GHASH_BLOCK_SIZE];
u64 dg[2] = {};
int nrounds = num_rounds(&ctx->aes_key);
int err;
if (req->assoclen)
@ -465,43 +498,53 @@ static int gcm_decrypt(struct aead_request *req)
memcpy(iv, req->iv, GCM_IV_SIZE);
put_unaligned_be32(1, iv + GCM_IV_SIZE);
if (likely(may_use_simd())) {
err = skcipher_walk_aead_decrypt(&walk, req, false);
if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
u32 const *rk = NULL;
kernel_neon_begin();
pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc,
num_rounds(&ctx->aes_key));
pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
kernel_neon_end();
err = skcipher_walk_aead_decrypt(&walk, req, false);
do {
int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
int rem = walk.total - blocks * AES_BLOCK_SIZE;
while (walk.nbytes >= AES_BLOCK_SIZE) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
if (rk)
kernel_neon_begin();
kernel_neon_begin();
pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr,
walk.src.virt.addr, &ctx->ghash_key,
iv, ctx->aes_key.key_enc,
num_rounds(&ctx->aes_key));
iv, rk, nrounds);
/* check if this is the final iteration of the loop */
if (rem < (2 * AES_BLOCK_SIZE)) {
u8 *iv2 = iv + AES_BLOCK_SIZE;
if (rem > AES_BLOCK_SIZE) {
memcpy(iv2, iv, AES_BLOCK_SIZE);
crypto_inc(iv2, AES_BLOCK_SIZE);
}
pmull_gcm_encrypt_block(iv, iv, NULL, nrounds);
if (rem > AES_BLOCK_SIZE)
pmull_gcm_encrypt_block(iv2, iv2, NULL,
nrounds);
}
kernel_neon_end();
err = skcipher_walk_done(&walk,
walk.nbytes % AES_BLOCK_SIZE);
}
if (walk.nbytes) {
kernel_neon_begin();
pmull_gcm_encrypt_block(iv, iv, ctx->aes_key.key_enc,
num_rounds(&ctx->aes_key));
kernel_neon_end();
}
walk.nbytes % (2 * AES_BLOCK_SIZE));
rk = ctx->aes_key.key_enc;
} while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
} else {
__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv,
num_rounds(&ctx->aes_key));
__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
err = skcipher_walk_aead_decrypt(&walk, req, false);
while (walk.nbytes >= AES_BLOCK_SIZE) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 *dst = walk.dst.virt.addr;
@ -512,8 +555,7 @@ static int gcm_decrypt(struct aead_request *req)
do {
__aes_arm64_encrypt(ctx->aes_key.key_enc,
buf, iv,
num_rounds(&ctx->aes_key));
buf, iv, nrounds);
crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
crypto_inc(iv, AES_BLOCK_SIZE);
@ -526,14 +568,24 @@ static int gcm_decrypt(struct aead_request *req)
}
if (walk.nbytes)
__aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
num_rounds(&ctx->aes_key));
nrounds);
}
/* handle the tail */
if (walk.nbytes) {
memcpy(buf, walk.src.virt.addr, walk.nbytes);
memset(buf + walk.nbytes, 0, GHASH_BLOCK_SIZE - walk.nbytes);
ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL);
const u8 *src = walk.src.virt.addr;
const u8 *head = NULL;
unsigned int nbytes = walk.nbytes;
if (walk.nbytes > GHASH_BLOCK_SIZE) {
head = src;
src += GHASH_BLOCK_SIZE;
nbytes %= GHASH_BLOCK_SIZE;
}
memcpy(buf, src, nbytes);
memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head);
crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv,
walk.nbytes);
@ -558,7 +610,7 @@ static int gcm_decrypt(struct aead_request *req)
static struct aead_alg gcm_aes_alg = {
.ivsize = GCM_IV_SIZE,
.chunksize = AES_BLOCK_SIZE,
.chunksize = 2 * AES_BLOCK_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = gcm_setkey,
.setauthsize = gcm_setauthsize,


@ -99,7 +99,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ce",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -114,7 +114,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-ce",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -129,7 +128,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-ce",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -67,8 +67,7 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha256_state),
.base.cra_name = "sha256",
.base.cra_driver_name = "sha256-arm64",
.base.cra_priority = 100,
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_priority = 125,
.base.cra_blocksize = SHA256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@ -80,8 +79,7 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha256_state),
.base.cra_name = "sha224",
.base.cra_driver_name = "sha224-arm64",
.base.cra_priority = 100,
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_priority = 125,
.base.cra_blocksize = SHA224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
@ -153,7 +151,6 @@ static struct shash_alg neon_algs[] = { {
.base.cra_name = "sha256",
.base.cra_driver_name = "sha256-arm64-neon",
.base.cra_priority = 150,
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@ -166,7 +163,6 @@ static struct shash_alg neon_algs[] = { {
.base.cra_name = "sha224",
.base.cra_driver_name = "sha224-arm64-neon",
.base.cra_priority = 150,
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };


@ -105,7 +105,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-224",
.base.cra_driver_name = "sha3-224-ce",
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
@ -117,7 +116,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-256",
.base.cra_driver_name = "sha3-256-ce",
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
@ -129,7 +127,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-384",
.base.cra_driver_name = "sha3-384-ce",
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
@ -141,7 +138,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-512",
.base.cra_driver_name = "sha3-512-ce",
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,


@ -87,7 +87,6 @@ static struct shash_alg algs[] = { {
.base.cra_name = "sha384",
.base.cra_driver_name = "sha384-ce",
.base.cra_priority = 200,
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@ -100,7 +99,6 @@ static struct shash_alg algs[] = { {
.base.cra_name = "sha512",
.base.cra_driver_name = "sha512-ce",
.base.cra_priority = 200,
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };


@ -63,7 +63,6 @@ static struct shash_alg algs[] = { {
.base.cra_name = "sha512",
.base.cra_driver_name = "sha512-arm64",
.base.cra_priority = 150,
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@ -76,7 +75,6 @@ static struct shash_alg algs[] = { {
.base.cra_name = "sha384",
.base.cra_driver_name = "sha384-arm64",
.base.cra_priority = 150,
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };


@ -72,7 +72,6 @@ static struct shash_alg sm3_alg = {
.descsize = sizeof(struct sm3_state),
.base.cra_name = "sm3",
.base.cra_driver_name = "sm3-ce",
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SM3_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,


@ -182,7 +182,6 @@ static struct shash_alg alg = {
.cra_name = "md5",
.cra_driver_name= "octeon-md5",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -215,7 +215,6 @@ static struct shash_alg octeon_sha1_alg = {
.cra_name = "sha1",
.cra_driver_name= "octeon-sha1",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -239,7 +239,6 @@ static struct shash_alg octeon_sha256_algs[2] = { {
.cra_name = "sha256",
.cra_driver_name= "octeon-sha256",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -252,7 +251,6 @@ static struct shash_alg octeon_sha256_algs[2] = { {
.base = {
.cra_name = "sha224",
.cra_driver_name= "octeon-sha224",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -235,7 +235,6 @@ static struct shash_alg octeon_sha512_algs[2] = { {
.cra_name = "sha512",
.cra_driver_name= "octeon-sha512",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -249,7 +248,6 @@ static struct shash_alg octeon_sha512_algs[2] = { {
.cra_name = "sha384",
.cra_driver_name= "octeon-sha384",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -139,7 +139,6 @@ static struct shash_alg alg = {
.cra_name = "md5",
.cra_driver_name= "md5-ppc",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -185,7 +185,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-ppc-spe",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -132,7 +132,6 @@ static struct shash_alg alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-powerpc",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -231,7 +231,6 @@ static struct shash_alg algs[2] = { {
.cra_name = "sha256",
.cra_driver_name= "sha256-ppc-spe",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -248,7 +247,6 @@ static struct shash_alg algs[2] = { {
.cra_name = "sha224",
.cra_driver_name= "sha224-ppc-spe",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -1035,7 +1035,6 @@ static struct aead_alg gcm_aes_aead = {
.chunksize = AES_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_priority = 900,


@ -128,7 +128,6 @@ static struct shash_alg ghash_alg = {
.cra_name = "ghash",
.cra_driver_name = "ghash-s390",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,


@ -78,7 +78,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-s390",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -71,7 +71,6 @@ static struct shash_alg sha256_alg = {
.cra_name = "sha256",
.cra_driver_name= "sha256-s390",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -108,7 +107,6 @@ static struct shash_alg sha224_alg = {
.cra_name = "sha224",
.cra_driver_name= "sha224-s390",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -76,7 +76,6 @@ static struct shash_alg sha512_alg = {
.cra_name = "sha512",
.cra_driver_name= "sha512-s390",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -115,7 +114,6 @@ static struct shash_alg sha384_alg = {
.cra_name = "sha384",
.cra_driver_name= "sha384-s390",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_sha_ctx),
.cra_module = THIS_MODULE,


@ -144,7 +144,6 @@ static struct shash_alg alg = {
.cra_name = "md5",
.cra_driver_name= "md5-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -139,7 +139,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -169,7 +169,6 @@ static struct shash_alg sha256 = {
.cra_name = "sha256",
.cra_driver_name= "sha256-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -185,7 +184,6 @@ static struct shash_alg sha224 = {
.cra_name = "sha224",
.cra_driver_name= "sha224-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -154,7 +154,6 @@ static struct shash_alg sha512 = {
.cra_name = "sha512",
.cra_driver_name= "sha512-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -170,7 +169,6 @@ static struct shash_alg sha384 = {
.cra_name = "sha384",
.cra_driver_name= "sha384-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -154,8 +154,7 @@ static struct shash_alg ghash_alg = {
.cra_name = "__ghash",
.cra_driver_name = "__ghash-pclmulqdqni",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_SHASH |
CRYPTO_ALG_INTERNAL,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
@ -315,9 +314,8 @@ static struct ahash_alg ghash_async_alg = {
.cra_driver_name = "ghash-clmulni",
.cra_priority = 400,
.cra_ctxsize = sizeof(struct ghash_async_ctx),
.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_init = ghash_async_init_tfm,
.cra_exit = ghash_async_exit_tfm,


@ -169,7 +169,6 @@ static struct shash_alg alg = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-simd",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},


@ -746,9 +746,8 @@ static struct ahash_alg sha1_mb_areq_alg = {
* algo may not have completed before hashing thread
* sleep
*/
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_INTERNAL,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_INTERNAL,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
@ -871,10 +870,16 @@ static struct ahash_alg sha1_mb_async_alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1_mb",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
/*
* Low priority, since with few concurrent hash requests
* this is extremely slow due to the flush delay. Users
* whose workloads would benefit from this can request
* it explicitly by driver name, or can increase its
* priority at runtime using NETLINK_CRYPTO.
*/
.cra_priority = 50,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
.cra_init = sha1_mb_async_init_tfm,


@ -104,7 +104,6 @@ static struct shash_alg sha1_ssse3_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ssse3",
.cra_priority = 150,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -157,7 +156,6 @@ static struct shash_alg sha1_avx_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-avx",
.cra_priority = 160,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -249,7 +247,6 @@ static struct shash_alg sha1_avx2_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-avx2",
.cra_priority = 170,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -307,7 +304,6 @@ static struct shash_alg sha1_ni_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ni",
.cra_priority = 250,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -745,9 +745,8 @@ static struct ahash_alg sha256_mb_areq_alg = {
* algo may not have completed before hashing thread
* sleep
*/
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_INTERNAL,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_INTERNAL,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
@ -870,11 +869,16 @@ static struct ahash_alg sha256_mb_async_alg = {
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256_mb",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
/*
* Low priority, since with few concurrent hash requests
* this is extremely slow due to the flush delay. Users
* whose workloads would benefit from this can request
* it explicitly by driver name, or can increase its
* priority at runtime using NETLINK_CRYPTO.
*/
.cra_priority = 50,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
(sha256_mb_async_alg.halg.base.cra_list),


@ -265,7 +265,7 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
vmovd _args_digest(state , idx, 4) , %xmm0
vmovd _args_digest+4*32(state, idx, 4), %xmm1
vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1


@ -109,7 +109,6 @@ static struct shash_alg sha256_ssse3_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-ssse3",
.cra_priority = 150,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -124,7 +123,6 @@ static struct shash_alg sha256_ssse3_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-ssse3",
.cra_priority = 150,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -177,7 +175,6 @@ static struct shash_alg sha256_avx_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-avx",
.cra_priority = 160,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -192,7 +189,6 @@ static struct shash_alg sha256_avx_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-avx",
.cra_priority = 160,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -261,7 +257,6 @@ static struct shash_alg sha256_avx2_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-avx2",
.cra_priority = 170,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -276,7 +271,6 @@ static struct shash_alg sha256_avx2_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-avx2",
.cra_priority = 170,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -343,7 +337,6 @@ static struct shash_alg sha256_ni_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-ni",
.cra_priority = 250,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -358,7 +351,6 @@ static struct shash_alg sha256_ni_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-ni",
.cra_priority = 250,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -778,9 +778,8 @@ static struct ahash_alg sha512_mb_areq_alg = {
* algo may not have completed before hashing thread
* sleep
*/
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_INTERNAL,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_INTERNAL,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
@ -904,11 +903,16 @@ static struct ahash_alg sha512_mb_async_alg = {
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512_mb",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
/*
* Low priority, since with few concurrent hash requests
* this is extremely slow due to the flush delay. Users
* whose workloads would benefit from this can request
* it explicitly by driver name, or can increase its
* priority at runtime using NETLINK_CRYPTO.
*/
.cra_priority = 50,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
(sha512_mb_async_alg.halg.base.cra_list),


@ -109,7 +109,6 @@ static struct shash_alg sha512_ssse3_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-ssse3",
.cra_priority = 150,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -124,7 +123,6 @@ static struct shash_alg sha512_ssse3_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-ssse3",
.cra_priority = 150,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -188,7 +186,6 @@ static struct shash_alg sha512_avx_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-avx",
.cra_priority = 160,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -203,7 +200,6 @@ static struct shash_alg sha512_avx_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-avx",
.cra_priority = 160,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -261,7 +257,6 @@ static struct shash_alg sha512_avx2_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-avx2",
.cra_priority = 170,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -276,7 +271,6 @@ static struct shash_alg sha512_avx2_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-avx2",
.cra_priority = 170,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}


@ -71,11 +71,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
unsigned int bsize)
static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
unsigned int n)
{
unsigned int n = bsize;
for (;;) {
unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
@ -87,17 +85,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
n -= len_this_page;
scatterwalk_start(&walk->out, sg_next(walk->out.sg));
}
return bsize;
}
static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
unsigned int n)
static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
unsigned int n)
{
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
return n;
}
static int ablkcipher_walk_next(struct ablkcipher_request *req,
@ -107,39 +101,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req,
struct ablkcipher_walk *walk, int err)
{
struct crypto_tfm *tfm = req->base.tfm;
unsigned int nbytes = 0;
unsigned int n; /* bytes processed */
bool more;
if (likely(err >= 0)) {
unsigned int n = walk->nbytes - err;
if (unlikely(err < 0))
goto finish;
if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
n = ablkcipher_done_fast(walk, n);
else if (WARN_ON(err)) {
n = walk->nbytes - err;
walk->total -= n;
more = (walk->total != 0);
if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
ablkcipher_done_fast(walk, n);
} else {
if (WARN_ON(err)) {
/* unexpected case; didn't process all bytes */
err = -EINVAL;
goto err;
} else
n = ablkcipher_done_slow(walk, n);
nbytes = walk->total - n;
err = 0;
goto finish;
}
ablkcipher_done_slow(walk, n);
}
scatterwalk_done(&walk->in, 0, nbytes);
scatterwalk_done(&walk->out, 1, nbytes);
scatterwalk_done(&walk->in, 0, more);
scatterwalk_done(&walk->out, 1, more);
err:
walk->total = nbytes;
walk->nbytes = nbytes;
if (nbytes) {
if (more) {
crypto_yield(req->base.flags);
return ablkcipher_walk_next(req, walk);
}
err = 0;
finish:
walk->nbytes = 0;
if (walk->iv != req->info)
memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
kfree(walk->iv_buffer);
return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
@ -373,6 +368,7 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
@ -447,6 +443,7 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
sizeof(rblkcipher.geniv));
rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
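A hedged sketch of the idiom the two hunks above apply: strncpy() leaves the
destination unterminated when the source fills the buffer, so the last byte
is forced to NUL before the structure is copied out (buffer name and size
below are illustrative only).

	char geniv[64];

	strncpy(geniv, src, sizeof(geniv));
	geniv[sizeof(geniv) - 1] = '\0';	/* guarantee termination */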

View File

@ -429,7 +429,6 @@ static struct aead_alg crypto_aegis128_alg = {
.chunksize = AEGIS_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx),
.cra_alignmask = 0,

View File

@ -121,7 +121,7 @@ static void crypto_aegis128l_ad(struct aegis_state *state,
(const union aegis_chunk *)src;
while (size >= AEGIS128L_CHUNK_SIZE) {
crypto_aegis128l_update_a(state, src_chunk);
crypto_aegis128l_update_a(state, src_chunk);
size -= AEGIS128L_CHUNK_SIZE;
src_chunk += 1;
@ -493,7 +493,6 @@ static struct aead_alg crypto_aegis128l_alg = {
.chunksize = AEGIS128L_CHUNK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx),
.cra_alignmask = 0,

View File

@ -444,7 +444,6 @@ static struct aead_alg crypto_aegis256_alg = {
.chunksize = AEGIS_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx),
.cra_alignmask = 0,

View File

@ -70,19 +70,18 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
unsigned int bsize)
static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
unsigned int bsize)
{
u8 *addr;
addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
addr = blkcipher_get_spot(addr, bsize);
scatterwalk_copychunks(addr, &walk->out, bsize, 1);
return bsize;
}
static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
unsigned int n)
static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
unsigned int n)
{
if (walk->flags & BLKCIPHER_WALK_COPY) {
blkcipher_map_dst(walk);
@ -96,49 +95,48 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
return n;
}
int blkcipher_walk_done(struct blkcipher_desc *desc,
struct blkcipher_walk *walk, int err)
{
unsigned int nbytes = 0;
unsigned int n; /* bytes processed */
bool more;
if (likely(err >= 0)) {
unsigned int n = walk->nbytes - err;
if (unlikely(err < 0))
goto finish;
if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
n = blkcipher_done_fast(walk, n);
else if (WARN_ON(err)) {
n = walk->nbytes - err;
walk->total -= n;
more = (walk->total != 0);
if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
blkcipher_done_fast(walk, n);
} else {
if (WARN_ON(err)) {
/* unexpected case; didn't process all bytes */
err = -EINVAL;
goto err;
} else
n = blkcipher_done_slow(walk, n);
nbytes = walk->total - n;
err = 0;
goto finish;
}
blkcipher_done_slow(walk, n);
}
scatterwalk_done(&walk->in, 0, nbytes);
scatterwalk_done(&walk->out, 1, nbytes);
scatterwalk_done(&walk->in, 0, more);
scatterwalk_done(&walk->out, 1, more);
err:
walk->total = nbytes;
walk->nbytes = nbytes;
if (nbytes) {
if (more) {
crypto_yield(desc->flags);
return blkcipher_walk_next(desc, walk);
}
err = 0;
finish:
walk->nbytes = 0;
if (walk->iv != desc->info)
memcpy(desc->info, walk->iv, walk->ivsize);
if (walk->buffer != walk->page)
kfree(walk->buffer);
if (walk->page)
free_page((unsigned long)walk->page);
return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
@ -512,6 +510,7 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;

View File

@ -104,7 +104,6 @@ static struct shash_alg digest_null = {
.final = null_final,
.base = {
.cra_name = "digest_null",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = NULL_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}

View File

@ -16,14 +16,16 @@
#include <linux/mpi.h>
struct dh_ctx {
MPI p;
MPI g;
MPI xa;
MPI p; /* Value is guaranteed to be set. */
MPI q; /* Value is optional. */
MPI g; /* Value is guaranteed to be set. */
MPI xa; /* Value is guaranteed to be set. */
};
static void dh_clear_ctx(struct dh_ctx *ctx)
{
mpi_free(ctx->p);
mpi_free(ctx->q);
mpi_free(ctx->g);
mpi_free(ctx->xa);
memset(ctx, 0, sizeof(*ctx));
@ -60,6 +62,12 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
if (!ctx->p)
return -EINVAL;
if (params->q && params->q_size) {
ctx->q = mpi_read_raw_data(params->q, params->q_size);
if (!ctx->q)
return -EINVAL;
}
ctx->g = mpi_read_raw_data(params->g, params->g_size);
if (!ctx->g)
return -EINVAL;
@ -93,6 +101,55 @@ static int dh_set_secret(struct crypto_kpp *tfm, const void *buf,
return -EINVAL;
}
/*
* SP800-56A public key verification:
*
* * If Q is provided as part of the domain parameters, a full validation
* according to SP800-56A section 5.6.2.3.1 is performed.
*
* * If Q is not provided, a partial validation according to SP800-56A section
* 5.6.2.3.2 is performed.
*/
static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y)
{
if (unlikely(!ctx->p))
return -EINVAL;
/*
* Step 1: Verify that 2 <= y <= p - 2.
*
* The upper limit check is actually y < p instead of y < p - 1
* as the mpi_sub_ui function is not yet available.
*/
if (mpi_cmp_ui(y, 1) < 1 || mpi_cmp(y, ctx->p) >= 0)
return -EINVAL;
/* Step 2: Verify that 1 = y^q mod p */
if (ctx->q) {
MPI val = mpi_alloc(0);
int ret;
if (!val)
return -ENOMEM;
ret = mpi_powm(val, y, ctx->q, ctx->p);
if (ret) {
mpi_free(val);
return ret;
}
ret = mpi_cmp_ui(val, 1);
mpi_free(val);
if (ret != 0)
return -EINVAL;
}
return 0;
}
static int dh_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
@ -115,6 +172,9 @@ static int dh_compute_value(struct kpp_request *req)
ret = -EINVAL;
goto err_free_val;
}
ret = dh_is_pubkey_valid(ctx, base);
if (ret)
goto err_free_base;
} else {
base = ctx->g;
}
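A hedged usage sketch (not part of this commit) of how a caller opts in to
the full SP800-56A validation: supplying q in struct dh selects the section
5.6.2.3.1 check, while leaving q NULL with q_size 0 falls back to the
partial check. All variable names below are illustrative.

	struct dh params = {
		.key = xa, .key_size = xa_len,
		.p = p, .p_size = p_len,
		.q = q, .q_size = q_len,	/* optional; may be NULL/0 */
		.g = g, .g_size = g_len,
	};
	unsigned int len = crypto_dh_key_len(&params);
	u8 *buf = kmalloc(len, GFP_KERNEL);
	int err;

	if (!buf)
		return -ENOMEM;
	err = crypto_dh_encode_key(buf, len, &params);
	if (!err)
		err = crypto_kpp_set_secret(tfm, buf, len);
	kzfree(buf);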

View File

@ -14,10 +14,12 @@
#include <crypto/dh.h>
#include <crypto/kpp.h>
#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 3 * sizeof(int))
#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 4 * sizeof(int))
static inline u8 *dh_pack_data(void *dst, const void *src, size_t size)
static inline u8 *dh_pack_data(u8 *dst, u8 *end, const void *src, size_t size)
{
if (!dst || size > end - dst)
return NULL;
memcpy(dst, src, size);
return dst + size;
}
@ -30,7 +32,7 @@ static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
static inline unsigned int dh_data_size(const struct dh *p)
{
return p->key_size + p->p_size + p->g_size;
return p->key_size + p->p_size + p->q_size + p->g_size;
}
unsigned int crypto_dh_key_len(const struct dh *p)
@ -42,25 +44,27 @@ EXPORT_SYMBOL_GPL(crypto_dh_key_len);
int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params)
{
u8 *ptr = buf;
u8 * const end = ptr + len;
struct kpp_secret secret = {
.type = CRYPTO_KPP_SECRET_TYPE_DH,
.len = len
};
if (unlikely(!buf))
if (unlikely(!len))
return -EINVAL;
if (len != crypto_dh_key_len(params))
ptr = dh_pack_data(ptr, end, &secret, sizeof(secret));
ptr = dh_pack_data(ptr, end, &params->key_size,
sizeof(params->key_size));
ptr = dh_pack_data(ptr, end, &params->p_size, sizeof(params->p_size));
ptr = dh_pack_data(ptr, end, &params->q_size, sizeof(params->q_size));
ptr = dh_pack_data(ptr, end, &params->g_size, sizeof(params->g_size));
ptr = dh_pack_data(ptr, end, params->key, params->key_size);
ptr = dh_pack_data(ptr, end, params->p, params->p_size);
ptr = dh_pack_data(ptr, end, params->q, params->q_size);
ptr = dh_pack_data(ptr, end, params->g, params->g_size);
if (ptr != end)
return -EINVAL;
ptr = dh_pack_data(ptr, &secret, sizeof(secret));
ptr = dh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
ptr = dh_pack_data(ptr, &params->p_size, sizeof(params->p_size));
ptr = dh_pack_data(ptr, &params->g_size, sizeof(params->g_size));
ptr = dh_pack_data(ptr, params->key, params->key_size);
ptr = dh_pack_data(ptr, params->p, params->p_size);
dh_pack_data(ptr, params->g, params->g_size);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_dh_encode_key);
@ -79,6 +83,7 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
ptr = dh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
ptr = dh_unpack_data(&params->p_size, ptr, sizeof(params->p_size));
ptr = dh_unpack_data(&params->q_size, ptr, sizeof(params->q_size));
ptr = dh_unpack_data(&params->g_size, ptr, sizeof(params->g_size));
if (secret.len != crypto_dh_key_len(params))
return -EINVAL;
@ -88,7 +93,7 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
* some drivers assume otherwise.
*/
if (params->key_size > params->p_size ||
params->g_size > params->p_size)
params->g_size > params->p_size || params->q_size > params->p_size)
return -EINVAL;
/* Don't allocate memory. Set pointers to data within
@ -96,7 +101,9 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
*/
params->key = (void *)ptr;
params->p = (void *)(ptr + params->key_size);
params->g = (void *)(ptr + params->key_size + params->p_size);
params->q = (void *)(ptr + params->key_size + params->p_size);
params->g = (void *)(ptr + params->key_size + params->p_size +
params->q_size);
/*
* Don't permit 'p' to be 0. It's not a prime number, and it's subject
@ -106,6 +113,10 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
if (memchr_inv(params->p, 0, params->p_size) == NULL)
return -EINVAL;
/* It is permissible to not provide Q. */
if (params->q_size == 0)
params->q = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(crypto_dh_decode_key);

View File

@ -261,8 +261,7 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg);
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inbuflen,
u8 *outbuf, u32 outlen);
#define DRBG_CTR_NULL_LEN 128
#define DRBG_OUTSCRATCHLEN DRBG_CTR_NULL_LEN
#define DRBG_OUTSCRATCHLEN 256
/* BCC function for CTR DRBG as defined in 10.4.3 */
static int drbg_ctr_bcc(struct drbg_state *drbg,
@ -555,8 +554,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
}
/* 10.2.1.5.2 step 4.1 */
ret = drbg_kcapi_sym_ctr(drbg, drbg->ctr_null_value, DRBG_CTR_NULL_LEN,
buf, len);
ret = drbg_kcapi_sym_ctr(drbg, NULL, 0, buf, len);
if (ret)
return ret;
@ -1644,9 +1642,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
skcipher_request_free(drbg->ctr_req);
drbg->ctr_req = NULL;
kfree(drbg->ctr_null_value_buf);
drbg->ctr_null_value = NULL;
kfree(drbg->outscratchpadbuf);
drbg->outscratchpadbuf = NULL;
@ -1697,15 +1692,6 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
crypto_req_done, &drbg->ctr_wait);
alignmask = crypto_skcipher_alignmask(sk_tfm);
drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
GFP_KERNEL);
if (!drbg->ctr_null_value_buf) {
drbg_fini_sym_kernel(drbg);
return -ENOMEM;
}
drbg->ctr_null_value = (u8 *)PTR_ALIGN(drbg->ctr_null_value_buf,
alignmask + 1);
drbg->outscratchpadbuf = kmalloc(DRBG_OUTSCRATCHLEN + alignmask,
GFP_KERNEL);
if (!drbg->outscratchpadbuf) {
@ -1715,6 +1701,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
drbg->outscratchpad = (u8 *)PTR_ALIGN(drbg->outscratchpadbuf,
alignmask + 1);
sg_init_table(&drbg->sg_in, 1);
sg_init_one(&drbg->sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
return alignmask;
}
@ -1743,17 +1732,25 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inlen,
u8 *outbuf, u32 outlen)
{
struct scatterlist sg_in, sg_out;
struct scatterlist *sg_in = &drbg->sg_in, *sg_out = &drbg->sg_out;
u32 scratchpad_use = min_t(u32, outlen, DRBG_OUTSCRATCHLEN);
int ret;
sg_init_one(&sg_in, inbuf, inlen);
sg_init_one(&sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
if (inbuf) {
/* Use caller-provided input buffer */
sg_set_buf(sg_in, inbuf, inlen);
} else {
/* Use scratchpad for in-place operation */
inlen = scratchpad_use;
memset(drbg->outscratchpad, 0, scratchpad_use);
sg_set_buf(sg_in, drbg->outscratchpad, scratchpad_use);
}
while (outlen) {
u32 cryptlen = min3(inlen, outlen, (u32)DRBG_OUTSCRATCHLEN);
/* Output buffer may not be valid for SGL, use scratchpad */
skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
skcipher_request_set_crypt(drbg->ctr_req, sg_in, sg_out,
cryptlen, drbg->V);
ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
&drbg->ctr_wait);
@ -1763,6 +1760,7 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
crypto_init_wait(&drbg->ctr_wait);
memcpy(outbuf, drbg->outscratchpad, cryptlen);
memzero_explicit(drbg->outscratchpad, cryptlen);
outlen -= cryptlen;
outbuf += cryptlen;
@ -1770,7 +1768,6 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
ret = 0;
out:
memzero_explicit(drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
return ret;
}
#endif /* CONFIG_CRYPTO_DRBG_CTR */
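The reason the dedicated null-input buffer can go away is the CTR-mode
identity (a worked equation, not text from the commit):

	C = P xor E_K(ctr)	=>	P = 0 gives C = E_K(ctr)

so zeroing the output scratchpad and encrypting it in place yields exactly
the keystream bytes the generate path needs.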

View File

@ -1019,6 +1019,36 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
return ret;
}
/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
struct ecc_point *pk)
{
u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];
/* Check 1: Verify key is not the zero point. */
if (ecc_point_is_zero(pk))
return -EINVAL;
/* Check 2: Verify key is in the range [1, p-1]. */
if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1)
return -EINVAL;
if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1)
return -EINVAL;
/* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
vli_mod_square_fast(yy, pk->y, curve->p, pk->ndigits); /* y^2 */
vli_mod_square_fast(xxx, pk->x, curve->p, pk->ndigits); /* x^2 */
vli_mod_mult_fast(xxx, xxx, pk->x, curve->p, pk->ndigits); /* x^3 */
vli_mod_mult_fast(w, curve->a, pk->x, curve->p, pk->ndigits); /* a·x */
vli_mod_add(w, w, curve->b, curve->p, pk->ndigits); /* a·x + b */
vli_mod_add(w, w, xxx, curve->p, pk->ndigits); /* x^3 + a·x + b */
if (vli_cmp(yy, w, pk->ndigits) != 0) /* Equation */
return -EINVAL;
return 0;
}
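To make check 3 concrete, a small userspace sketch with toy numbers (this is
not kernel code): on the curve y^2 = x^3 + a*x + b over GF(p) with p = 17,
a = 2, b = 2, the point (5, 1) satisfies the equation and would pass, while
(5, 2) would be rejected.

	#include <stdio.h>

	int main(void)
	{
		unsigned int p = 17, a = 2, b = 2, x = 5, y = 1;
		unsigned int lhs = (y * y) % p;				/* y^2 mod p */
		unsigned int rhs = ((x * x % p) * x + a * x + b) % p;	/* x^3 + ax + b mod p */

		printf("on curve: %s\n", lhs == rhs ? "yes" : "no");
		return 0;
	}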
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, const u64 *public_key,
u64 *secret)
@ -1046,16 +1076,20 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
goto out;
}
ecc_swap_digits(public_key, pk->x, ndigits);
ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
ret = ecc_is_pubkey_valid_partial(curve, pk);
if (ret)
goto err_alloc_product;
ecc_swap_digits(private_key, priv, ndigits);
product = ecc_alloc_point(ndigits);
if (!product) {
ret = -ENOMEM;
goto err_alloc_product;
}
ecc_swap_digits(public_key, pk->x, ndigits);
ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
ecc_swap_digits(private_key, priv, ndigits);
ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
ecc_swap_digits(product->x, secret, ndigits);

View File

@ -13,9 +13,11 @@ struct ecc_curve {
struct ecc_point g;
u64 *p;
u64 *n;
u64 *a;
u64 *b;
};
/* NIST P-192 */
/* NIST P-192: a = p - 3 */
static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
0x188DA80EB03090F6ull };
static u64 nist_p192_g_y[] = { 0x73F977A11E794811ull, 0x631011ED6B24CDD5ull,
@ -24,6 +26,10 @@ static u64 nist_p192_p[] = { 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFEull,
0xFFFFFFFFFFFFFFFFull };
static u64 nist_p192_n[] = { 0x146BC9B1B4D22831ull, 0xFFFFFFFF99DEF836ull,
0xFFFFFFFFFFFFFFFFull };
static u64 nist_p192_a[] = { 0xFFFFFFFFFFFFFFFCull, 0xFFFFFFFFFFFFFFFEull,
0xFFFFFFFFFFFFFFFFull };
static u64 nist_p192_b[] = { 0xFEB8DEECC146B9B1ull, 0x0FA7E9AB72243049ull,
0x64210519E59C80E7ull };
static struct ecc_curve nist_p192 = {
.name = "nist_192",
.g = {
@ -32,10 +38,12 @@ static struct ecc_curve nist_p192 = {
.ndigits = 3,
},
.p = nist_p192_p,
.n = nist_p192_n
.n = nist_p192_n,
.a = nist_p192_a,
.b = nist_p192_b
};
/* NIST P-256 */
/* NIST P-256: a = p - 3 */
static u64 nist_p256_g_x[] = { 0xF4A13945D898C296ull, 0x77037D812DEB33A0ull,
0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull };
static u64 nist_p256_g_y[] = { 0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull,
@ -44,6 +52,10 @@ static u64 nist_p256_p[] = { 0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull,
0x0000000000000000ull, 0xFFFFFFFF00000001ull };
static u64 nist_p256_n[] = { 0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull,
0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull };
static u64 nist_p256_a[] = { 0xFFFFFFFFFFFFFFFCull, 0x00000000FFFFFFFFull,
0x0000000000000000ull, 0xFFFFFFFF00000001ull };
static u64 nist_p256_b[] = { 0x3BCE3C3E27D2604Bull, 0x651D06B0CC53B0F6ull,
0xB3EBBD55769886BCull, 0x5AC635D8AA3A93E7ull };
static struct ecc_curve nist_p256 = {
.name = "nist_256",
.g = {
@ -52,7 +64,9 @@ static struct ecc_curve nist_p256 = {
.ndigits = 4,
},
.p = nist_p256_p,
.n = nist_p256_n
.n = nist_p256_n,
.a = nist_p256_a,
.b = nist_p256_b
};
#endif
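The "a = p - 3" comments can be spot-checked with a short userspace sketch
(again, not kernel code); the limbs below are the P-256 words from this
hunk, least-significant first.

	#include <stdio.h>

	int main(void)
	{
		unsigned long long p[4] = { 0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull,
					    0x0000000000000000ull, 0xFFFFFFFF00000001ull };
		unsigned long long a[4] = { 0xFFFFFFFFFFFFFFFCull, 0x00000000FFFFFFFFull,
					    0x0000000000000000ull, 0xFFFFFFFF00000001ull };
		unsigned long long borrow = 3;	/* subtract 3 from the low limb */
		int i, ok = 1;

		for (i = 0; i < 4; i++) {
			unsigned long long d = p[i] - borrow;

			borrow = d > p[i];	/* borrow propagates on wraparound */
			ok &= (d == a[i]);
		}
		printf("a == p - 3: %s\n", ok ? "yes" : "no");
		return 0;
	}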

View File

@ -132,7 +132,6 @@ static struct shash_alg ghash_alg = {
.cra_name = "ghash",
.cra_driver_name = "ghash-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,

View File

@ -188,7 +188,7 @@ static int post_crypt(struct skcipher_request *req)
if (rctx->dst != sg) {
rctx->dst[0] = *sg;
sg_unmark_end(rctx->dst);
scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
}
rctx->dst[0].length -= offset - sg->offset;
rctx->dst[0].offset = offset;
@ -265,7 +265,7 @@ static int pre_crypt(struct skcipher_request *req)
if (rctx->src != sg) {
rctx->src[0] = *sg;
sg_unmark_end(rctx->src);
scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
}
rctx->src[0].length -= offset - sg->offset;
rctx->src[0].offset = offset;

View File

@ -217,7 +217,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct md4_ctx),
.base = {
.cra_name = "md4",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD4_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}

View File

@ -229,7 +229,6 @@ static struct shash_alg alg = {
.statesize = sizeof(struct md5_state),
.base = {
.cra_name = "md5",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}

View File

@ -514,7 +514,6 @@ static struct aead_alg crypto_morus1280_alg = {
.chunksize = MORUS1280_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct morus1280_ctx),
.cra_alignmask = 0,

View File

@ -511,7 +511,6 @@ static struct aead_alg crypto_morus640_alg = {
.chunksize = MORUS640_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct morus640_ctx),
.cra_alignmask = 0,

View File

@ -279,7 +279,6 @@ static struct shash_alg poly1305_alg = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},

View File

@ -303,7 +303,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct rmd128_ctx),
.base = {
.cra_name = "rmd128",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = RMD128_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}

View File

@ -347,7 +347,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct rmd160_ctx),
.base = {
.cra_name = "rmd160",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = RMD160_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}

View File

@ -49,7 +49,7 @@ struct rmd256_ctx {
static void rmd256_transform(u32 *state, const __le32 *in)
{
u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd, tmp;
u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;
/* Initialize left lane */
aa = state[0];
@ -100,7 +100,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);
/* Swap contents of "a" registers */
tmp = aa; aa = aaa; aaa = tmp;
swap(aa, aaa);
/* round 2: left lane */
ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
@ -139,7 +139,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
/* Swap contents of "b" registers */
tmp = bb; bb = bbb; bbb = tmp;
swap(bb, bbb);
/* round 3: left lane */
ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
@ -178,7 +178,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);
/* Swap contents of "c" registers */
tmp = cc; cc = ccc; ccc = tmp;
swap(cc, ccc);
/* round 4: left lane */
ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
@ -217,7 +217,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);
/* Swap contents of "d" registers */
tmp = dd; dd = ddd; ddd = tmp;
swap(dd, ddd);
/* combine results */
state[0] += aa;
@ -322,7 +322,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct rmd256_ctx),
.base = {
.cra_name = "rmd256",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = RMD256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
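For reference, the swap() used above is the generic helper from
<linux/kernel.h>; a minimal stand-alone equivalent (an illustrative
re-implementation, not the kernel's exact definition) behaves like this:

	#include <stdio.h>

	#define swap(a, b) \
		do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

	int main(void)
	{
		unsigned int aa = 1, aaa = 2;

		swap(aa, aaa);			/* replaces: tmp = aa; aa = aaa; aaa = tmp; */
		printf("%u %u\n", aa, aaa);	/* prints "2 1" */
		return 0;
	}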

View File

@ -53,7 +53,7 @@ struct rmd320_ctx {
static void rmd320_transform(u32 *state, const __le32 *in)
{
u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee, tmp;
u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee;
/* Initialize left lane */
aa = state[0];
@ -106,7 +106,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6);
/* Swap contents of "a" registers */
tmp = aa; aa = aaa; aaa = tmp;
swap(aa, aaa);
/* round 2: left lane */
ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
@ -145,7 +145,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
/* Swap contents of "b" registers */
tmp = bb; bb = bbb; bbb = tmp;
swap(bb, bbb);
/* round 3: left lane */
ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
@ -184,7 +184,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5);
/* Swap contents of "c" registers */
tmp = cc; cc = ccc; ccc = tmp;
swap(cc, ccc);
/* round 4: left lane */
ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
@ -223,7 +223,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8);
/* Swap contents of "d" registers */
tmp = dd; dd = ddd; ddd = tmp;
swap(dd, ddd);
/* round 5: left lane */
ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
@ -262,7 +262,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
/* Swap contents of "e" registers */
tmp = ee; ee = eee; eee = tmp;
swap(ee, eee);
/* combine results */
state[0] += aa;
@ -371,7 +371,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct rmd320_ctx),
.base = {
.cra_name = "rmd320",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = RMD320_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}

View File

@ -91,7 +91,7 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
sg_init_table(dst, 2);
sg_set_page(dst, sg_page(src), src->length - len, src->offset + len);
scatterwalk_crypto_chain(dst, sg_next(src), 0, 2);
scatterwalk_crypto_chain(dst, sg_next(src), 2);
return dst;
}

View File

@ -76,7 +76,7 @@ static struct shash_alg alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-generic",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_priority = 100,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}

View File

@ -271,7 +271,7 @@ static struct shash_alg sha256_algs[2] = { {
.base = {
.cra_name = "sha256",
.cra_driver_name= "sha256-generic",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_priority = 100,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -285,7 +285,7 @@ static struct shash_alg sha256_algs[2] = { {
.base = {
.cra_name = "sha224",
.cra_driver_name= "sha224-generic",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_priority = 100,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}

View File

@ -250,7 +250,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-224",
.base.cra_driver_name = "sha3-224-generic",
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@ -261,7 +260,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-256",
.base.cra_driver_name = "sha3-256-generic",
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@ -272,7 +270,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-384",
.base.cra_driver_name = "sha3-384-generic",
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@ -283,7 +280,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-512",
.base.cra_driver_name = "sha3-512-generic",
.base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };

View File

@ -23,6 +23,28 @@
#include <asm/byteorder.h>
#include <asm/unaligned.h>
const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = {
0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38,
0x4c, 0xd9, 0x32, 0x7e, 0xb1, 0xb1, 0xe3, 0x6a,
0x21, 0xfd, 0xb7, 0x11, 0x14, 0xbe, 0x07, 0x43,
0x4c, 0x0c, 0xc7, 0xbf, 0x63, 0xf6, 0xe1, 0xda,
0x27, 0x4e, 0xde, 0xbf, 0xe7, 0x6f, 0x65, 0xfb,
0xd5, 0x1a, 0xd2, 0xf1, 0x48, 0x98, 0xb9, 0x5b
};
EXPORT_SYMBOL_GPL(sha384_zero_message_hash);
const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE] = {
0xcf, 0x83, 0xe1, 0x35, 0x7e, 0xef, 0xb8, 0xbd,
0xf1, 0x54, 0x28, 0x50, 0xd6, 0x6d, 0x80, 0x07,
0xd6, 0x20, 0xe4, 0x05, 0x0b, 0x57, 0x15, 0xdc,
0x83, 0xf4, 0xa9, 0x21, 0xd3, 0x6c, 0xe9, 0xce,
0x47, 0xd0, 0xd1, 0x3c, 0x5d, 0x85, 0xf2, 0xb0,
0xff, 0x83, 0x18, 0xd2, 0x87, 0x7e, 0xec, 0x2f,
0x63, 0xb9, 0x31, 0xbd, 0x47, 0x41, 0x7a, 0x81,
0xa5, 0x38, 0x32, 0x7a, 0xf9, 0x27, 0xda, 0x3e
};
EXPORT_SYMBOL_GPL(sha512_zero_message_hash);
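A hedged sketch of the usual consumer of such exports (a common driver
pattern, not code from this series): hardware drivers return the
precomputed digest for zero-length requests instead of programming the
engine.

	if (req->nbytes == 0) {
		memcpy(req->result, sha512_zero_message_hash,
		       SHA512_DIGEST_SIZE);
		return 0;
	}
	/* otherwise hand the request to the hardware queue */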
static inline u64 Ch(u64 x, u64 y, u64 z)
{
return z ^ (x & (y ^ z));
@ -171,7 +193,7 @@ static struct shash_alg sha512_algs[2] = { {
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-generic",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_priority = 100,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -185,7 +207,7 @@ static struct shash_alg sha512_algs[2] = { {
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-generic",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_priority = 100,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}

View File

@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
u8 *addr;
@ -103,23 +103,24 @@ static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
addr = skcipher_get_spot(addr, bsize);
scatterwalk_copychunks(addr, &walk->out, bsize,
(walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
return 0;
}
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
unsigned int n = walk->nbytes - err;
unsigned int nbytes;
unsigned int n; /* bytes processed */
bool more;
nbytes = walk->total - n;
if (unlikely(err < 0))
goto finish;
if (unlikely(err < 0)) {
nbytes = 0;
n = 0;
} else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
SKCIPHER_WALK_SLOW |
SKCIPHER_WALK_COPY |
SKCIPHER_WALK_DIFF)))) {
n = walk->nbytes - err;
walk->total -= n;
more = (walk->total != 0);
if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
SKCIPHER_WALK_SLOW |
SKCIPHER_WALK_COPY |
SKCIPHER_WALK_DIFF)))) {
unmap_src:
skcipher_unmap_src(walk);
} else if (walk->flags & SKCIPHER_WALK_DIFF) {
@ -131,28 +132,28 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err)
skcipher_unmap_dst(walk);
} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
if (WARN_ON(err)) {
/* unexpected case; didn't process all bytes */
err = -EINVAL;
nbytes = 0;
} else
n = skcipher_done_slow(walk, n);
goto finish;
}
skcipher_done_slow(walk, n);
goto already_advanced;
}
if (err > 0)
err = 0;
walk->total = nbytes;
walk->nbytes = nbytes;
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
scatterwalk_done(&walk->in, 0, nbytes);
scatterwalk_done(&walk->out, 1, nbytes);
already_advanced:
scatterwalk_done(&walk->in, 0, more);
scatterwalk_done(&walk->out, 1, more);
if (nbytes) {
if (more) {
crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
CRYPTO_TFM_REQ_MAY_SLEEP : 0);
return skcipher_walk_next(walk);
}
err = 0;
finish:
walk->nbytes = 0;
/* Short-circuit for the common/fast path. */
if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
@ -387,7 +388,6 @@ static int skcipher_walk_next(struct skcipher_walk *walk)
}
return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_next);
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
@ -399,7 +399,7 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
unsigned size;
u8 *iv;
aligned_bs = ALIGN(bs, alignmask);
aligned_bs = ALIGN(bs, alignmask + 1);
/* Minimum size to align buffer by alignmask. */
size = alignmask & ~a;
@ -437,7 +437,6 @@ static int skcipher_walk_first(struct skcipher_walk *walk)
}
walk->page = NULL;
walk->nbytes = walk->total;
return skcipher_walk_next(walk);
}
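For context, a hedged sketch of the walk loop these fixes harden (standard
skcipher_walk usage, abbreviated; not code from this commit): each iteration
maps one span, the cipher consumes walk.nbytes bytes, and
skcipher_walk_done() is told how many bytes were left unprocessed.

	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	while (walk.nbytes) {
		/* encrypt walk.nbytes bytes from walk.src.virt.addr
		 * to walk.dst.virt.addr here
		 */
		err = skcipher_walk_done(&walk, 0);	/* 0 bytes left over */
	}
	return err;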

View File

@ -184,7 +184,6 @@ static struct shash_alg sm3_alg = {
.base = {
.cra_name = "sm3",
.cra_driver_name = "sm3-generic",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}

View File

@ -415,12 +415,14 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
}
if (secs)
if (secs) {
ret = test_mb_aead_jiffies(data, enc, *b_size,
secs, num_mb);
else
cond_resched();
} else {
ret = test_mb_aead_cycles(data, enc, *b_size,
num_mb);
}
if (ret) {
pr_err("%s() failed return code=%d\n", e, ret);
@ -660,11 +662,13 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
*b_size + (enc ? 0 : authsize),
iv);
if (secs)
if (secs) {
ret = test_aead_jiffies(req, enc, *b_size,
secs);
else
cond_resched();
} else {
ret = test_aead_cycles(req, enc, *b_size);
}
if (ret) {
pr_err("%s() failed return code=%d\n", e, ret);
@ -876,11 +880,13 @@ static void test_mb_ahash_speed(const char *algo, unsigned int secs,
i, speed[i].blen, speed[i].plen,
speed[i].blen / speed[i].plen);
if (secs)
if (secs) {
ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
num_mb);
else
cond_resched();
} else {
ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
}
if (ret) {
@ -1103,12 +1109,14 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
ahash_request_set_crypt(req, sg, output, speed[i].plen);
if (secs)
if (secs) {
ret = test_ahash_jiffies(req, speed[i].blen,
speed[i].plen, output, secs);
else
cond_resched();
} else {
ret = test_ahash_cycles(req, speed[i].blen,
speed[i].plen, output);
}
if (ret) {
pr_err("hashing failed ret=%d\n", ret);
@ -1367,13 +1375,15 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
iv);
}
if (secs)
if (secs) {
ret = test_mb_acipher_jiffies(data, enc,
*b_size, secs,
num_mb);
else
cond_resched();
} else {
ret = test_mb_acipher_cycles(data, enc,
*b_size, num_mb);
}
if (ret) {
pr_err("%s() failed flags=%x\n", e,
@ -1581,12 +1591,14 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
if (secs)
if (secs) {
ret = test_acipher_jiffies(req, enc,
*b_size, secs);
else
cond_resched();
} else {
ret = test_acipher_cycles(req, enc,
*b_size);
}
if (ret) {
pr_err("%s() failed flags=%x\n", e,
@ -1939,7 +1951,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
break;
case 109:
ret += tcrypt_test("vmac(aes)");
ret += tcrypt_test("vmac64(aes)");
break;
case 111:

View File

@ -259,9 +259,15 @@ static int ahash_partial_update(struct ahash_request **preq,
return ret;
}
enum hash_test {
HASH_TEST_DIGEST,
HASH_TEST_FINAL,
HASH_TEST_FINUP
};
static int __test_hash(struct crypto_ahash *tfm,
const struct hash_testvec *template, unsigned int tcount,
bool use_digest, const int align_offset)
enum hash_test test_type, const int align_offset)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
size_t digest_size = crypto_ahash_digestsize(tfm);
@ -332,14 +338,17 @@ static int __test_hash(struct crypto_ahash *tfm,
}
ahash_request_set_crypt(req, sg, result, template[i].psize);
if (use_digest) {
switch (test_type) {
case HASH_TEST_DIGEST:
ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
if (ret) {
pr_err("alg: hash: digest failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
} else {
break;
case HASH_TEST_FINAL:
memset(result, 1, digest_size);
ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
@ -371,6 +380,29 @@ static int __test_hash(struct crypto_ahash *tfm,
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
break;
case HASH_TEST_FINUP:
memset(result, 1, digest_size);
ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
pr_err("alg: hash: init failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
ret = ahash_guard_result(result, 1, digest_size);
if (ret) {
pr_err("alg: hash: init failed on test %d "
"for %s: used req->result\n", j, algo);
goto out;
}
ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
if (ret) {
pr_err("alg: hash: final failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
break;
}
if (memcmp(result, template[i].digest,
@ -383,6 +415,9 @@ static int __test_hash(struct crypto_ahash *tfm,
}
}
if (test_type)
goto out;
j = 0;
for (i = 0; i < tcount; i++) {
/* alignment tests are only done with continuous buffers */
@ -540,24 +575,24 @@ static int __test_hash(struct crypto_ahash *tfm,
static int test_hash(struct crypto_ahash *tfm,
const struct hash_testvec *template,
unsigned int tcount, bool use_digest)
unsigned int tcount, enum hash_test test_type)
{
unsigned int alignmask;
int ret;
ret = __test_hash(tfm, template, tcount, use_digest, 0);
ret = __test_hash(tfm, template, tcount, test_type, 0);
if (ret)
return ret;
/* test unaligned buffers, check with one byte offset */
ret = __test_hash(tfm, template, tcount, use_digest, 1);
ret = __test_hash(tfm, template, tcount, test_type, 1);
if (ret)
return ret;
alignmask = crypto_tfm_alg_alignmask(&tfm->base);
if (alignmask) {
/* Check if alignment mask for tfm is correctly set. */
ret = __test_hash(tfm, template, tcount, use_digest,
ret = __test_hash(tfm, template, tcount, test_type,
alignmask + 1);
if (ret)
return ret;
@ -1803,9 +1838,11 @@ static int __alg_test_hash(const struct hash_testvec *template,
return PTR_ERR(tfm);
}
err = test_hash(tfm, template, tcount, true);
err = test_hash(tfm, template, tcount, HASH_TEST_DIGEST);
if (!err)
err = test_hash(tfm, template, tcount, false);
err = test_hash(tfm, template, tcount, HASH_TEST_FINAL);
if (!err)
err = test_hash(tfm, template, tcount, HASH_TEST_FINUP);
crypto_free_ahash(tfm);
return err;
}
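The new HASH_TEST_FINUP pass boils down to the following call sequence (a
sketch of standard ahash usage, abbreviated from the hunk above):

	ahash_request_set_crypt(req, sg, result, psize);
	err = crypto_wait_req(crypto_ahash_init(req), &wait);
	if (!err)
		err = crypto_wait_req(crypto_ahash_finup(req), &wait);
	/* finup = update over the full input + final in one call */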
@ -3478,10 +3515,10 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(tgr192_tv_template)
}
}, {
.alg = "vmac(aes)",
.alg = "vmac64(aes)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(aes_vmac128_tv_template)
.hash = __VECS(vmac64_aes_tv_template)
}
}, {
.alg = "wp256",

View File

@ -641,15 +641,17 @@ static const struct kpp_testvec dh_tv_template[] = {
.secret =
#ifdef __LITTLE_ENDIAN
"\x01\x00" /* type */
"\x11\x02" /* len */
"\x15\x02" /* len */
"\x00\x01\x00\x00" /* key_size */
"\x00\x01\x00\x00" /* p_size */
"\x00\x00\x00\x00" /* q_size */
"\x01\x00\x00\x00" /* g_size */
#else
"\x00\x01" /* type */
"\x02\x11" /* len */
"\x02\x15" /* len */
"\x00\x00\x01\x00" /* key_size */
"\x00\x00\x01\x00" /* p_size */
"\x00\x00\x00\x00" /* q_size */
"\x00\x00\x00\x01" /* g_size */
#endif
/* xa */
@ -739,7 +741,7 @@ static const struct kpp_testvec dh_tv_template[] = {
"\xd3\x34\x49\xad\x64\xa6\xb1\xc0\x59\x28\x75\x60\xa7\x8a\xb0\x11"
"\x56\x89\x42\x74\x11\xf5\xf6\x5e\x6f\x16\x54\x6a\xb1\x76\x4d\x50"
"\x8a\x68\xc1\x5b\x82\xb9\x0d\x00\x32\x50\xed\x88\x87\x48\x92\x17",
.secret_size = 529,
.secret_size = 533,
.b_public_size = 256,
.expected_a_public_size = 256,
.expected_ss_size = 256,
@ -748,15 +750,17 @@ static const struct kpp_testvec dh_tv_template[] = {
.secret =
#ifdef __LITTLE_ENDIAN
"\x01\x00" /* type */
"\x11\x02" /* len */
"\x15\x02" /* len */
"\x00\x01\x00\x00" /* key_size */
"\x00\x01\x00\x00" /* p_size */
"\x00\x00\x00\x00" /* q_size */
"\x01\x00\x00\x00" /* g_size */
#else
"\x00\x01" /* type */
"\x02\x11" /* len */
"\x02\x15" /* len */
"\x00\x00\x01\x00" /* key_size */
"\x00\x00\x01\x00" /* p_size */
"\x00\x00\x00\x00" /* q_size */
"\x00\x00\x00\x01" /* g_size */
#endif
/* xa */
@ -846,7 +850,7 @@ static const struct kpp_testvec dh_tv_template[] = {
"\x5e\x5a\x64\xbd\xf6\x85\x04\xe8\x28\x6a\xac\xef\xce\x19\x8e\x9a"
"\xfe\x75\xc0\x27\x69\xe3\xb3\x7b\x21\xa7\xb1\x16\xa4\x85\x23\xee"
"\xb0\x1b\x04\x6e\xbd\xab\x16\xde\xfd\x86\x6b\xa9\x95\xd7\x0b\xfd",
.secret_size = 529,
.secret_size = 533,
.b_public_size = 256,
.expected_a_public_size = 256,
.expected_ss_size = 256,
@ -4603,105 +4607,158 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = {
}
};
static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
'\x02', '\x03', '\x02', '\x02',
'\x02', '\x04', '\x01', '\x07',
'\x04', '\x01', '\x04', '\x03',};
static const char vmac_string2[128] = {'a', 'b', 'c',};
static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c',
};
static const char vmac64_string1[144] = {
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\x01', '\x01', '\x01', '\x01', '\x02', '\x03', '\x02', '\x02',
'\x02', '\x04', '\x01', '\x07', '\x04', '\x01', '\x04', '\x03',
};
static const char vmac_string4[17] = {'b', 'c', 'e', 'f',
'i', 'j', 'l', 'm',
'o', 'p', 'r', 's',
't', 'u', 'w', 'x', 'z'};
static const char vmac64_string2[144] = {
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'a', 'b', 'c',
};
static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
'o', 'l', 'k', ']', '%',
'9', '2', '7', '!', 'A'};
static const char vmac64_string3[144] = {
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
};
static const char vmac_string6[129] = {'p', 't', '*', '7', 'l',
'i', '!', '#', 'w', '0',
'z', '/', '4', 'A', 'n'};
static const char vmac64_string4[33] = {
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'b', 'c', 'e', 'f', 'i', 'j', 'l', 'm',
'o', 'p', 'r', 's', 't', 'u', 'w', 'x',
'z',
};
static const struct hash_testvec aes_vmac128_tv_template[] = {
{
static const char vmac64_string5[143] = {
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'r', 'm', 'b', 't', 'c', 'o', 'l', 'k',
']', '%', '9', '2', '7', '!', 'A',
};
static const char vmac64_string6[145] = {
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'p', 't', '*', '7', 'l', 'i', '!', '#',
'w', '0', 'z', '/', '4', 'A', 'n',
};
static const struct hash_testvec vmac64_aes_tv_template[] = {
{ /* draft-krovetz-vmac-01 test vector 1 */
.key = "abcdefghijklmnop",
.ksize = 16,
.plaintext = "\0\0\0\0\0\0\0\0bcdefghi",
.psize = 16,
.digest = "\x25\x76\xbe\x1c\x56\xd8\xb8\x1b",
}, { /* draft-krovetz-vmac-01 test vector 2 */
.key = "abcdefghijklmnop",
.ksize = 16,
.plaintext = "\0\0\0\0\0\0\0\0bcdefghiabc",
.psize = 19,
.digest = "\x2d\x37\x6c\xf5\xb1\x81\x3c\xe5",
}, { /* draft-krovetz-vmac-01 test vector 3 */
.key = "abcdefghijklmnop",
.ksize = 16,
.plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
"abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
.psize = 64,
.digest = "\xe8\x42\x1f\x61\xd5\x73\xd2\x98",
}, { /* draft-krovetz-vmac-01 test vector 4 */
.key = "abcdefghijklmnop",
.ksize = 16,
.plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
"abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
"abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
"abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
"abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
"abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
"abcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
.psize = 316,
.digest = "\x44\x92\xdf\x6c\x5c\xac\x1b\xbe",
.tap = { 1, 100, 200, 15 },
.np = 4,
}, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.plaintext = NULL,
.digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54",
.psize = 0,
.ksize = 16,
.plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.psize = 16,
.digest = "\x54\x7b\xa4\x77\x35\x80\x58\x07",
}, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.plaintext = vmac_string1,
.digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1",
.psize = 128,
.ksize = 16,
.ksize = 16,
.plaintext = vmac64_string1,
.psize = sizeof(vmac64_string1),
.digest = "\xa1\x8c\x68\xae\xd3\x3c\xf5\xce",
}, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.plaintext = vmac_string2,
.digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d",
.psize = 128,
.ksize = 16,
.ksize = 16,
.plaintext = vmac64_string2,
.psize = sizeof(vmac64_string2),
.digest = "\x2d\x14\xbd\x81\x73\xb0\x27\xc9",
}, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.plaintext = vmac_string3,
.digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19",
.psize = 128,
.ksize = 16,
.ksize = 16,
.plaintext = vmac64_string3,
.psize = sizeof(vmac64_string3),
.digest = "\x19\x0b\x47\x98\x8c\x95\x1a\x8d",
}, {
.key = "abcdefghijklmnop",
.plaintext = NULL,
.digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84",
.psize = 0,
.ksize = 16,
.plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.psize = 16,
.digest = "\x84\x8f\x55\x9e\x26\xa1\x89\x3b",
}, {
.key = "abcdefghijklmnop",
.plaintext = vmac_string1,
.digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2",
.psize = 128,
.ksize = 16,
.key = "abcdefghijklmnop",
.ksize = 16,
.plaintext = vmac64_string1,
.psize = sizeof(vmac64_string1),
.digest = "\xc2\x74\x8d\xf6\xb0\xab\x5e\xab",
}, {
.key = "abcdefghijklmnop",
.plaintext = vmac_string2,
.digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf",
.psize = 128,
.ksize = 16,
.key = "abcdefghijklmnop",
.ksize = 16,
.plaintext = vmac64_string2,
.psize = sizeof(vmac64_string2),
.digest = "\xdf\x09\x7b\x3d\x42\x68\x15\x11",
}, {
.key = "abcdefghijklmnop",
.plaintext = vmac_string3,
.digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4",
.psize = 128,
.ksize = 16,
.key = "abcdefghijklmnop",
.ksize = 16,
.plaintext = vmac64_string3,
.psize = sizeof(vmac64_string3),
.digest = "\xd4\xfa\x8f\xed\xe1\x8f\x32\x8b",
}, {
.key = "a09b5cd!f#07K\x00\x00\x00",
.plaintext = vmac_string4,
.digest = "\xab\xa5\x0f\xea\x42\x4e\xa1\x5f",
.psize = sizeof(vmac_string4),
.ksize = 16,
.key = "a09b5cd!f#07K\x00\x00\x00",
.ksize = 16,
.plaintext = vmac64_string4,
.psize = sizeof(vmac64_string4),
.digest = "\x5f\xa1\x4e\x42\xea\x0f\xa5\xab",
}, {
.key = "a09b5cd!f#07K\x00\x00\x00",
.plaintext = vmac_string5,
.digest = "\x25\x31\x98\xbc\x1d\xe8\x67\x60",
.psize = sizeof(vmac_string5),
.ksize = 16,
.key = "a09b5cd!f#07K\x00\x00\x00",
.ksize = 16,
.plaintext = vmac64_string5,
.psize = sizeof(vmac64_string5),
.digest = "\x60\x67\xe8\x1d\xbc\x98\x31\x25",
}, {
.key = "a09b5cd!f#07K\x00\x00\x00",
.plaintext = vmac_string6,
.digest = "\xc4\xae\x9b\x47\x95\x65\xeb\x41",
.psize = sizeof(vmac_string6),
.ksize = 16,
.key = "a09b5cd!f#07K\x00\x00\x00",
.ksize = 16,
.plaintext = vmac64_string6,
.psize = sizeof(vmac64_string6),
.digest = "\x41\xeb\x65\x95\x47\x9b\xae\xc4",
},
};

View File

@ -636,7 +636,6 @@ static struct shash_alg tgr_algs[3] = { {
.descsize = sizeof(struct tgr192_ctx),
.base = {
.cra_name = "tgr192",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -648,7 +647,6 @@ static struct shash_alg tgr_algs[3] = { {
.descsize = sizeof(struct tgr192_ctx),
.base = {
.cra_name = "tgr160",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -660,7 +658,6 @@ static struct shash_alg tgr_algs[3] = { {
.descsize = sizeof(struct tgr192_ctx),
.base = {
.cra_name = "tgr128",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}

View File

@ -1,6 +1,10 @@
/*
* Modified to interface to the Linux kernel
* VMAC: Message Authentication Code using Universal Hashing
*
* Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
*
* Copyright (c) 2009, Intel Corporation.
* Copyright (c) 2018, Google Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@ -16,14 +20,15 @@
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
/* --------------------------------------------------------------------------
* VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
* This implementation is hereby placed in the public domain.
* The authors offer no warranty. Use at your own risk.
* Please send bug reports to the authors.
* Last modified: 17 APR 08, 1700 PDT
* ----------------------------------------------------------------------- */
/*
* Derived from:
* VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
* This implementation is hereby placed in the public domain.
* The authors offer no warranty. Use at your own risk.
* Last modified: 17 APR 08, 1700 PDT
*/
#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
@ -31,9 +36,41 @@
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
#include <crypto/vmac.h>
#include <crypto/internal/hash.h>
/*
* User definable settings.
*/
#define VMAC_TAG_LEN 64
#define VMAC_KEY_SIZE 128 /* Must be 128, 192 or 256 */
#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
#define VMAC_NHBYTES 128 /* Must be 2^i for some 3 < i < 13; standard = 128 */
#define VMAC_NONCEBYTES 16
/* per-transform (per-key) context */
struct vmac_tfm_ctx {
struct crypto_cipher *cipher;
u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
u64 polykey[2*VMAC_TAG_LEN/64];
u64 l3key[2*VMAC_TAG_LEN/64];
};
/* per-request context */
struct vmac_desc_ctx {
union {
u8 partial[VMAC_NHBYTES]; /* partial block */
__le64 partial_words[VMAC_NHBYTES / 8];
};
unsigned int partial_size; /* size of the partial block */
bool first_block_processed;
u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
union {
u8 bytes[VMAC_NONCEBYTES];
__be64 pads[VMAC_NONCEBYTES / 8];
} nonce;
unsigned int nonce_size; /* nonce bytes filled so far */
};
/*
* Constants and masks
*/
@ -318,13 +355,6 @@ static void poly_step_func(u64 *ahi, u64 *alo,
} while (0)
#endif
static void vhash_abort(struct vmac_ctx *ctx)
{
ctx->polytmp[0] = ctx->polykey[0] ;
ctx->polytmp[1] = ctx->polykey[1] ;
ctx->first_block_processed = 0;
}
static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
u64 rh, rl, t, z = 0;
@ -364,280 +394,227 @@ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
return rl;
}
static void vhash_update(const unsigned char *m,
unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */
struct vmac_ctx *ctx)
/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
struct vmac_desc_ctx *dctx,
const __le64 *mptr, unsigned int blocks)
{
u64 rh, rl, *mptr;
const u64 *kptr = (u64 *)ctx->nhkey;
int i;
u64 ch, cl;
u64 pkh = ctx->polykey[0];
u64 pkl = ctx->polykey[1];
const u64 *kptr = tctx->nhkey;
const u64 pkh = tctx->polykey[0];
const u64 pkl = tctx->polykey[1];
u64 ch = dctx->polytmp[0];
u64 cl = dctx->polytmp[1];
u64 rh, rl;
if (!mbytes)
return;
BUG_ON(mbytes % VMAC_NHBYTES);
mptr = (u64 *)m;
i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
ch = ctx->polytmp[0];
cl = ctx->polytmp[1];
if (!ctx->first_block_processed) {
ctx->first_block_processed = 1;
if (!dctx->first_block_processed) {
dctx->first_block_processed = true;
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
ADD128(ch, cl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
i--;
blocks--;
}
while (i--) {
while (blocks--) {
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
poly_step(ch, cl, pkh, pkl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
}
ctx->polytmp[0] = ch;
ctx->polytmp[1] = cl;
dctx->polytmp[0] = ch;
dctx->polytmp[1] = cl;
}
static u64 vhash(unsigned char m[], unsigned int mbytes,
u64 *tagl, struct vmac_ctx *ctx)
static int vmac_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
u64 rh, rl, *mptr;
const u64 *kptr = (u64 *)ctx->nhkey;
int i, remaining;
u64 ch, cl;
u64 pkh = ctx->polykey[0];
u64 pkl = ctx->polykey[1];
struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
__be64 out[2];
u8 in[16] = { 0 };
unsigned int i;
int err;
mptr = (u64 *)m;
i = mbytes / VMAC_NHBYTES;
remaining = mbytes % VMAC_NHBYTES;
if (ctx->first_block_processed) {
ch = ctx->polytmp[0];
cl = ctx->polytmp[1];
} else if (i) {
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
ch &= m62;
ADD128(ch, cl, pkh, pkl);
mptr += (VMAC_NHBYTES/sizeof(u64));
i--;
} else if (remaining) {
nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
ch &= m62;
ADD128(ch, cl, pkh, pkl);
mptr += (VMAC_NHBYTES/sizeof(u64));
goto do_l3;
} else {/* Empty String */
ch = pkh; cl = pkl;
goto do_l3;
if (keylen != VMAC_KEY_LEN) {
crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
while (i--) {
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
poly_step(ch, cl, pkh, pkl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
}
if (remaining) {
nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
rh &= m62;
poly_step(ch, cl, pkh, pkl, rh, rl);
}
do_l3:
vhash_abort(ctx);
remaining *= 8;
return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
}
static u64 vmac(unsigned char m[], unsigned int mbytes,
const unsigned char n[16], u64 *tagl,
struct vmac_ctx_t *ctx)
{
u64 *in_n, *out_p;
u64 p, h;
int i;
in_n = ctx->__vmac_ctx.cached_nonce;
out_p = ctx->__vmac_ctx.cached_aes;
i = n[15] & 1;
if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
in_n[0] = *(u64 *)(n);
in_n[1] = *(u64 *)(n+8);
((unsigned char *)in_n)[15] &= 0xFE;
crypto_cipher_encrypt_one(ctx->child,
(unsigned char *)out_p, (unsigned char *)in_n);
((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
}
p = be64_to_cpup(out_p + i);
h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
return le64_to_cpu(p + h);
}
static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
{
u64 in[2] = {0}, out[2];
unsigned i;
int err = 0;
err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
err = crypto_cipher_setkey(tctx->cipher, key, keylen);
if (err)
return err;
/* Fill nh key */
((unsigned char *)in)[0] = 0x80;
for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
crypto_cipher_encrypt_one(ctx->child,
(unsigned char *)out, (unsigned char *)in);
ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
((unsigned char *)in)[15] += 1;
in[0] = 0x80;
for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
tctx->nhkey[i] = be64_to_cpu(out[0]);
tctx->nhkey[i+1] = be64_to_cpu(out[1]);
in[15]++;
}
/* Fill poly key */
((unsigned char *)in)[0] = 0xC0;
in[1] = 0;
for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
crypto_cipher_encrypt_one(ctx->child,
(unsigned char *)out, (unsigned char *)in);
ctx->__vmac_ctx.polytmp[i] =
ctx->__vmac_ctx.polykey[i] =
be64_to_cpup(out) & mpoly;
ctx->__vmac_ctx.polytmp[i+1] =
ctx->__vmac_ctx.polykey[i+1] =
be64_to_cpup(out+1) & mpoly;
((unsigned char *)in)[15] += 1;
in[0] = 0xC0;
in[15] = 0;
for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
in[15]++;
}
/* Fill ip key */
((unsigned char *)in)[0] = 0xE0;
in[1] = 0;
for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
in[0] = 0xE0;
in[15] = 0;
for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
do {
crypto_cipher_encrypt_one(ctx->child,
(unsigned char *)out, (unsigned char *)in);
ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
((unsigned char *)in)[15] += 1;
} while (ctx->__vmac_ctx.l3key[i] >= p64
|| ctx->__vmac_ctx.l3key[i+1] >= p64);
crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
tctx->l3key[i] = be64_to_cpu(out[0]);
tctx->l3key[i+1] = be64_to_cpu(out[1]);
in[15]++;
} while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
}
/* Invalidate nonce/aes cache and reset other elements */
ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */
ctx->__vmac_ctx.first_block_processed = 0;
return err;
return 0;
}
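In equation form, the key derivation above expands the single AES key K
into three subkeys by encrypting tagged counter blocks (a summary of the
code in this function, not text from the commit):

	nhkey[2i], nhkey[2i+1]     = AES_K(0x80 || 0...0 || i)
	polykey[2i], polykey[2i+1] = AES_K(0xC0 || 0...0 || i) masked by mpoly
	l3key[2i], l3key[2i+1]     = AES_K(0xE0 || 0...0 || i), retried while
	                             either word is >= p64

where i is the last byte of the 16-byte input block.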
static int vmac_setkey(struct crypto_shash *parent,
const u8 *key, unsigned int keylen)
static int vmac_init(struct shash_desc *desc)
{
struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
if (keylen != VMAC_KEY_LEN) {
crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
dctx->partial_size = 0;
dctx->first_block_processed = false;
memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
dctx->nonce_size = 0;
return 0;
}
static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
	unsigned int n;

	/* Nonce is passed as first VMAC_NONCEBYTES bytes of data */
	if (dctx->nonce_size < VMAC_NONCEBYTES) {
		n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
		memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
		dctx->nonce_size += n;
		p += n;
		len -= n;
	}

	if (dctx->partial_size) {
		n = min(len, VMAC_NHBYTES - dctx->partial_size);
		memcpy(&dctx->partial[dctx->partial_size], p, n);
		dctx->partial_size += n;
		p += n;
		len -= n;
		if (dctx->partial_size == VMAC_NHBYTES) {
			vhash_blocks(tctx, dctx, dctx->partial_words, 1);
			dctx->partial_size = 0;
		}
	}

	if (len >= VMAC_NHBYTES) {
		n = round_down(len, VMAC_NHBYTES);
		/* TODO: 'p' may be misaligned here */
		vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
		p += n;
		len -= n;
	}

	if (len) {
		memcpy(dctx->partial, p, len);
		dctx->partial_size = len;
	}

	return 0;
}
static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
		       struct vmac_desc_ctx *dctx)
{
	unsigned int partial = dctx->partial_size;
	u64 ch = dctx->polytmp[0];
	u64 cl = dctx->polytmp[1];

	/* L1 and L2-hash the final block if needed */
	if (partial) {
		/* Zero-pad to next 128-bit boundary */
		unsigned int n = round_up(partial, 16);
		u64 rh, rl;

		memset(&dctx->partial[partial], 0, n - partial);
		nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
		rh &= m62;
		if (dctx->first_block_processed)
			poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
				  rh, rl);
		else
			ADD128(ch, cl, rh, rl);
	}

	/* L3-hash the 128-bit output of L2-hash */
	return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
}
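
As a reading aid (this is the standard VMAC composition, paraphrased from the VMAC spec rather than quoted from this diff): vhash_final() finishes the three-level VHASH, and vmac_final() below adds the nonce-derived pad,

\[ \mathrm{tag} = \bigl(\mathrm{L3}(\mathrm{L2}(\mathrm{L1}(M))) + E_K(N)[\mathit{index}]\bigr) \bmod 2^{64} \]

where L1 is the NH hash over 128-byte blocks (nh_16), L2 is the polynomial accumulation modulo 2^127 - 1 (poly_step), L3 is the final reduction modulo 2^64 - 257 (l3hash), and E_K(N) is the block-cipher encryption of the 128-bit nonce.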
static int vmac_final(struct shash_desc *desc, u8 *out)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
	int index;
	u64 hash, pad;

	if (dctx->nonce_size != VMAC_NONCEBYTES)
		return -EINVAL;

	/*
	 * The VMAC specification requires a nonce at least 1 bit shorter than
	 * the block cipher's block length, so we actually only accept a
	 * 127-bit nonce.  We define the unused bit to be the first one and
	 * require that it be 0, so the needed prepending of a 0 bit is
	 * implicit.
	 */
	if (dctx->nonce.bytes[0] & 0x80)
		return -EINVAL;

	/* Finish calculating the VHASH of the message */
	hash = vhash_final(tctx, dctx);

	/* Generate pseudorandom pad by encrypting the nonce */
	BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
	index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
	dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
	crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
				  dctx->nonce.bytes);
	pad = be64_to_cpu(dctx->nonce.pads[index]);

	/* The VMAC is the sum of VHASH and the pseudorandom pad */
	put_unaligned_be64(hash + pad, out);
	return 0;
}
static int vmac_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tctx->cipher = cipher;
	return 0;
}

static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(tctx->cipher);
}
static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
@ -655,7 +632,11 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
if (IS_ERR(alg))
return PTR_ERR(alg);
inst = shash_alloc_instance("vmac", alg);
err = -EINVAL;
if (alg->cra_blocksize != VMAC_NONCEBYTES)
goto out_put_alg;
inst = shash_alloc_instance(tmpl->name, alg);
err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
@ -670,11 +651,12 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_alignmask = alg->cra_alignmask;
inst->alg.digestsize = sizeof(vmac_t);
inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
inst->alg.base.cra_init = vmac_init_tfm;
inst->alg.base.cra_exit = vmac_exit_tfm;
inst->alg.descsize = sizeof(struct vmac_desc_ctx);
inst->alg.digestsize = VMAC_TAG_LEN / 8;
inst->alg.init = vmac_init;
inst->alg.update = vmac_update;
inst->alg.final = vmac_final;
@ -691,8 +673,8 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
return err;
}
static struct crypto_template vmac64_tmpl = {
.name = "vmac64",
.create = vmac_create,
.free = shash_free_instance,
.module = THIS_MODULE,
@ -700,12 +682,12 @@ static struct crypto_template vmac_tmpl = {
static int __init vmac_module_init(void)
{
return crypto_register_template(&vmac_tmpl);
return crypto_register_template(&vmac64_tmpl);
}
static void __exit vmac_module_exit(void)
{
crypto_unregister_template(&vmac_tmpl);
crypto_unregister_template(&vmac64_tmpl);
}
module_init(vmac_module_init);
@ -713,4 +695,4 @@ module_exit(vmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
MODULE_ALIAS_CRYPTO("vmac");
MODULE_ALIAS_CRYPTO("vmac64");

View File

@ -1127,7 +1127,6 @@ static struct shash_alg wp_algs[3] = { {
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp512",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -1139,7 +1138,6 @@ static struct shash_alg wp_algs[3] = { {
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp384",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -1151,7 +1149,6 @@ static struct shash_alg wp_algs[3] = { {
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp256",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
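
The flag removals here and in the driver hunks below all follow from the same cleanup in this merge: the registration helpers (e.g. crypto_register_shash()) already fill in the algorithm type, so CRYPTO_ALG_TYPE_* values in .cra_flags were redundant and only behavioral flags such as CRYPTO_ALG_ASYNC remain. A minimal post-cleanup shash_alg, using hypothetical foo_* names, might look like:

static struct shash_alg foo_alg = {
	.digestsize	= 32,
	.init		= foo_init,	/* hypothetical callbacks */
	.update		= foo_update,
	.final		= foo_final,
	.descsize	= sizeof(struct foo_desc_ctx),
	.base		= {
		.cra_name	= "foo256",
		.cra_blocksize	= 64,
		/* no CRYPTO_ALG_TYPE_SHASH and no .cra_type needed;
		 * crypto_register_shash() supplies both */
		.cra_module	= THIS_MODULE,
	}
};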

View File

@ -138,7 +138,7 @@ static int post_crypt(struct skcipher_request *req)
if (rctx->dst != sg) {
rctx->dst[0] = *sg;
sg_unmark_end(rctx->dst);
scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
}
rctx->dst[0].length -= offset - sg->offset;
rctx->dst[0].offset = offset;
@ -204,7 +204,7 @@ static int pre_crypt(struct skcipher_request *req)
if (rctx->src != sg) {
rctx->src[0] = *sg;
sg_unmark_end(rctx->src);
scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
}
rctx->src[0].length -= offset - sg->offset;
rctx->src[0].offset = offset;

View File

@ -307,19 +307,6 @@ config HW_RANDOM_HISI
If unsure, say Y.
config HW_RANDOM_MSM
tristate "Qualcomm SoCs Random Number Generator support"
depends on HW_RANDOM && ARCH_QCOM
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on Qualcomm SoCs.
To compile this driver as a module, choose M here. The
module will be called msm-rng.
If unsure, say Y.
config HW_RANDOM_ST
tristate "ST Microelectronics HW Random Number Generator support"
depends on HW_RANDOM && ARCH_STI

View File

@ -29,7 +29,6 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o

View File

@ -1,183 +0,0 @@
/*
* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
/* Device specific register offsets */
#define PRNG_DATA_OUT 0x0000
#define PRNG_STATUS 0x0004
#define PRNG_LFSR_CFG 0x0100
#define PRNG_CONFIG 0x0104
/* Device specific register masks and config values */
#define PRNG_LFSR_CFG_MASK 0x0000ffff
#define PRNG_LFSR_CFG_CLOCKS 0x0000dddd
#define PRNG_CONFIG_HW_ENABLE BIT(1)
#define PRNG_STATUS_DATA_AVAIL BIT(0)
#define MAX_HW_FIFO_DEPTH 16
#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4)
#define WORD_SZ 4
struct msm_rng {
void __iomem *base;
struct clk *clk;
struct hwrng hwrng;
};
#define to_msm_rng(p) container_of(p, struct msm_rng, hwrng)
static int msm_rng_enable(struct hwrng *hwrng, int enable)
{
struct msm_rng *rng = to_msm_rng(hwrng);
u32 val;
int ret;
ret = clk_prepare_enable(rng->clk);
if (ret)
return ret;
if (enable) {
/* Enable PRNG only if it is not already enabled */
val = readl_relaxed(rng->base + PRNG_CONFIG);
if (val & PRNG_CONFIG_HW_ENABLE)
goto already_enabled;
val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
val &= ~PRNG_LFSR_CFG_MASK;
val |= PRNG_LFSR_CFG_CLOCKS;
writel(val, rng->base + PRNG_LFSR_CFG);
val = readl_relaxed(rng->base + PRNG_CONFIG);
val |= PRNG_CONFIG_HW_ENABLE;
writel(val, rng->base + PRNG_CONFIG);
} else {
val = readl_relaxed(rng->base + PRNG_CONFIG);
val &= ~PRNG_CONFIG_HW_ENABLE;
writel(val, rng->base + PRNG_CONFIG);
}
already_enabled:
clk_disable_unprepare(rng->clk);
return 0;
}
static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
{
struct msm_rng *rng = to_msm_rng(hwrng);
size_t currsize = 0;
u32 *retdata = data;
size_t maxsize;
int ret;
u32 val;
/* calculate max size bytes to transfer back to caller */
maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
ret = clk_prepare_enable(rng->clk);
if (ret)
return ret;
/* read random data from hardware */
do {
val = readl_relaxed(rng->base + PRNG_STATUS);
if (!(val & PRNG_STATUS_DATA_AVAIL))
break;
val = readl_relaxed(rng->base + PRNG_DATA_OUT);
if (!val)
break;
*retdata++ = val;
currsize += WORD_SZ;
/* make sure we stay on 32bit boundary */
if ((maxsize - currsize) < WORD_SZ)
break;
} while (currsize < maxsize);
clk_disable_unprepare(rng->clk);
return currsize;
}
static int msm_rng_init(struct hwrng *hwrng)
{
return msm_rng_enable(hwrng, 1);
}
static void msm_rng_cleanup(struct hwrng *hwrng)
{
msm_rng_enable(hwrng, 0);
}
static int msm_rng_probe(struct platform_device *pdev)
{
struct resource *res;
struct msm_rng *rng;
int ret;
rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
if (!rng)
return -ENOMEM;
platform_set_drvdata(pdev, rng);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rng->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(rng->base))
return PTR_ERR(rng->base);
rng->clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(rng->clk))
return PTR_ERR(rng->clk);
rng->hwrng.name = KBUILD_MODNAME,
rng->hwrng.init = msm_rng_init,
rng->hwrng.cleanup = msm_rng_cleanup,
rng->hwrng.read = msm_rng_read,
ret = devm_hwrng_register(&pdev->dev, &rng->hwrng);
if (ret) {
dev_err(&pdev->dev, "failed to register hwrng\n");
return ret;
}
return 0;
}
static const struct of_device_id msm_rng_of_match[] = {
{ .compatible = "qcom,prng", },
{}
};
MODULE_DEVICE_TABLE(of, msm_rng_of_match);
static struct platform_driver msm_rng_driver = {
.probe = msm_rng_probe,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(msm_rng_of_match),
}
};
module_platform_driver(msm_rng_driver);
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");
MODULE_DESCRIPTION("Qualcomm MSM random number generator driver");
MODULE_LICENSE("GPL v2");

View File

@ -585,6 +585,17 @@ config CRYPTO_DEV_QCE
hardware. To compile this driver as a module, choose M here. The
module will be called qcrypto.
config CRYPTO_DEV_QCOM_RNG
tristate "Qualcomm Random Number Generator Driver"
depends on ARCH_QCOM || COMPILE_TEST
select CRYPTO_RNG
help
This driver provides support for the Random Number
Generator hardware found on Qualcomm SoCs.
To compile this driver as a module, choose M here. The
module will be called qcom-rng. If unsure, say N.
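A consumer reaches this device through the kernel's crypto_rng API rather than /dev/hwrng; a hedged sketch, assuming the driver registers under the conventional "stdrng" algorithm name:

#include <crypto/rng.h>
#include <linux/err.h>

static int qcom_rng_read_sketch(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int err;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	err = crypto_rng_reset(rng, NULL, 0);	/* (re)seed from hardware */
	if (!err)
		err = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return err;
}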
config CRYPTO_DEV_VMX
bool "Support for VMX cryptographic acceleration instructions"
depends on PPC64 && VSX
@ -689,8 +700,10 @@ config CRYPTO_DEV_SAFEXCEL
select CRYPTO_AES
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
select CRYPTO_DES
select CRYPTO_HASH
select CRYPTO_HMAC
select CRYPTO_MD5
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
@ -746,4 +759,6 @@ config CRYPTO_DEV_CCREE
cryptographic operations on the system REE.
If unsure say Y.
source "drivers/crypto/hisilicon/Kconfig"
endif # CRYPTO_HW

View File

@ -33,6 +33,7 @@ obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
@ -45,3 +46,4 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-y += hisilicon/

View File

@ -1132,8 +1132,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@ -1153,8 +1152,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "cfb(aes)",
.cra_driver_name = "cfb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@ -1174,8 +1172,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
@ -1196,8 +1193,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@ -1217,8 +1213,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@ -1237,8 +1232,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "ofb(aes)",
.cra_driver_name = "ofb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),

View File

@ -186,7 +186,10 @@ static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd,
* always be the same. Use a macro for the key size to avoid unnecessary
* computations.
*/
copied = sg_copy_to_buffer(pubkey, 1, cmd->data, ATMEL_ECC_PUBKEY_SIZE);
copied = sg_copy_to_buffer(pubkey,
sg_nents_for_len(pubkey,
ATMEL_ECC_PUBKEY_SIZE),
cmd->data, ATMEL_ECC_PUBKEY_SIZE);
if (copied != ATMEL_ECC_PUBKEY_SIZE)
return -EINVAL;
@ -268,15 +271,17 @@ static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq,
struct kpp_request *req = areq;
struct atmel_ecdh_ctx *ctx = work_data->ctx;
struct atmel_ecc_cmd *cmd = &work_data->cmd;
size_t copied, n_sz;
if (status)
goto free_work_data;
/* might want less than we've got */
n_sz = min_t(size_t, ctx->n_sz, req->dst_len);
/* copy the shared secret */
copied = sg_copy_from_buffer(req->dst, 1, &cmd->data[RSP_DATA_IDX],
n_sz);
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz),
&cmd->data[RSP_DATA_IDX], n_sz);
if (copied != n_sz)
status = -EINVAL;
@ -440,7 +445,7 @@ static int atmel_ecdh_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
size_t copied, nbytes;
int ret = 0;
if (ctx->do_fallback) {
@ -448,10 +453,14 @@ static int atmel_ecdh_generate_public_key(struct kpp_request *req)
return crypto_kpp_generate_public_key(req);
}
/* might want less than we've got */
nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len);
/* public key was saved at private key generation */
copied = sg_copy_from_buffer(req->dst, 1, ctx->public_key,
ATMEL_ECC_PUBKEY_SIZE);
if (copied != ATMEL_ECC_PUBKEY_SIZE)
copied = sg_copy_from_buffer(req->dst,
sg_nents_for_len(req->dst, nbytes),
ctx->public_key, nbytes);
if (copied != nbytes)
ret = -EINVAL;
return ret;
@ -470,6 +479,10 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
return crypto_kpp_compute_shared_secret(req);
}
/* must have exactly two points to be on the curve */
if (req->src_len != ATMEL_ECC_PUBKEY_SIZE)
return -EINVAL;
gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
GFP_ATOMIC;
@ -554,10 +567,6 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
}
crypto_kpp_set_flags(fallback, crypto_kpp_get_flags(tfm));
dev_info(&ctx->client->dev, "Using '%s' as fallback implementation.\n",
crypto_tfm_alg_driver_name(crypto_kpp_tfm(fallback)));
ctx->fallback = fallback;
return 0;
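
The sg_nents_for_len() conversions above are what make these copies safe for multi-entry scatterlists: with nents hardcoded to 1, sg_copy_{to,from}_buffer() stops at the end of the first entry and silently truncates. The pattern, as a self-contained sketch (copy_to_sg is a hypothetical helper, not from this driver):

#include <linux/scatterlist.h>

static int copy_to_sg(struct scatterlist *dst, const void *buf, size_t len)
{
	int nents = sg_nents_for_len(dst, len);	/* entries needed for len */
	size_t copied;

	if (nents < 0)
		return nents;	/* list too short: -EINVAL */

	copied = sg_copy_from_buffer(dst, nents, buf, len);
	return copied == len ? 0 : -EINVAL;
}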

View File

@ -2316,9 +2316,7 @@ struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode)
goto error;
}
tfm = crypto_alloc_ahash(name, 0, 0);
if (IS_ERR(tfm)) {
err = PTR_ERR(tfm);
goto error;

View File

@ -2704,7 +2704,7 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "sha1",
.cra_driver_name = "artpec-sha1",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@ -2727,7 +2727,7 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "sha256",
.cra_driver_name = "artpec-sha256",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@ -2751,7 +2751,7 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "artpec-hmac-sha256",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@ -2777,7 +2777,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "sha384",
.cra_driver_name = "artpec-sha384",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@ -2801,7 +2801,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "artpec-hmac-sha384",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@ -2824,7 +2824,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "sha512",
.cra_driver_name = "artpec-sha512",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@ -2848,7 +2848,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "artpec-hmac-sha512",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@ -2867,8 +2867,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "artpec6-ecb-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@ -2888,8 +2887,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "ctr(aes)",
.cra_driver_name = "artpec6-ctr-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
@ -2911,8 +2909,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "artpec6-cbc-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@ -2933,8 +2930,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "xts(aes)",
.cra_driver_name = "artpec6-xts-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@ -2964,7 +2960,7 @@ static struct aead_alg aead_algos[] = {
.cra_name = "gcm(aes)",
.cra_driver_name = "artpec-gcm-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),

View File

@ -3914,8 +3914,7 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "md5",
.cra_driver_name = "md5-iproc",
.cra_blocksize = MD5_BLOCK_WORDS * 4,
.cra_flags = CRYPTO_ALG_ASYNC,
}
},
.cipher_info = {
@ -4649,8 +4648,7 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg)
hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
hash->halg.base.cra_init = ahash_cra_init;
hash->halg.base.cra_exit = generic_cra_exit;
hash->halg.base.cra_type = &crypto_ahash_type;
hash->halg.base.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
hash->halg.statesize = sizeof(struct spu_hash_export_s);
if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
@ -4691,7 +4689,7 @@ static int spu_register_aead(struct iproc_alg_s *driver_alg)
aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
INIT_LIST_HEAD(&aead->base.cra_list);
aead->base.cra_flags |= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
/* setkey set in alg initialization */
aead->setauthsize = aead_setauthsize;
aead->encrypt = aead_encrypt;

View File

@ -1846,8 +1846,7 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
alg->cra_type = &crypto_ahash_type;
alg->cra_flags = CRYPTO_ALG_ASYNC;
t_alg->alg_type = template->alg_type;

View File

@ -351,7 +351,7 @@ static int cvm_enc_dec_init(struct crypto_tfm *tfm)
return 0;
}
static struct crypto_alg algs[] = { {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cvm_enc_ctx),

Some files were not shown because too many files have changed in this diff