crypto: arm/aes - replace bit-sliced OpenSSL NEON code
This replaces the unwieldy generated implementation of bit-sliced AES in
CBC/CTR/XTS modes that originated in the OpenSSL project with a new version
that is heavily based on the OpenSSL implementation, but has a number of
advantages over the old version:

- it does not rely on the scalar AES cipher that also originated in the
  OpenSSL project and contains redundant lookup tables and key schedule
  generation routines (which we already have in crypto/aes_generic)
- it uses the same expanded key schedule for encryption and decryption,
  reducing the size of the per-key data structure by 1696 bytes
- it adds an implementation of AES in ECB mode, which can be wrapped by
  other generic chaining mode implementations
- it moves the handling of corner cases that are non-critical to
  performance to the glue layer written in C
- it was written directly in assembler rather than generated from a Perl
  script

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
parent 1abee99eaf
commit cc477bf645
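
A quick check of the 1696-byte figure from the commit message: one bit-sliced
key schedule is 13 * (8 * AES_BLOCK_SIZE) + 32 = 1696 bytes (the old
BIT_SLICED_KEY_MAXSIZE, 128 * (AES_MAXNR - 1) + 2 * AES_BLOCK_SIZE, comes to
the same value), and the old per-key structures carried two such schedules
where the new aesbs_ctx further down in this diff carries one. A standalone
user-space sketch of that arithmetic, plus the 6 + key_len / 4 round-count
rule the new glue code uses (illustrative only, not part of the commit):

#include <stdio.h>

#define AES_BLOCK_SIZE  16
#define AES_MAXNR       14      /* maximum number of rounds (AES-256) */

int main(void)
{
        /* Size of one bit-sliced key schedule: the old BS_KEY and the
         * new aesbs_ctx spell it differently, but both come to 1696. */
        int old_sched = 128 * (AES_MAXNR - 1) + 2 * AES_BLOCK_SIZE;
        int new_sched = 13 * (8 * AES_BLOCK_SIZE) + 32;

        /* The old code kept two schedules (encrypt and decrypt), the
         * new code keeps one: dropping the second schedule is the
         * quoted 1696-byte saving in the per-key data structure. */
        printf("schedule size: old %d, new %d bytes\n", old_sched, new_sched);

        /* Round count used by the new glue: 10/12/14 rounds for
         * 16/24/32-byte keys. */
        for (int key_len = 16; key_len <= 32; key_len += 8)
                printf("key_len %2d -> %d rounds\n", key_len, 6 + key_len / 4);

        return 0;
}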
arch/arm/crypto/Kconfig
@@ -73,6 +73,7 @@ config CRYPTO_AES_ARM_BS
 	depends on KERNEL_MODE_NEON
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_SIMD
+	select CRYPTO_AES_ARM
 	help
 	  Use a faster and more secure NEON based implementation of AES in CBC,
 	  CTR and XTS modes
arch/arm/crypto/Makefile
@@ -28,7 +28,7 @@ endif
 endif

 aes-arm-y := aes-cipher-core.o aes-cipher-glue.o
-aes-arm-bs-y := aes-armv4.o aesbs-core.o aesbs-glue.o
+aes-arm-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
 sha1-arm-y := sha1-armv4-large.o sha1_glue.o
 sha1-arm-neon-y := sha1-armv7-neon.o sha1_neon_glue.o
 sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o

@@ -46,13 +46,10 @@ chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
 quiet_cmd_perl = PERL $@
 cmd_perl = $(PERL) $(<) > $(@)

-$(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
-	$(call cmd,perl)
-
 $(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl
 	$(call cmd,perl)

 $(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl
 	$(call cmd,perl)

-.PRECIOUS: $(obj)/aesbs-core.S $(obj)/sha256-core.S $(obj)/sha512-core.S
+.PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S
arch/arm/crypto/aes-armv4.S (deleted)
File diff suppressed because it is too large

arch/arm/crypto/aes-neonbs-core.S (new file)
File diff suppressed because it is too large
arch/arm/crypto/aes-neonbs-glue.c (new file)
@@ -0,0 +1,405 @@
/*
 * Bit sliced AES using NEON instructions
 *
 * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/neon.h>
#include <crypto/aes.h>
#include <crypto/cbc.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <linux/module.h>

MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");

asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);

asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks);
asmlinkage void aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks);

asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks, u8 iv[]);

asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks, u8 ctr[], bool final);

asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks, u8 iv[]);
asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks, u8 iv[]);

asmlinkage void __aes_arm_encrypt(const u32 rk[], int rounds, const u8 in[],
                                  u8 out[]);

struct aesbs_ctx {
        int     rounds;
        u8      rk[13 * (8 * AES_BLOCK_SIZE) + 32] __aligned(AES_BLOCK_SIZE);
};

struct aesbs_cbc_ctx {
        struct aesbs_ctx        key;
        u32                     enc[AES_MAX_KEYLENGTH_U32];
};

struct aesbs_xts_ctx {
        struct aesbs_ctx        key;
        u32                     twkey[AES_MAX_KEYLENGTH_U32];
};

static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
                        unsigned int key_len)
{
        struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_aes_ctx rk;
        int err;

        err = crypto_aes_expand_key(&rk, in_key, key_len);
        if (err)
                return err;

        ctx->rounds = 6 + key_len / 4;

        kernel_neon_begin();
        aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds);
        kernel_neon_end();

        return 0;
}

static int __ecb_crypt(struct skcipher_request *req,
                       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_neon_begin();
        while (walk.nbytes >= AES_BLOCK_SIZE) {
                unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

                if (walk.nbytes < walk.total)
                        blocks = round_down(blocks,
                                            walk.stride / AES_BLOCK_SIZE);

                fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
                   ctx->rounds, blocks);
                err = skcipher_walk_done(&walk,
                                         walk.nbytes - blocks * AES_BLOCK_SIZE);
        }
        kernel_neon_end();

        return err;
}

static int ecb_encrypt(struct skcipher_request *req)
{
        return __ecb_crypt(req, aesbs_ecb_encrypt);
}

static int ecb_decrypt(struct skcipher_request *req)
{
        return __ecb_crypt(req, aesbs_ecb_decrypt);
}

static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
                            unsigned int key_len)
{
        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_aes_ctx rk;
        int err;

        err = crypto_aes_expand_key(&rk, in_key, key_len);
        if (err)
                return err;

        ctx->key.rounds = 6 + key_len / 4;

        memcpy(ctx->enc, rk.key_enc, sizeof(ctx->enc));

        kernel_neon_begin();
        aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
        kernel_neon_end();

        return 0;
}

static void cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);

        __aes_arm_encrypt(ctx->enc, ctx->key.rounds, src, dst);
}

static int cbc_encrypt(struct skcipher_request *req)
{
        return crypto_cbc_encrypt_walk(req, cbc_encrypt_one);
}

static int cbc_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_neon_begin();
        while (walk.nbytes >= AES_BLOCK_SIZE) {
                unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

                if (walk.nbytes < walk.total)
                        blocks = round_down(blocks,
                                            walk.stride / AES_BLOCK_SIZE);

                aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                  ctx->key.rk, ctx->key.rounds, blocks,
                                  walk.iv);
                err = skcipher_walk_done(&walk,
                                         walk.nbytes - blocks * AES_BLOCK_SIZE);
        }
        kernel_neon_end();

        return err;
}

static int ctr_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_neon_begin();
        while (walk.nbytes > 0) {
                unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
                bool final = (walk.total % AES_BLOCK_SIZE) != 0;

                if (walk.nbytes < walk.total) {
                        blocks = round_down(blocks,
                                            walk.stride / AES_BLOCK_SIZE);
                        final = false;
                }

                aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                  ctx->rk, ctx->rounds, blocks, walk.iv, final);

                if (final) {
                        u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
                        u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;

                        if (dst != src)
                                memcpy(dst, src, walk.total % AES_BLOCK_SIZE);
                        crypto_xor(dst, walk.iv, walk.total % AES_BLOCK_SIZE);

                        err = skcipher_walk_done(&walk, 0);
                        break;
                }
                err = skcipher_walk_done(&walk,
                                         walk.nbytes - blocks * AES_BLOCK_SIZE);
        }
        kernel_neon_end();

        return err;
}

static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
                            unsigned int key_len)
{
        struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_aes_ctx rk;
        int err;

        err = xts_verify_key(tfm, in_key, key_len);
        if (err)
                return err;

        key_len /= 2;
        err = crypto_aes_expand_key(&rk, in_key + key_len, key_len);
        if (err)
                return err;

        memcpy(ctx->twkey, rk.key_enc, sizeof(ctx->twkey));

        return aesbs_setkey(tfm, in_key, key_len);
}

static int __xts_crypt(struct skcipher_request *req,
                       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks, u8 iv[]))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        __aes_arm_encrypt(ctx->twkey, ctx->key.rounds, walk.iv, walk.iv);

        kernel_neon_begin();
        while (walk.nbytes >= AES_BLOCK_SIZE) {
                unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

                if (walk.nbytes < walk.total)
                        blocks = round_down(blocks,
                                            walk.stride / AES_BLOCK_SIZE);

                fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
                   ctx->key.rounds, blocks, walk.iv);
                err = skcipher_walk_done(&walk,
                                         walk.nbytes - blocks * AES_BLOCK_SIZE);
        }
        kernel_neon_end();

        return err;
}

static int xts_encrypt(struct skcipher_request *req)
{
        return __xts_crypt(req, aesbs_xts_encrypt);
}

static int xts_decrypt(struct skcipher_request *req)
{
        return __xts_crypt(req, aesbs_xts_decrypt);
}

static struct skcipher_alg aes_algs[] = { {
        .base.cra_name          = "__ecb(aes)",
        .base.cra_driver_name   = "__ecb-aes-neonbs",
        .base.cra_priority      = 250,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct aesbs_ctx),
        .base.cra_module        = THIS_MODULE,
        .base.cra_flags         = CRYPTO_ALG_INTERNAL,

        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .walksize               = 8 * AES_BLOCK_SIZE,
        .setkey                 = aesbs_setkey,
        .encrypt                = ecb_encrypt,
        .decrypt                = ecb_decrypt,
}, {
        .base.cra_name          = "__cbc(aes)",
        .base.cra_driver_name   = "__cbc-aes-neonbs",
        .base.cra_priority      = 250,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct aesbs_cbc_ctx),
        .base.cra_module        = THIS_MODULE,
        .base.cra_flags         = CRYPTO_ALG_INTERNAL,

        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .walksize               = 8 * AES_BLOCK_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .setkey                 = aesbs_cbc_setkey,
        .encrypt                = cbc_encrypt,
        .decrypt                = cbc_decrypt,
}, {
        .base.cra_name          = "__ctr(aes)",
        .base.cra_driver_name   = "__ctr-aes-neonbs",
        .base.cra_priority      = 250,
        .base.cra_blocksize     = 1,
        .base.cra_ctxsize       = sizeof(struct aesbs_ctx),
        .base.cra_module        = THIS_MODULE,
        .base.cra_flags         = CRYPTO_ALG_INTERNAL,

        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .chunksize              = AES_BLOCK_SIZE,
        .walksize               = 8 * AES_BLOCK_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .setkey                 = aesbs_setkey,
        .encrypt                = ctr_encrypt,
        .decrypt                = ctr_encrypt,
}, {
        .base.cra_name          = "__xts(aes)",
        .base.cra_driver_name   = "__xts-aes-neonbs",
        .base.cra_priority      = 250,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct aesbs_xts_ctx),
        .base.cra_module        = THIS_MODULE,
        .base.cra_flags         = CRYPTO_ALG_INTERNAL,

        .min_keysize            = 2 * AES_MIN_KEY_SIZE,
        .max_keysize            = 2 * AES_MAX_KEY_SIZE,
        .walksize               = 8 * AES_BLOCK_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .setkey                 = aesbs_xts_setkey,
        .encrypt                = xts_encrypt,
        .decrypt                = xts_decrypt,
} };

static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];

static void aes_exit(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(aes_simd_algs); i++)
                if (aes_simd_algs[i])
                        simd_skcipher_free(aes_simd_algs[i]);

        crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

static int __init aes_init(void)
{
        struct simd_skcipher_alg *simd;
        const char *basename;
        const char *algname;
        const char *drvname;
        int err;
        int i;

        if (!(elf_hwcap & HWCAP_NEON))
                return -ENODEV;

        err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
        if (err)
                return err;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
                        continue;

                algname = aes_algs[i].base.cra_name + 2;
                drvname = aes_algs[i].base.cra_driver_name + 2;
                basename = aes_algs[i].base.cra_driver_name;
                simd = simd_skcipher_create_compat(algname, drvname, basename);
                err = PTR_ERR(simd);
                if (IS_ERR(simd))
                        goto unregister_simds;

                aes_simd_algs[i] = simd;
        }
        return 0;

unregister_simds:
        aes_exit();
        return err;
}

module_init(aes_init);
module_exit(aes_exit);
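
For context, once the simd wrappers are registered an in-kernel user reaches
these algorithms by requesting the plain name ("xts(aes)" and so on); the
crypto core then picks the highest-priority implementation, which on a
NEON-capable system can be the wrapper around "__xts-aes-neonbs" above. A
hedged sketch of that flow using the standard skcipher API (not code from
this commit; crypto_wait_req()/DECLARE_CRYPTO_WAIT are helpers from later
kernels, and the key bytes and buffer size are placeholders):

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int demo_xts_encrypt(void)
{
        u8 key[64] = { 0x01 };  /* placeholder: two 256-bit key halves */
        u8 iv[16] = { 0 };      /* placeholder sector tweak */
        struct crypto_skcipher *tfm;
        struct skcipher_request *req = NULL;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        u8 *buf = NULL;
        int err;

        /* Resolves to the highest-priority "xts(aes)" implementation. */
        tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key, sizeof(key));
        if (err)
                goto out;

        buf = kzalloc(512, GFP_KERNEL);
        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!buf || !req) {
                err = -ENOMEM;
                goto out;
        }

        sg_init_one(&sg, buf, 512);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, 512, iv);

        /* Encrypt 512 bytes in place; waits if the simd wrapper had to
         * defer to cryptd because NEON was not usable in this context. */
        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
        skcipher_request_free(req);
        kfree(buf);
        crypto_free_skcipher(tfm);
        return err;
}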
arch/arm/crypto/aes_glue.h (deleted)
@@ -1,19 +0,0 @@
#define AES_MAXNR 14

struct AES_KEY {
	unsigned int rd_key[4 * (AES_MAXNR + 1)];
	int rounds;
};

struct AES_CTX {
	struct AES_KEY enc_key;
	struct AES_KEY dec_key;
};

asmlinkage void AES_encrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
asmlinkage void AES_decrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey,
					   const int bits, struct AES_KEY *key);
asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey,
					   const int bits, struct AES_KEY *key);
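
This header can go away because the new glue no longer uses the OpenSSL-style
key structures and entry points: key expansion goes through the generic
crypto_aes_expand_key() helper instead, as aesbs_setkey() above shows. A
minimal sketch of the replacement pattern (kernel context assumed; the
function name example_expand is hypothetical):

#include <crypto/aes.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical helper: expand a raw AES key the way the new glue does,
 * via crypto_aes_expand_key(), instead of private_AES_set_encrypt_key()
 * declared in the deleted header above. */
static int example_expand(const u8 *in_key, unsigned int key_len, u32 *out_rk)
{
        struct crypto_aes_ctx rk;
        int rounds = 6 + key_len / 4;   /* 10, 12 or 14 */
        int err;

        err = crypto_aes_expand_key(&rk, in_key, key_len);
        if (err)
                return err;

        /* rounds + 1 round keys of four 32-bit words each */
        memcpy(out_rk, rk.key_enc, (rounds + 1) * 4 * sizeof(u32));
        return rounds;
}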
arch/arm/crypto/aesbs-core.S_shipped (deleted)
File diff suppressed because it is too large
arch/arm/crypto/aesbs-glue.c (deleted)
@@ -1,367 +0,0 @@
/*
 * linux/arch/arm/crypto/aesbs-glue.c - glue code for NEON bit sliced AES
 *
 * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/neon.h>
#include <crypto/aes.h>
#include <crypto/cbc.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>
#include <crypto/xts.h>

#include "aes_glue.h"

#define BIT_SLICED_KEY_MAXSIZE  (128 * (AES_MAXNR - 1) + 2 * AES_BLOCK_SIZE)

struct BS_KEY {
        struct AES_KEY  rk;
        int             converted;
        u8 __aligned(8) bs[BIT_SLICED_KEY_MAXSIZE];
} __aligned(8);

asmlinkage void bsaes_enc_key_convert(u8 out[], struct AES_KEY const *in);
asmlinkage void bsaes_dec_key_convert(u8 out[], struct AES_KEY const *in);

asmlinkage void bsaes_cbc_encrypt(u8 const in[], u8 out[], u32 bytes,
                                  struct BS_KEY *key, u8 iv[]);

asmlinkage void bsaes_ctr32_encrypt_blocks(u8 const in[], u8 out[], u32 blocks,
                                           struct BS_KEY *key, u8 const iv[]);

asmlinkage void bsaes_xts_encrypt(u8 const in[], u8 out[], u32 bytes,
                                  struct BS_KEY *key, u8 tweak[]);

asmlinkage void bsaes_xts_decrypt(u8 const in[], u8 out[], u32 bytes,
                                  struct BS_KEY *key, u8 tweak[]);

struct aesbs_cbc_ctx {
        struct AES_KEY  enc;
        struct BS_KEY   dec;
};

struct aesbs_ctr_ctx {
        struct BS_KEY   enc;
};

struct aesbs_xts_ctx {
        struct BS_KEY   enc;
        struct BS_KEY   dec;
        struct AES_KEY  twkey;
};

static int aesbs_cbc_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
                             unsigned int key_len)
{
        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
        int bits = key_len * 8;

        if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc)) {
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        ctx->dec.rk = ctx->enc;
        private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
        ctx->dec.converted = 0;
        return 0;
}

static int aesbs_ctr_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
                             unsigned int key_len)
{
        struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
        int bits = key_len * 8;

        if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        ctx->enc.converted = 0;
        return 0;
}

static int aesbs_xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
                             unsigned int key_len)
{
        struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int bits = key_len * 4;
        int err;

        err = xts_verify_key(tfm, in_key, key_len);
        if (err)
                return err;

        if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        ctx->dec.rk = ctx->enc.rk;
        private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
        private_AES_set_encrypt_key(in_key + key_len / 2, bits, &ctx->twkey);
        ctx->enc.converted = ctx->dec.converted = 0;
        return 0;
}

static inline void aesbs_encrypt_one(struct crypto_skcipher *tfm,
                                     const u8 *src, u8 *dst)
{
        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);

        AES_encrypt(src, dst, &ctx->enc);
}

static int aesbs_cbc_encrypt(struct skcipher_request *req)
{
        return crypto_cbc_encrypt_walk(req, aesbs_encrypt_one);
}

static inline void aesbs_decrypt_one(struct crypto_skcipher *tfm,
                                     const u8 *src, u8 *dst)
{
        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);

        AES_decrypt(src, dst, &ctx->dec.rk);
}

static int aesbs_cbc_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        for (err = skcipher_walk_virt(&walk, req, false);
             (nbytes = walk.nbytes); err = skcipher_walk_done(&walk, nbytes)) {
                u32 blocks = nbytes / AES_BLOCK_SIZE;
                u8 *dst = walk.dst.virt.addr;
                u8 *src = walk.src.virt.addr;
                u8 *iv = walk.iv;

                if (blocks >= 8) {
                        kernel_neon_begin();
                        bsaes_cbc_encrypt(src, dst, nbytes, &ctx->dec, iv);
                        kernel_neon_end();
                        nbytes %= AES_BLOCK_SIZE;
                        continue;
                }

                nbytes = crypto_cbc_decrypt_blocks(&walk, tfm,
                                                   aesbs_decrypt_one);
        }
        return err;
}

static void inc_be128_ctr(__be32 ctr[], u32 addend)
{
        int i;

        for (i = 3; i >= 0; i--, addend = 1) {
                u32 n = be32_to_cpu(ctr[i]) + addend;

                ctr[i] = cpu_to_be32(n);
                if (n >= addend)
                        break;
        }
}

static int aesbs_ctr_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        u32 blocks;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
                u32 tail = walk.nbytes % AES_BLOCK_SIZE;
                __be32 *ctr = (__be32 *)walk.iv;
                u32 headroom = UINT_MAX - be32_to_cpu(ctr[3]);

                /* avoid 32 bit counter overflow in the NEON code */
                if (unlikely(headroom < blocks)) {
                        blocks = headroom + 1;
                        tail = walk.nbytes - blocks * AES_BLOCK_SIZE;
                }
                kernel_neon_begin();
                bsaes_ctr32_encrypt_blocks(walk.src.virt.addr,
                                           walk.dst.virt.addr, blocks,
                                           &ctx->enc, walk.iv);
                kernel_neon_end();
                inc_be128_ctr(ctr, blocks);

                err = skcipher_walk_done(&walk, tail);
        }
        if (walk.nbytes) {
                u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
                u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
                u8 ks[AES_BLOCK_SIZE];

                AES_encrypt(walk.iv, ks, &ctx->enc.rk);
                if (tdst != tsrc)
                        memcpy(tdst, tsrc, walk.nbytes);
                crypto_xor(tdst, ks, walk.nbytes);
                err = skcipher_walk_done(&walk, 0);
        }
        return err;
}

static int aesbs_xts_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        /* generate the initial tweak */
        AES_encrypt(walk.iv, walk.iv, &ctx->twkey);

        while (walk.nbytes) {
                kernel_neon_begin();
                bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                  walk.nbytes, &ctx->enc, walk.iv);
                kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
}

static int aesbs_xts_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        /* generate the initial tweak */
        AES_encrypt(walk.iv, walk.iv, &ctx->twkey);

        while (walk.nbytes) {
                kernel_neon_begin();
                bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                  walk.nbytes, &ctx->dec, walk.iv);
                kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
}

static struct skcipher_alg aesbs_algs[] = { {
        .base = {
                .cra_name               = "__cbc(aes)",
                .cra_driver_name        = "__cbc-aes-neonbs",
                .cra_priority           = 300,
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = AES_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct aesbs_cbc_ctx),
                .cra_alignmask          = 7,
                .cra_module             = THIS_MODULE,
        },
        .min_keysize    = AES_MIN_KEY_SIZE,
        .max_keysize    = AES_MAX_KEY_SIZE,
        .ivsize         = AES_BLOCK_SIZE,
        .setkey         = aesbs_cbc_set_key,
        .encrypt        = aesbs_cbc_encrypt,
        .decrypt        = aesbs_cbc_decrypt,
}, {
        .base = {
                .cra_name               = "__ctr(aes)",
                .cra_driver_name        = "__ctr-aes-neonbs",
                .cra_priority           = 300,
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct aesbs_ctr_ctx),
                .cra_alignmask          = 7,
                .cra_module             = THIS_MODULE,
        },
        .min_keysize    = AES_MIN_KEY_SIZE,
        .max_keysize    = AES_MAX_KEY_SIZE,
        .ivsize         = AES_BLOCK_SIZE,
        .chunksize      = AES_BLOCK_SIZE,
        .setkey         = aesbs_ctr_set_key,
        .encrypt        = aesbs_ctr_encrypt,
        .decrypt        = aesbs_ctr_encrypt,
}, {
        .base = {
                .cra_name               = "__xts(aes)",
                .cra_driver_name        = "__xts-aes-neonbs",
                .cra_priority           = 300,
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = AES_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct aesbs_xts_ctx),
                .cra_alignmask          = 7,
                .cra_module             = THIS_MODULE,
        },
        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
        .ivsize         = AES_BLOCK_SIZE,
        .setkey         = aesbs_xts_set_key,
        .encrypt        = aesbs_xts_encrypt,
        .decrypt        = aesbs_xts_decrypt,
} };

struct simd_skcipher_alg *aesbs_simd_algs[ARRAY_SIZE(aesbs_algs)];

static void aesbs_mod_exit(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(aesbs_simd_algs) && aesbs_simd_algs[i]; i++)
                simd_skcipher_free(aesbs_simd_algs[i]);

        crypto_unregister_skciphers(aesbs_algs, ARRAY_SIZE(aesbs_algs));
}

static int __init aesbs_mod_init(void)
{
        struct simd_skcipher_alg *simd;
        const char *basename;
        const char *algname;
        const char *drvname;
        int err;
        int i;

        if (!cpu_has_neon())
                return -ENODEV;

        err = crypto_register_skciphers(aesbs_algs, ARRAY_SIZE(aesbs_algs));
        if (err)
                return err;

        for (i = 0; i < ARRAY_SIZE(aesbs_algs); i++) {
                algname = aesbs_algs[i].base.cra_name + 2;
                drvname = aesbs_algs[i].base.cra_driver_name + 2;
                basename = aesbs_algs[i].base.cra_driver_name;
                simd = simd_skcipher_create_compat(algname, drvname, basename);
                err = PTR_ERR(simd);
                if (IS_ERR(simd))
                        goto unregister_simds;

                aesbs_simd_algs[i] = simd;
        }

        return 0;

unregister_simds:
        aesbs_mod_exit();
        return err;
}

module_init(aesbs_mod_init);
module_exit(aesbs_mod_exit);

MODULE_DESCRIPTION("Bit sliced AES in CBC/CTR/XTS modes using NEON");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL");
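
One idiom from the deleted file worth a closer look: inc_be128_ctr() is a
big-endian 128-bit counter increment with carry propagation across four
32-bit words. A standalone restatement of the same logic in plain user-space
C (the be32 conversions are dropped here, so the words are kept in host
order; the example values are made up):

#include <stdint.h>
#include <stdio.h>

/* Add 'addend' to a 128-bit counter held as four 32-bit words,
 * most significant word first, carrying from the low word upwards. */
static void inc_128bit_ctr(uint32_t ctr[4], uint32_t addend)
{
        for (int i = 3; i >= 0; i--, addend = 1) {
                uint32_t n = ctr[i] + addend;

                ctr[i] = n;
                if (n >= addend)        /* no wraparound: carry resolved */
                        break;
        }
}

int main(void)
{
        uint32_t ctr[4] = { 0, 0, 0, 0xffffffff };

        inc_128bit_ctr(ctr, 2);         /* overflows the low word */
        /* prints: 00000000 00000000 00000001 00000001 */
        printf("%08x %08x %08x %08x\n", ctr[0], ctr[1], ctr[2], ctr[3]);
        return 0;
}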
arch/arm/crypto/bsaes-armv7.pl (deleted)
File diff suppressed because it is too large