// SPDX-License-Identifier: GPL-2.0-only
/*
 * Accelerated GHASH implementation with Intel PCLMULQDQ-NI
 * instructions. This file contains glue code.
 *
 * Copyright (c) 2009 Intel Corp.
 *   Author: Huang Ying <ying.huang@intel.com>
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/cryptd.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>

#define GHASH_BLOCK_SIZE	16
#define GHASH_DIGEST_SIZE	16

void clmul_ghash_mul(char *dst, const u128 *shash);

void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
			const u128 *shash);

struct ghash_async_ctx {
	struct cryptd_ahash *cryptd_tfm;
};

struct ghash_ctx {
	u128 shash;
};

struct ghash_desc_ctx {
	u8 buffer[GHASH_BLOCK_SIZE];
	u32 bytes;
};

static int ghash_init(struct shash_desc *desc)
{
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);

	memset(dctx, 0, sizeof(*dctx));

	return 0;
}

static int ghash_setkey(struct crypto_shash *tfm,
			const u8 *key, unsigned int keylen)
{
	struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
	be128 *x = (be128 *)key;
	u64 a, b;

	if (keylen != GHASH_BLOCK_SIZE)
		return -EINVAL;

	/* perform multiplication by 'x' in GF(2^128) */
	a = be64_to_cpu(x->a);
	b = be64_to_cpu(x->b);

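	/*
	 * The key is doubled in GF(2^128), the field defined by the GHASH
	 * polynomial x^128 + x^7 + x^2 + x + 1: the 128-bit value is shifted
	 * by one bit, with the 64-bit halves swapped into the layout the
	 * assembler routines expect, and a bit shifted out of the top is
	 * reduced back in via the 0xc2 << 56 constant below.
	 */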
	ctx->shash.a = (b << 1) | (a >> 63);
	ctx->shash.b = (a << 1) | (b >> 63);

	if (a >> 63)
		ctx->shash.b ^= ((u64)0xc2) << 56;

	return 0;
}

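/*
 * dctx->bytes holds how many bytes are still needed to complete the partial
 * block buffered in dctx->buffer.  The clmul_ghash_* helpers use XMM
 * registers, so each call is wrapped in kernel_fpu_begin()/kernel_fpu_end().
 */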
static int ghash_update(struct shash_desc *desc,
			 const u8 *src, unsigned int srclen)
{
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
	u8 *dst = dctx->buffer;

	kernel_fpu_begin();
	if (dctx->bytes) {
		int n = min(srclen, dctx->bytes);
		u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);

		dctx->bytes -= n;
		srclen -= n;

		while (n--)
			*pos++ ^= *src++;

		if (!dctx->bytes)
			clmul_ghash_mul(dst, &ctx->shash);
	}

	clmul_ghash_update(dst, src, srclen, &ctx->shash);
	kernel_fpu_end();

	if (srclen & 0xf) {
		src += srclen - (srclen & 0xf);
		srclen &= 0xf;
		dctx->bytes = GHASH_BLOCK_SIZE - srclen;
		while (srclen--)
			*dst++ ^= *src++;
	}

	return 0;
}

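/*
 * Consume whatever is left in the partial-block buffer.  Zero-padding the
 * last block is a no-op under XOR (the loop below makes that explicit), so
 * finishing the digest only needs one more multiplication by the hash key.
 */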
static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
	u8 *dst = dctx->buffer;

	if (dctx->bytes) {
		u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);

		while (dctx->bytes--)
			*tmp++ ^= 0;

		kernel_fpu_begin();
		clmul_ghash_mul(dst, &ctx->shash);
		kernel_fpu_end();
	}

	dctx->bytes = 0;
}

static int ghash_final(struct shash_desc *desc, u8 *dst)
{
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
	u8 *buf = dctx->buffer;

	ghash_flush(ctx, dctx);
	memcpy(dst, buf, GHASH_BLOCK_SIZE);

	return 0;
}

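/*
 * The bare PCLMULQDQ shash is registered as CRYPTO_ALG_INTERNAL with
 * priority 0: it cannot be requested directly by users and is only reached
 * through the cryptd-backed "ghash-clmulni" ahash defined below.
 */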
static struct shash_alg ghash_alg = {
	.digestsize	= GHASH_DIGEST_SIZE,
	.init		= ghash_init,
	.update		= ghash_update,
	.final		= ghash_final,
	.setkey		= ghash_setkey,
	.descsize	= sizeof(struct ghash_desc_ctx),
	.base		= {
		.cra_name		= "__ghash",
		.cra_driver_name	= "__ghash-pclmulqdqni",
		.cra_priority		= 0,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= GHASH_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct ghash_ctx),
		.cra_module		= THIS_MODULE,
	},
};

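/*
 * Async front end: when SIMD is usable and, in atomic context, no requests
 * are already queued on cryptd, operate on the underlying PCLMULQDQ shash
 * directly; otherwise defer the request to cryptd so it is processed later
 * in process context.
 */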
static int ghash_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *cryptd_req = ahash_request_ctx(req);
	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
	struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);

	desc->tfm = child;
	return crypto_shash_init(desc);
}

static int ghash_async_update(struct ahash_request *req)
{
	struct ahash_request *cryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
		memcpy(cryptd_req, req, sizeof(*req));
		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
		return crypto_ahash_update(cryptd_req);
	} else {
		struct shash_desc *desc = cryptd_shash_desc(cryptd_req);

		return shash_ahash_update(req, desc);
	}
}

static int ghash_async_final(struct ahash_request *req)
{
	struct ahash_request *cryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
		memcpy(cryptd_req, req, sizeof(*req));
		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
		return crypto_ahash_final(cryptd_req);
	} else {
		struct shash_desc *desc = cryptd_shash_desc(cryptd_req);

		return crypto_shash_final(desc, req->result);
	}
}

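/*
 * Export/import copy the underlying shash descriptor state, matching the
 * .statesize advertised below; import runs the init path first so the child
 * descriptor is set up before the saved state is copied back in.
 */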
static int ghash_async_import(struct ahash_request *req, const void *in)
{
	struct ahash_request *cryptd_req = ahash_request_ctx(req);
	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);

	ghash_async_init(req);
	memcpy(dctx, in, sizeof(*dctx));
	return 0;
}

static int ghash_async_export(struct ahash_request *req, void *out)
{
	struct ahash_request *cryptd_req = ahash_request_ctx(req);
	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);

	memcpy(out, dctx, sizeof(*dctx));
	return 0;
}

static int ghash_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *cryptd_req = ahash_request_ctx(req);
	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
		memcpy(cryptd_req, req, sizeof(*req));
		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
		return crypto_ahash_digest(cryptd_req);
	} else {
		struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
		struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);

		desc->tfm = child;
		return shash_ahash_digest(req, desc);
	}
}

static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_ahash *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm)
			       & CRYPTO_TFM_REQ_MASK);
	err = crypto_ahash_setkey(child, key, keylen);
	crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child)
			       & CRYPTO_TFM_RES_MASK);

	return err;
}

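/*
 * Bind each "ghash-clmulni" instance to a cryptd-wrapped
 * "__ghash-pclmulqdqni" child and size the request context so a nested
 * cryptd request fits inside it.
 */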
static int ghash_async_init_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_ahash *cryptd_tfm;
	struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_tfm = cryptd_alloc_ahash("__ghash-pclmulqdqni",
					CRYPTO_ALG_INTERNAL,
					CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ctx->cryptd_tfm = cryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 crypto_ahash_reqsize(&cryptd_tfm->base));

	return 0;
}

static void ghash_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ahash(ctx->cryptd_tfm);
}

static struct ahash_alg ghash_async_alg = {
	.init		= ghash_async_init,
	.update		= ghash_async_update,
	.final		= ghash_async_final,
	.setkey		= ghash_async_setkey,
	.digest		= ghash_async_digest,
	.export		= ghash_async_export,
	.import		= ghash_async_import,
	.halg = {
		.digestsize	= GHASH_DIGEST_SIZE,
		.statesize	= sizeof(struct ghash_desc_ctx),
		.base = {
			.cra_name		= "ghash",
			.cra_driver_name	= "ghash-clmulni",
			.cra_priority		= 400,
			.cra_ctxsize		= sizeof(struct ghash_async_ctx),
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= GHASH_BLOCK_SIZE,
			.cra_module		= THIS_MODULE,
			.cra_init		= ghash_async_init_tfm,
			.cra_exit		= ghash_async_exit_tfm,
		},
	},
};

static const struct x86_cpu_id pcmul_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ), /* Pickle-Mickle-Duck */
	{}
};
MODULE_DEVICE_TABLE(x86cpu, pcmul_cpu_id);

static int __init ghash_pclmulqdqni_mod_init(void)
{
	int err;

	if (!x86_match_cpu(pcmul_cpu_id))
		return -ENODEV;

	err = crypto_register_shash(&ghash_alg);
	if (err)
		goto err_out;
	err = crypto_register_ahash(&ghash_async_alg);
	if (err)
		goto err_shash;

	return 0;

err_shash:
	crypto_unregister_shash(&ghash_alg);
err_out:
	return err;
}

static void __exit ghash_pclmulqdqni_mod_exit(void)
{
	crypto_unregister_ahash(&ghash_async_alg);
	crypto_unregister_shash(&ghash_alg);
}

module_init(ghash_pclmulqdqni_mod_init);
module_exit(ghash_pclmulqdqni_mod_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH hash function, accelerated by PCLMULQDQ-NI");
MODULE_ALIAS_CRYPTO("ghash");