crypto: echainiv - Replace chaining with multiplication

The current implementation uses a global per-cpu array to store
data which are used to derive the next IV.  This is insecure as
an attacker may change the stored data.

This patch removes all traces of chaining and replaces it with
multiplication of the salt and the sequence number.

Fixes: a10f554fa7 ("crypto: echainiv - Add encrypted chain IV generator")
Cc: stable@vger.kernel.org
Reported-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
Herbert Xu 2016-09-07 18:42:08 +08:00
parent 0bd2223594
commit 53a5d5ddcc
1 changed file with 25 additions and 92 deletions

View File

@ -1,8 +1,8 @@
/* /*
* echainiv: Encrypted Chain IV Generator * echainiv: Encrypted Chain IV Generator
* *
* This generator generates an IV based on a sequence number by xoring it * This generator generates an IV based on a sequence number by multiplying
* with a salt and then encrypting it with the same key as used to encrypt * it with a salt and then encrypting it with the same key as used to encrypt
* the plain text. This algorithm requires that the block size be equal * the plain text. This algorithm requires that the block size be equal
* to the IV size. It is mainly useful for CBC. * to the IV size. It is mainly useful for CBC.
* *
@ -24,81 +24,17 @@
#include <linux/err.h> #include <linux/err.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/percpu.h> #include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h> #include <linux/string.h>
#define MAX_IV_SIZE 16
static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
/* We don't care if we get preempted and read/write IVs from the next CPU. */
static void echainiv_read_iv(u8 *dst, unsigned size)
{
u32 *a = (u32 *)dst;
u32 __percpu *b = echainiv_iv;
for (; size >= 4; size -= 4) {
*a++ = this_cpu_read(*b);
b++;
}
}
static void echainiv_write_iv(const u8 *src, unsigned size)
{
const u32 *a = (const u32 *)src;
u32 __percpu *b = echainiv_iv;
for (; size >= 4; size -= 4) {
this_cpu_write(*b, *a);
a++;
b++;
}
}
static void echainiv_encrypt_complete2(struct aead_request *req, int err)
{
struct aead_request *subreq = aead_request_ctx(req);
struct crypto_aead *geniv;
unsigned int ivsize;
if (err == -EINPROGRESS)
return;
if (err)
goto out;
geniv = crypto_aead_reqtfm(req);
ivsize = crypto_aead_ivsize(geniv);
echainiv_write_iv(subreq->iv, ivsize);
if (req->iv != subreq->iv)
memcpy(req->iv, subreq->iv, ivsize);
out:
if (req->iv != subreq->iv)
kzfree(subreq->iv);
}
static void echainiv_encrypt_complete(struct crypto_async_request *base,
int err)
{
struct aead_request *req = base->data;
echainiv_encrypt_complete2(req, err);
aead_request_complete(req, err);
}
static int echainiv_encrypt(struct aead_request *req) static int echainiv_encrypt(struct aead_request *req)
{ {
struct crypto_aead *geniv = crypto_aead_reqtfm(req); struct crypto_aead *geniv = crypto_aead_reqtfm(req);
struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
struct aead_request *subreq = aead_request_ctx(req); struct aead_request *subreq = aead_request_ctx(req);
crypto_completion_t compl; __be64 nseqno;
void *data; u64 seqno;
u8 *info; u8 *info;
unsigned int ivsize = crypto_aead_ivsize(geniv); unsigned int ivsize = crypto_aead_ivsize(geniv);
int err; int err;
@ -108,8 +44,6 @@ static int echainiv_encrypt(struct aead_request *req)
aead_request_set_tfm(subreq, ctx->child); aead_request_set_tfm(subreq, ctx->child);
compl = echainiv_encrypt_complete;
data = req;
info = req->iv; info = req->iv;
if (req->src != req->dst) { if (req->src != req->dst) {
@ -127,29 +61,30 @@ static int echainiv_encrypt(struct aead_request *req)
return err; return err;
} }
if (unlikely(!IS_ALIGNED((unsigned long)info, aead_request_set_callback(subreq, req->base.flags,
crypto_aead_alignmask(geniv) + 1))) { req->base.complete, req->base.data);
info = kmalloc(ivsize, req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
GFP_ATOMIC);
if (!info)
return -ENOMEM;
memcpy(info, req->iv, ivsize);
}
aead_request_set_callback(subreq, req->base.flags, compl, data);
aead_request_set_crypt(subreq, req->dst, req->dst, aead_request_set_crypt(subreq, req->dst, req->dst,
req->cryptlen, info); req->cryptlen, info);
aead_request_set_ad(subreq, req->assoclen); aead_request_set_ad(subreq, req->assoclen);
crypto_xor(info, ctx->salt, ivsize); memcpy(&nseqno, info + ivsize - 8, 8);
scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1); seqno = be64_to_cpu(nseqno);
echainiv_read_iv(info, ivsize); memset(info, 0, ivsize);
err = crypto_aead_encrypt(subreq); scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
echainiv_encrypt_complete2(req, err);
return err; do {
u64 a;
memcpy(&a, ctx->salt + ivsize - 8, 8);
a |= 1;
a *= seqno;
memcpy(info + ivsize - 8, &a, 8);
} while ((ivsize -= 8));
return crypto_aead_encrypt(subreq);
} }
static int echainiv_decrypt(struct aead_request *req) static int echainiv_decrypt(struct aead_request *req)
@ -196,8 +131,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
alg = crypto_spawn_aead_alg(spawn); alg = crypto_spawn_aead_alg(spawn);
err = -EINVAL; err = -EINVAL;
if (inst->alg.ivsize & (sizeof(u32) - 1) || if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
inst->alg.ivsize > MAX_IV_SIZE)
goto free_inst; goto free_inst;
inst->alg.encrypt = echainiv_encrypt; inst->alg.encrypt = echainiv_encrypt;
@ -206,7 +140,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
inst->alg.init = aead_init_geniv; inst->alg.init = aead_init_geniv;
inst->alg.exit = aead_exit_geniv; inst->alg.exit = aead_exit_geniv;
inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx); inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
inst->alg.base.cra_ctxsize += inst->alg.ivsize; inst->alg.base.cra_ctxsize += inst->alg.ivsize;