Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Linus Torvalds 2006-01-09 15:12:52 -08:00
commit 1fd5a46dd6
68 changed files with 1020 additions and 745 deletions

View File

@@ -257,16 +257,15 @@ aes_enc_blk:
 	sub $8,%esp		// space for register saves on stack
 	add $16,%ebp		// increment to next round key
-	sub $10,%r3
-	je 4f			// 10 rounds for 128-bit key
-	add $32,%ebp
-	sub $2,%r3
-	je 3f			// 12 rounds for 128-bit key
-	add $32,%ebp
+	cmp $12,%r3
+	jb 4f			// 10 rounds for 128-bit key
+	lea 32(%ebp),%ebp
+	je 3f			// 12 rounds for 192-bit key
+	lea 32(%ebp),%ebp
 
-2:	fwd_rnd1( -64(%ebp) ,ft_tab)	// 14 rounds for 128-bit key
+2:	fwd_rnd1( -64(%ebp) ,ft_tab)	// 14 rounds for 256-bit key
 	fwd_rnd2( -48(%ebp) ,ft_tab)
-3:	fwd_rnd1( -32(%ebp) ,ft_tab)	// 12 rounds for 128-bit key
+3:	fwd_rnd1( -32(%ebp) ,ft_tab)	// 12 rounds for 192-bit key
 	fwd_rnd2( -16(%ebp) ,ft_tab)
 4:	fwd_rnd1(    (%ebp) ,ft_tab)	// 10 rounds for 128-bit key
 	fwd_rnd2( +16(%ebp) ,ft_tab)
@@ -336,16 +335,15 @@ aes_dec_blk:
 	sub $8,%esp		// space for register saves on stack
 	sub $16,%ebp		// increment to next round key
-	sub $10,%r3
-	je 4f			// 10 rounds for 128-bit key
-	sub $32,%ebp
-	sub $2,%r3
-	je 3f			// 12 rounds for 128-bit key
-	sub $32,%ebp
+	cmp $12,%r3
+	jb 4f			// 10 rounds for 128-bit key
+	lea -32(%ebp),%ebp
+	je 3f			// 12 rounds for 192-bit key
+	lea -32(%ebp),%ebp
 
-2:	inv_rnd1( +64(%ebp), it_tab)	// 14 rounds for 128-bit key
+2:	inv_rnd1( +64(%ebp), it_tab)	// 14 rounds for 256-bit key
 	inv_rnd2( +48(%ebp), it_tab)
-3:	inv_rnd1( +32(%ebp), it_tab)	// 12 rounds for 128-bit key
+3:	inv_rnd1( +32(%ebp), it_tab)	// 12 rounds for 192-bit key
 	inv_rnd2( +16(%ebp), it_tab)
 4:	inv_rnd1(    (%ebp), it_tab)	// 10 rounds for 128-bit key
 	inv_rnd2( -16(%ebp), it_tab)
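A reading note on the two assembly hunks above: the old prologue worked out the round count by repeated subtraction in %r3 (clobbering it), while the new code does a single cmp against 12 and then uses jb/je plus lea (which leaves the flags untouched) to fall into the 14-, 12- or 10-round entry points. The C sketch below mirrors only that dispatch; do_two_rounds() and aes_rounds_sketch() are invented stand-ins for the fwd_rnd1()/fwd_rnd2() and inv_rnd1()/inv_rnd2() pairs, not kernel code.

#include <stdio.h>

static int rounds_done;

static void do_two_rounds(void)		/* stands in for a fwd_rnd1()/fwd_rnd2() pair */
{
	rounds_done += 2;
}

/* Same branch structure as the patched asm: one cmp against 12 selects the
 * 10-, 12- or 14-round entry point (labels 4, 3 and 2 in the assembly). */
static int aes_rounds_sketch(int nrounds)
{
	rounds_done = 0;

	if (nrounds < 12)		/* jb 4f */
		goto ten;
	if (nrounds == 12)		/* je 3f */
		goto twelve;

	do_two_rounds();		/* "2:" -- extra pair run only for 256-bit keys */
twelve:
	do_two_rounds();		/* "3:" -- run for 192- and 256-bit keys */
ten:
	do_two_rounds();		/* "4:" -- first pair common to all key sizes */
	do_two_rounds();		/* stand-ins for the remaining unrolled rounds */
	do_two_rounds();
	do_two_rounds();
	do_two_rounds();
	return rounds_done;
}

int main(void)
{
	printf("%d %d %d\n", aes_rounds_sketch(10), aes_rounds_sketch(12),
	       aes_rounds_sketch(14));	/* prints: 10 12 14 */
	return 0;
}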

View File

@ -36,6 +36,8 @@
* Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
* *
*/ */
#include <asm/byteorder.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
@ -59,7 +61,6 @@ struct aes_ctx {
}; };
#define WPOLY 0x011b #define WPOLY 0x011b
#define u32_in(x) le32_to_cpup((const __le32 *)(x))
#define bytes2word(b0, b1, b2, b3) \ #define bytes2word(b0, b1, b2, b3) \
(((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0)) (((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0))
@ -93,7 +94,6 @@ static u32 rcon_tab[RC_LENGTH];
u32 ft_tab[4][256]; u32 ft_tab[4][256];
u32 fl_tab[4][256]; u32 fl_tab[4][256];
static u32 ls_tab[4][256];
static u32 im_tab[4][256]; static u32 im_tab[4][256];
u32 il_tab[4][256]; u32 il_tab[4][256];
u32 it_tab[4][256]; u32 it_tab[4][256];
@ -144,15 +144,6 @@ static void gen_tabs(void)
fl_tab[2][i] = upr(w, 2); fl_tab[2][i] = upr(w, 2);
fl_tab[3][i] = upr(w, 3); fl_tab[3][i] = upr(w, 3);
/*
* table for key schedule if fl_tab above is
* not of the required form
*/
ls_tab[0][i] = w;
ls_tab[1][i] = upr(w, 1);
ls_tab[2][i] = upr(w, 2);
ls_tab[3][i] = upr(w, 3);
b = fi(inv_affine((u8)i)); b = fi(inv_affine((u8)i));
w = bytes2word(fe(b), f9(b), fd(b), fb(b)); w = bytes2word(fe(b), f9(b), fd(b), fb(b));
@ -393,13 +384,14 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
int i; int i;
u32 ss[8]; u32 ss[8];
struct aes_ctx *ctx = ctx_arg; struct aes_ctx *ctx = ctx_arg;
const __le32 *key = (const __le32 *)in_key;
/* encryption schedule */ /* encryption schedule */
ctx->ekey[0] = ss[0] = u32_in(in_key); ctx->ekey[0] = ss[0] = le32_to_cpu(key[0]);
ctx->ekey[1] = ss[1] = u32_in(in_key + 4); ctx->ekey[1] = ss[1] = le32_to_cpu(key[1]);
ctx->ekey[2] = ss[2] = u32_in(in_key + 8); ctx->ekey[2] = ss[2] = le32_to_cpu(key[2]);
ctx->ekey[3] = ss[3] = u32_in(in_key + 12); ctx->ekey[3] = ss[3] = le32_to_cpu(key[3]);
switch(key_len) { switch(key_len) {
case 16: case 16:
@ -410,8 +402,8 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
break; break;
case 24: case 24:
ctx->ekey[4] = ss[4] = u32_in(in_key + 16); ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]);
ctx->ekey[5] = ss[5] = u32_in(in_key + 20); ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]);
for (i = 0; i < 7; i++) for (i = 0; i < 7; i++)
ke6(ctx->ekey, i); ke6(ctx->ekey, i);
kel6(ctx->ekey, 7); kel6(ctx->ekey, 7);
@ -419,10 +411,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
break; break;
case 32: case 32:
ctx->ekey[4] = ss[4] = u32_in(in_key + 16); ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]);
ctx->ekey[5] = ss[5] = u32_in(in_key + 20); ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]);
ctx->ekey[6] = ss[6] = u32_in(in_key + 24); ctx->ekey[6] = ss[6] = le32_to_cpu(key[6]);
ctx->ekey[7] = ss[7] = u32_in(in_key + 28); ctx->ekey[7] = ss[7] = le32_to_cpu(key[7]);
for (i = 0; i < 6; i++) for (i = 0; i < 6; i++)
ke8(ctx->ekey, i); ke8(ctx->ekey, i);
kel8(ctx->ekey, 6); kel8(ctx->ekey, 6);
@ -436,10 +428,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
/* decryption schedule */ /* decryption schedule */
ctx->dkey[0] = ss[0] = u32_in(in_key); ctx->dkey[0] = ss[0] = le32_to_cpu(key[0]);
ctx->dkey[1] = ss[1] = u32_in(in_key + 4); ctx->dkey[1] = ss[1] = le32_to_cpu(key[1]);
ctx->dkey[2] = ss[2] = u32_in(in_key + 8); ctx->dkey[2] = ss[2] = le32_to_cpu(key[2]);
ctx->dkey[3] = ss[3] = u32_in(in_key + 12); ctx->dkey[3] = ss[3] = le32_to_cpu(key[3]);
switch (key_len) { switch (key_len) {
case 16: case 16:
@ -450,8 +442,8 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
break; break;
case 24: case 24:
ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16)); ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4]));
ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20)); ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5]));
kdf6(ctx->dkey, 0); kdf6(ctx->dkey, 0);
for (i = 1; i < 7; i++) for (i = 1; i < 7; i++)
kd6(ctx->dkey, i); kd6(ctx->dkey, i);
@ -459,10 +451,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
break; break;
case 32: case 32:
ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16)); ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4]));
ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20)); ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5]));
ctx->dkey[6] = ff(ss[6] = u32_in(in_key + 24)); ctx->dkey[6] = ff(ss[6] = le32_to_cpu(key[6]));
ctx->dkey[7] = ff(ss[7] = u32_in(in_key + 28)); ctx->dkey[7] = ff(ss[7] = le32_to_cpu(key[7]));
kdf8(ctx->dkey, 0); kdf8(ctx->dkey, 0);
for (i = 1; i < 6; i++) for (i = 1; i < 6; i++)
kd8(ctx->dkey, i); kd8(ctx->dkey, i);
@ -484,6 +476,8 @@ static inline void aes_decrypt(void *ctx, u8 *dst, const u8 *src)
static struct crypto_alg aes_alg = { static struct crypto_alg aes_alg = {
.cra_name = "aes", .cra_name = "aes",
.cra_driver_name = "aes-i586",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE, .cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx), .cra_ctxsize = sizeof(struct aes_ctx),
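The aes.c hunks above (and the x86_64 and generic AES files further down) all make the same substitution: the ad-hoc u32_in()/u32_out() macros go away, the byte pointer is cast to __le32 * (or __be32 * in the big-endian ciphers), and loads and stores go through le32_to_cpu()/cpu_to_le32(), which documents the expected byte order and lets sparse check it. The cra_alignmask additions elsewhere in the series tell the API how much alignment those word-sized accesses need so it can hand the cipher suitably aligned buffers (the cipher.c hunk later in this commit is one example). Below is a minimal userspace sketch of the little-endian load half of the pattern; get_le32() is a portable stand-in for le32_to_cpu() on a __le32 array, and the key bytes are made up.

#include <stdint.h>
#include <stdio.h>

/* Portable little-endian 32-bit load; in the kernel this is le32_to_cpu()
 * applied to a properly typed __le32 pointer. */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* made-up 128-bit key, just to show the byte order */
	const uint8_t in_key[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	};
	uint32_t ss[4];

	for (int i = 0; i < 4; i++) {
		ss[i] = get_le32(in_key + 4 * i);	/* mirrors ss[i] = le32_to_cpu(key[i]) */
		printf("ss[%d] = 0x%08x\n", i, (unsigned)ss[i]);
	}
	return 0;	/* first line printed: ss[0] = 0x03020100 */
}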

View File

@ -74,8 +74,6 @@ static inline u8 byte(const u32 x, const unsigned n)
return x >> (n << 3); return x >> (n << 3);
} }
#define u32_in(x) le32_to_cpu(*(const __le32 *)(x))
struct aes_ctx struct aes_ctx
{ {
u32 key_length; u32 key_length;
@ -234,6 +232,7 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len,
u32 *flags) u32 *flags)
{ {
struct aes_ctx *ctx = ctx_arg; struct aes_ctx *ctx = ctx_arg;
const __le32 *key = (const __le32 *)in_key;
u32 i, j, t, u, v, w; u32 i, j, t, u, v, w;
if (key_len != 16 && key_len != 24 && key_len != 32) { if (key_len != 16 && key_len != 24 && key_len != 32) {
@ -243,10 +242,10 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len,
ctx->key_length = key_len; ctx->key_length = key_len;
D_KEY[key_len + 24] = E_KEY[0] = u32_in(in_key); D_KEY[key_len + 24] = E_KEY[0] = le32_to_cpu(key[0]);
D_KEY[key_len + 25] = E_KEY[1] = u32_in(in_key + 4); D_KEY[key_len + 25] = E_KEY[1] = le32_to_cpu(key[1]);
D_KEY[key_len + 26] = E_KEY[2] = u32_in(in_key + 8); D_KEY[key_len + 26] = E_KEY[2] = le32_to_cpu(key[2]);
D_KEY[key_len + 27] = E_KEY[3] = u32_in(in_key + 12); D_KEY[key_len + 27] = E_KEY[3] = le32_to_cpu(key[3]);
switch (key_len) { switch (key_len) {
case 16: case 16:
@ -256,17 +255,17 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len,
break; break;
case 24: case 24:
E_KEY[4] = u32_in(in_key + 16); E_KEY[4] = le32_to_cpu(key[4]);
t = E_KEY[5] = u32_in(in_key + 20); t = E_KEY[5] = le32_to_cpu(key[5]);
for (i = 0; i < 8; ++i) for (i = 0; i < 8; ++i)
loop6 (i); loop6 (i);
break; break;
case 32: case 32:
E_KEY[4] = u32_in(in_key + 16); E_KEY[4] = le32_to_cpu(key[4]);
E_KEY[5] = u32_in(in_key + 20); E_KEY[5] = le32_to_cpu(key[5]);
E_KEY[6] = u32_in(in_key + 24); E_KEY[6] = le32_to_cpu(key[6]);
t = E_KEY[7] = u32_in(in_key + 28); t = E_KEY[7] = le32_to_cpu(key[7]);
for (i = 0; i < 7; ++i) for (i = 0; i < 7; ++i)
loop8(i); loop8(i);
break; break;
@ -290,6 +289,8 @@ extern void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in);
static struct crypto_alg aes_alg = { static struct crypto_alg aes_alg = {
.cra_name = "aes", .cra_name = "aes",
.cra_driver_name = "aes-x86_64",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE, .cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx), .cra_ctxsize = sizeof(struct aes_ctx),

View File

@ -157,7 +157,7 @@ config CRYPTO_SERPENT
config CRYPTO_AES config CRYPTO_AES
tristate "AES cipher algorithms" tristate "AES cipher algorithms"
depends on CRYPTO && !(X86 || UML_X86) depends on CRYPTO
help help
AES cipher algorithms (FIPS-197). AES uses the Rijndael AES cipher algorithms (FIPS-197). AES uses the Rijndael
algorithm. algorithm.

View File

@ -73,9 +73,6 @@ byte(const u32 x, const unsigned n)
return x >> (n << 3); return x >> (n << 3);
} }
#define u32_in(x) le32_to_cpu(*(const u32 *)(x))
#define u32_out(to, from) (*(u32 *)(to) = cpu_to_le32(from))
struct aes_ctx { struct aes_ctx {
int key_length; int key_length;
u32 E[60]; u32 E[60];
@ -256,6 +253,7 @@ static int
aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
{ {
struct aes_ctx *ctx = ctx_arg; struct aes_ctx *ctx = ctx_arg;
const __le32 *key = (const __le32 *)in_key;
u32 i, t, u, v, w; u32 i, t, u, v, w;
if (key_len != 16 && key_len != 24 && key_len != 32) { if (key_len != 16 && key_len != 24 && key_len != 32) {
@ -265,10 +263,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
ctx->key_length = key_len; ctx->key_length = key_len;
E_KEY[0] = u32_in (in_key); E_KEY[0] = le32_to_cpu(key[0]);
E_KEY[1] = u32_in (in_key + 4); E_KEY[1] = le32_to_cpu(key[1]);
E_KEY[2] = u32_in (in_key + 8); E_KEY[2] = le32_to_cpu(key[2]);
E_KEY[3] = u32_in (in_key + 12); E_KEY[3] = le32_to_cpu(key[3]);
switch (key_len) { switch (key_len) {
case 16: case 16:
@ -278,17 +276,17 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
break; break;
case 24: case 24:
E_KEY[4] = u32_in (in_key + 16); E_KEY[4] = le32_to_cpu(key[4]);
t = E_KEY[5] = u32_in (in_key + 20); t = E_KEY[5] = le32_to_cpu(key[5]);
for (i = 0; i < 8; ++i) for (i = 0; i < 8; ++i)
loop6 (i); loop6 (i);
break; break;
case 32: case 32:
E_KEY[4] = u32_in (in_key + 16); E_KEY[4] = le32_to_cpu(key[4]);
E_KEY[5] = u32_in (in_key + 20); E_KEY[5] = le32_to_cpu(key[5]);
E_KEY[6] = u32_in (in_key + 24); E_KEY[6] = le32_to_cpu(key[6]);
t = E_KEY[7] = u32_in (in_key + 28); t = E_KEY[7] = le32_to_cpu(key[7]);
for (i = 0; i < 7; ++i) for (i = 0; i < 7; ++i)
loop8 (i); loop8 (i);
break; break;
@ -324,13 +322,15 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in) static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in)
{ {
const struct aes_ctx *ctx = ctx_arg; const struct aes_ctx *ctx = ctx_arg;
const __le32 *src = (const __le32 *)in;
__le32 *dst = (__le32 *)out;
u32 b0[4], b1[4]; u32 b0[4], b1[4];
const u32 *kp = E_KEY + 4; const u32 *kp = E_KEY + 4;
b0[0] = u32_in (in) ^ E_KEY[0]; b0[0] = le32_to_cpu(src[0]) ^ E_KEY[0];
b0[1] = u32_in (in + 4) ^ E_KEY[1]; b0[1] = le32_to_cpu(src[1]) ^ E_KEY[1];
b0[2] = u32_in (in + 8) ^ E_KEY[2]; b0[2] = le32_to_cpu(src[2]) ^ E_KEY[2];
b0[3] = u32_in (in + 12) ^ E_KEY[3]; b0[3] = le32_to_cpu(src[3]) ^ E_KEY[3];
if (ctx->key_length > 24) { if (ctx->key_length > 24) {
f_nround (b1, b0, kp); f_nround (b1, b0, kp);
@ -353,10 +353,10 @@ static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in)
f_nround (b1, b0, kp); f_nround (b1, b0, kp);
f_lround (b0, b1, kp); f_lround (b0, b1, kp);
u32_out (out, b0[0]); dst[0] = cpu_to_le32(b0[0]);
u32_out (out + 4, b0[1]); dst[1] = cpu_to_le32(b0[1]);
u32_out (out + 8, b0[2]); dst[2] = cpu_to_le32(b0[2]);
u32_out (out + 12, b0[3]); dst[3] = cpu_to_le32(b0[3]);
} }
/* decrypt a block of text */ /* decrypt a block of text */
@ -377,14 +377,16 @@ static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in)
static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in) static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in)
{ {
const struct aes_ctx *ctx = ctx_arg; const struct aes_ctx *ctx = ctx_arg;
const __le32 *src = (const __le32 *)in;
__le32 *dst = (__le32 *)out;
u32 b0[4], b1[4]; u32 b0[4], b1[4];
const int key_len = ctx->key_length; const int key_len = ctx->key_length;
const u32 *kp = D_KEY + key_len + 20; const u32 *kp = D_KEY + key_len + 20;
b0[0] = u32_in (in) ^ E_KEY[key_len + 24]; b0[0] = le32_to_cpu(src[0]) ^ E_KEY[key_len + 24];
b0[1] = u32_in (in + 4) ^ E_KEY[key_len + 25]; b0[1] = le32_to_cpu(src[1]) ^ E_KEY[key_len + 25];
b0[2] = u32_in (in + 8) ^ E_KEY[key_len + 26]; b0[2] = le32_to_cpu(src[2]) ^ E_KEY[key_len + 26];
b0[3] = u32_in (in + 12) ^ E_KEY[key_len + 27]; b0[3] = le32_to_cpu(src[3]) ^ E_KEY[key_len + 27];
if (key_len > 24) { if (key_len > 24) {
i_nround (b1, b0, kp); i_nround (b1, b0, kp);
@ -407,18 +409,21 @@ static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in)
i_nround (b1, b0, kp); i_nround (b1, b0, kp);
i_lround (b0, b1, kp); i_lround (b0, b1, kp);
u32_out (out, b0[0]); dst[0] = cpu_to_le32(b0[0]);
u32_out (out + 4, b0[1]); dst[1] = cpu_to_le32(b0[1]);
u32_out (out + 8, b0[2]); dst[2] = cpu_to_le32(b0[2]);
u32_out (out + 12, b0[3]); dst[3] = cpu_to_le32(b0[3]);
} }
static struct crypto_alg aes_alg = { static struct crypto_alg aes_alg = {
.cra_name = "aes", .cra_name = "aes",
.cra_driver_name = "aes-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE, .cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx), .cra_ctxsize = sizeof(struct aes_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list), .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
.cra_u = { .cra_u = {

View File

@ -32,8 +32,10 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/byteorder.h>
#include <asm/scatterlist.h> #include <asm/scatterlist.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/types.h>
#define ANUBIS_MIN_KEY_SIZE 16 #define ANUBIS_MIN_KEY_SIZE 16
#define ANUBIS_MAX_KEY_SIZE 40 #define ANUBIS_MAX_KEY_SIZE 40
@ -461,8 +463,8 @@ static const u32 rc[] = {
static int anubis_setkey(void *ctx_arg, const u8 *in_key, static int anubis_setkey(void *ctx_arg, const u8 *in_key,
unsigned int key_len, u32 *flags) unsigned int key_len, u32 *flags)
{ {
const __be32 *key = (const __be32 *)in_key;
int N, R, i, pos, r; int N, R, i, r;
u32 kappa[ANUBIS_MAX_N]; u32 kappa[ANUBIS_MAX_N];
u32 inter[ANUBIS_MAX_N]; u32 inter[ANUBIS_MAX_N];
@ -483,13 +485,8 @@ static int anubis_setkey(void *ctx_arg, const u8 *in_key,
ctx->R = R = 8 + N; ctx->R = R = 8 + N;
/* * map cipher key to initial key state (mu): */ /* * map cipher key to initial key state (mu): */
for (i = 0, pos = 0; i < N; i++, pos += 4) { for (i = 0; i < N; i++)
kappa[i] = kappa[i] = be32_to_cpu(key[i]);
(in_key[pos ] << 24) ^
(in_key[pos + 1] << 16) ^
(in_key[pos + 2] << 8) ^
(in_key[pos + 3] );
}
/* /*
* generate R + 1 round keys: * generate R + 1 round keys:
@ -578,7 +575,9 @@ static int anubis_setkey(void *ctx_arg, const u8 *in_key,
static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
u8 *ciphertext, const u8 *plaintext, const int R) u8 *ciphertext, const u8 *plaintext, const int R)
{ {
int i, pos, r; const __be32 *src = (const __be32 *)plaintext;
__be32 *dst = (__be32 *)ciphertext;
int i, r;
u32 state[4]; u32 state[4];
u32 inter[4]; u32 inter[4];
@ -586,14 +585,8 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
* map plaintext block to cipher state (mu) * map plaintext block to cipher state (mu)
* and add initial round key (sigma[K^0]): * and add initial round key (sigma[K^0]):
*/ */
for (i = 0, pos = 0; i < 4; i++, pos += 4) { for (i = 0; i < 4; i++)
state[i] = state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i];
(plaintext[pos ] << 24) ^
(plaintext[pos + 1] << 16) ^
(plaintext[pos + 2] << 8) ^
(plaintext[pos + 3] ) ^
roundKey[0][i];
}
/* /*
* R - 1 full rounds: * R - 1 full rounds:
@ -663,13 +656,8 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
* map cipher state to ciphertext block (mu^{-1}): * map cipher state to ciphertext block (mu^{-1}):
*/ */
for (i = 0, pos = 0; i < 4; i++, pos += 4) { for (i = 0; i < 4; i++)
u32 w = inter[i]; dst[i] = cpu_to_be32(inter[i]);
ciphertext[pos ] = (u8)(w >> 24);
ciphertext[pos + 1] = (u8)(w >> 16);
ciphertext[pos + 2] = (u8)(w >> 8);
ciphertext[pos + 3] = (u8)(w );
}
} }
static void anubis_encrypt(void *ctx_arg, u8 *dst, const u8 *src) static void anubis_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
@ -689,6 +677,7 @@ static struct crypto_alg anubis_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ANUBIS_BLOCK_SIZE, .cra_blocksize = ANUBIS_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct anubis_ctx), .cra_ctxsize = sizeof (struct anubis_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(anubis_alg.cra_list), .cra_list = LIST_HEAD_INIT(anubis_alg.cra_list),
.cra_u = { .cipher = { .cra_u = { .cipher = {

View File

@ -3,6 +3,7 @@
* *
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2002 David S. Miller (davem@redhat.com) * Copyright (c) 2002 David S. Miller (davem@redhat.com)
* Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
* *
* Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
* and Nettle, by Niels Möller. * and Nettle, by Niels Möller.
@ -18,9 +19,11 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kmod.h> #include <linux/kmod.h>
#include <linux/rwsem.h> #include <linux/rwsem.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/string.h>
#include "internal.h" #include "internal.h"
LIST_HEAD(crypto_alg_list); LIST_HEAD(crypto_alg_list);
@@ -39,6 +42,7 @@ static inline void crypto_alg_put(struct crypto_alg *alg)
 static struct crypto_alg *crypto_alg_lookup(const char *name)
 {
 	struct crypto_alg *q, *alg = NULL;
+	int best = -1;
 
 	if (!name)
 		return NULL;
@@ -46,12 +50,24 @@ static struct crypto_alg *crypto_alg_lookup(const char *name)
 	down_read(&crypto_alg_sem);
 
 	list_for_each_entry(q, &crypto_alg_list, cra_list) {
-		if (!(strcmp(q->cra_name, name))) {
-			if (crypto_alg_get(q))
+		int exact, fuzzy;
+
+		exact = !strcmp(q->cra_driver_name, name);
+		fuzzy = !strcmp(q->cra_name, name);
+		if (!exact && !(fuzzy && q->cra_priority > best))
+			continue;
+
+		if (unlikely(!crypto_alg_get(q)))
+			continue;
+
+		best = q->cra_priority;
+		if (alg)
+			crypto_alg_put(alg);
 		alg = q;
+
+		if (exact)
 			break;
 	}
-	}
 
 	up_read(&crypto_alg_sem);
 	return alg;
@@ -207,9 +223,26 @@ void crypto_free_tfm(struct crypto_tfm *tfm)
 	kfree(tfm);
 }
 
+static inline int crypto_set_driver_name(struct crypto_alg *alg)
+{
+	static const char suffix[] = "-generic";
+	char *driver_name = (char *)alg->cra_driver_name;
+	int len;
+
+	if (*driver_name)
+		return 0;
+
+	len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
+	if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME)
+		return -ENAMETOOLONG;
+
+	memcpy(driver_name + len, suffix, sizeof(suffix));
+	return 0;
+}
+
 int crypto_register_alg(struct crypto_alg *alg)
 {
-	int ret = 0;
+	int ret;
 	struct crypto_alg *q;
 
 	if (alg->cra_alignmask & (alg->cra_alignmask + 1))
@@ -218,13 +251,20 @@ int crypto_register_alg(struct crypto_alg *alg)
 	if (alg->cra_alignmask & alg->cra_blocksize)
 		return -EINVAL;
 
-	if (alg->cra_blocksize > PAGE_SIZE)
+	if (alg->cra_blocksize > PAGE_SIZE / 8)
+		return -EINVAL;
+
+	if (alg->cra_priority < 0)
 		return -EINVAL;
+
+	ret = crypto_set_driver_name(alg);
+	if (unlikely(ret))
+		return ret;
 
 	down_write(&crypto_alg_sem);
 
 	list_for_each_entry(q, &crypto_alg_list, cra_list) {
-		if (!(strcmp(q->cra_name, alg->cra_name))) {
+		if (!strcmp(q->cra_driver_name, alg->cra_driver_name)) {
 			ret = -EEXIST;
 			goto out;
 		}
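The api.c changes above are the heart of this series: struct crypto_alg gains cra_driver_name and cra_priority, crypto_register_alg() fills in a "<name>-generic" driver name when none is given and rejects duplicate driver names, and crypto_alg_lookup() returns an exact driver-name match immediately or else the highest-priority implementation registered under the generic name. The standalone sketch below reimplements just that selection rule; the struct, the table and the 100/200 priorities mirror values set in this commit, but the code itself is illustrative and leaves out the module refcounting done by crypto_alg_get()/crypto_alg_put().

#include <stdio.h>
#include <string.h>

struct alg {
	const char *cra_name;		/* generic name, e.g. "aes" */
	const char *cra_driver_name;	/* implementation name, e.g. "aes-i586" */
	int cra_priority;		/* higher wins among the same cra_name */
};

/* Same policy as the new crypto_alg_lookup(): an exact driver-name match wins
 * immediately; otherwise keep the highest-priority generic-name match. */
static const struct alg *lookup(const struct alg *list, int n, const char *name)
{
	const struct alg *alg = NULL;
	int best = -1;

	for (int i = 0; i < n; i++) {
		int exact = !strcmp(list[i].cra_driver_name, name);
		int fuzzy = !strcmp(list[i].cra_name, name);

		if (!exact && !(fuzzy && list[i].cra_priority > best))
			continue;

		best = list[i].cra_priority;
		alg = &list[i];
		if (exact)
			break;
	}
	return alg;
}

int main(void)
{
	/* hypothetical registrations, using the priorities set in this commit */
	const struct alg algs[] = {
		{ "aes", "aes-generic", 100 },
		{ "aes", "aes-i586",    200 },
	};

	printf("%s\n", lookup(algs, 2, "aes")->cra_driver_name);	  /* aes-i586 */
	printf("%s\n", lookup(algs, 2, "aes-generic")->cra_driver_name); /* aes-generic */
	return 0;
}

With both implementations registered, a request for "aes" therefore picks the higher-priority i586 driver, while asking for "aes-generic" still selects the C version explicitly.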

View File

@ -19,8 +19,10 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/byteorder.h>
#include <asm/scatterlist.h> #include <asm/scatterlist.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/types.h>
#define BF_BLOCK_SIZE 8 #define BF_BLOCK_SIZE 8
#define BF_MIN_KEY_SIZE 4 #define BF_MIN_KEY_SIZE 4
@ -451,6 +453,7 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = BF_BLOCK_SIZE, .cra_blocksize = BF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bf_ctx), .cra_ctxsize = sizeof(struct bf_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .cipher = { .cra_u = { .cipher = {

View File

@ -21,11 +21,13 @@
*/ */
#include <asm/byteorder.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/types.h>
#define CAST5_BLOCK_SIZE 8 #define CAST5_BLOCK_SIZE 8
#define CAST5_MIN_KEY_SIZE 5 #define CAST5_MIN_KEY_SIZE 5
@ -578,6 +580,8 @@ static const u32 sb8[256] = {
static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf) static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
{ {
struct cast5_ctx *c = (struct cast5_ctx *) ctx; struct cast5_ctx *c = (struct cast5_ctx *) ctx;
const __be32 *src = (const __be32 *)inbuf;
__be32 *dst = (__be32 *)outbuf;
u32 l, r, t; u32 l, r, t;
u32 I; /* used by the Fx macros */ u32 I; /* used by the Fx macros */
u32 *Km; u32 *Km;
@ -589,8 +593,8 @@ static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
/* (L0,R0) <-- (m1...m64). (Split the plaintext into left and /* (L0,R0) <-- (m1...m64). (Split the plaintext into left and
* right 32-bit halves L0 = m1...m32 and R0 = m33...m64.) * right 32-bit halves L0 = m1...m32 and R0 = m33...m64.)
*/ */
l = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3]; l = be32_to_cpu(src[0]);
r = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7]; r = be32_to_cpu(src[1]);
/* (16 rounds) for i from 1 to 16, compute Li and Ri as follows: /* (16 rounds) for i from 1 to 16, compute Li and Ri as follows:
* Li = Ri-1; * Li = Ri-1;
@ -634,19 +638,15 @@ static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
/* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and /* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and
* concatenate to form the ciphertext.) */ * concatenate to form the ciphertext.) */
outbuf[0] = (r >> 24) & 0xff; dst[0] = cpu_to_be32(r);
outbuf[1] = (r >> 16) & 0xff; dst[1] = cpu_to_be32(l);
outbuf[2] = (r >> 8) & 0xff;
outbuf[3] = r & 0xff;
outbuf[4] = (l >> 24) & 0xff;
outbuf[5] = (l >> 16) & 0xff;
outbuf[6] = (l >> 8) & 0xff;
outbuf[7] = l & 0xff;
} }
static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf) static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
{ {
struct cast5_ctx *c = (struct cast5_ctx *) ctx; struct cast5_ctx *c = (struct cast5_ctx *) ctx;
const __be32 *src = (const __be32 *)inbuf;
__be32 *dst = (__be32 *)outbuf;
u32 l, r, t; u32 l, r, t;
u32 I; u32 I;
u32 *Km; u32 *Km;
@ -655,8 +655,8 @@ static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
Km = c->Km; Km = c->Km;
Kr = c->Kr; Kr = c->Kr;
l = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3]; l = be32_to_cpu(src[0]);
r = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7]; r = be32_to_cpu(src[1]);
if (!(c->rr)) { if (!(c->rr)) {
t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]);
@ -690,14 +690,8 @@ static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
} }
outbuf[0] = (r >> 24) & 0xff; dst[0] = cpu_to_be32(r);
outbuf[1] = (r >> 16) & 0xff; dst[1] = cpu_to_be32(l);
outbuf[2] = (r >> 8) & 0xff;
outbuf[3] = r & 0xff;
outbuf[4] = (l >> 24) & 0xff;
outbuf[5] = (l >> 16) & 0xff;
outbuf[6] = (l >> 8) & 0xff;
outbuf[7] = l & 0xff;
} }
static void key_schedule(u32 * x, u32 * z, u32 * k) static void key_schedule(u32 * x, u32 * z, u32 * k)
@ -782,7 +776,7 @@ cast5_setkey(void *ctx, const u8 * key, unsigned key_len, u32 * flags)
u32 x[4]; u32 x[4];
u32 z[4]; u32 z[4];
u32 k[16]; u32 k[16];
u8 p_key[16]; __be32 p_key[4];
struct cast5_ctx *c = (struct cast5_ctx *) ctx; struct cast5_ctx *c = (struct cast5_ctx *) ctx;
if (key_len < 5 || key_len > 16) { if (key_len < 5 || key_len > 16) {
@ -796,12 +790,10 @@ cast5_setkey(void *ctx, const u8 * key, unsigned key_len, u32 * flags)
memcpy(p_key, key, key_len); memcpy(p_key, key, key_len);
x[0] = p_key[0] << 24 | p_key[1] << 16 | p_key[2] << 8 | p_key[3]; x[0] = be32_to_cpu(p_key[0]);
x[1] = p_key[4] << 24 | p_key[5] << 16 | p_key[6] << 8 | p_key[7]; x[1] = be32_to_cpu(p_key[1]);
x[2] = x[2] = be32_to_cpu(p_key[2]);
p_key[8] << 24 | p_key[9] << 16 | p_key[10] << 8 | p_key[11]; x[3] = be32_to_cpu(p_key[3]);
x[3] =
p_key[12] << 24 | p_key[13] << 16 | p_key[14] << 8 | p_key[15];
key_schedule(x, z, k); key_schedule(x, z, k);
for (i = 0; i < 16; i++) for (i = 0; i < 16; i++)
@ -817,6 +809,7 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAST5_BLOCK_SIZE, .cra_blocksize = CAST5_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast5_ctx), .cra_ctxsize = sizeof(struct cast5_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .cra_u = {

View File

@ -18,11 +18,13 @@
*/ */
#include <asm/byteorder.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/types.h>
#define CAST6_BLOCK_SIZE 16 #define CAST6_BLOCK_SIZE 16
#define CAST6_MIN_KEY_SIZE 16 #define CAST6_MIN_KEY_SIZE 16
@ -384,7 +386,7 @@ cast6_setkey(void *ctx, const u8 * in_key, unsigned key_len, u32 * flags)
{ {
int i; int i;
u32 key[8]; u32 key[8];
u8 p_key[32]; /* padded key */ __be32 p_key[8]; /* padded key */
struct cast6_ctx *c = (struct cast6_ctx *) ctx; struct cast6_ctx *c = (struct cast6_ctx *) ctx;
if (key_len < 16 || key_len > 32 || key_len % 4 != 0) { if (key_len < 16 || key_len > 32 || key_len % 4 != 0) {
@ -395,14 +397,14 @@ cast6_setkey(void *ctx, const u8 * in_key, unsigned key_len, u32 * flags)
memset (p_key, 0, 32); memset (p_key, 0, 32);
memcpy (p_key, in_key, key_len); memcpy (p_key, in_key, key_len);
key[0] = p_key[0] << 24 | p_key[1] << 16 | p_key[2] << 8 | p_key[3]; /* A */ key[0] = be32_to_cpu(p_key[0]); /* A */
key[1] = p_key[4] << 24 | p_key[5] << 16 | p_key[6] << 8 | p_key[7]; /* B */ key[1] = be32_to_cpu(p_key[1]); /* B */
key[2] = p_key[8] << 24 | p_key[9] << 16 | p_key[10] << 8 | p_key[11]; /* C */ key[2] = be32_to_cpu(p_key[2]); /* C */
key[3] = p_key[12] << 24 | p_key[13] << 16 | p_key[14] << 8 | p_key[15]; /* D */ key[3] = be32_to_cpu(p_key[3]); /* D */
key[4] = p_key[16] << 24 | p_key[17] << 16 | p_key[18] << 8 | p_key[19]; /* E */ key[4] = be32_to_cpu(p_key[4]); /* E */
key[5] = p_key[20] << 24 | p_key[21] << 16 | p_key[22] << 8 | p_key[23]; /* F */ key[5] = be32_to_cpu(p_key[5]); /* F */
key[6] = p_key[24] << 24 | p_key[25] << 16 | p_key[26] << 8 | p_key[27]; /* G */ key[6] = be32_to_cpu(p_key[6]); /* G */
key[7] = p_key[28] << 24 | p_key[29] << 16 | p_key[30] << 8 | p_key[31]; /* H */ key[7] = be32_to_cpu(p_key[7]); /* H */
@ -444,14 +446,16 @@ static inline void QBAR (u32 * block, u8 * Kr, u32 * Km) {
static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) {
struct cast6_ctx * c = (struct cast6_ctx *)ctx; struct cast6_ctx * c = (struct cast6_ctx *)ctx;
const __be32 *src = (const __be32 *)inbuf;
__be32 *dst = (__be32 *)outbuf;
u32 block[4]; u32 block[4];
u32 * Km; u32 * Km;
u8 * Kr; u8 * Kr;
block[0] = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3]; block[0] = be32_to_cpu(src[0]);
block[1] = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7]; block[1] = be32_to_cpu(src[1]);
block[2] = inbuf[8] << 24 | inbuf[9] << 16 | inbuf[10] << 8 | inbuf[11]; block[2] = be32_to_cpu(src[2]);
block[3] = inbuf[12] << 24 | inbuf[13] << 16 | inbuf[14] << 8 | inbuf[15]; block[3] = be32_to_cpu(src[3]);
Km = c->Km[0]; Kr = c->Kr[0]; Q (block, Kr, Km); Km = c->Km[0]; Kr = c->Kr[0]; Q (block, Kr, Km);
Km = c->Km[1]; Kr = c->Kr[1]; Q (block, Kr, Km); Km = c->Km[1]; Kr = c->Kr[1]; Q (block, Kr, Km);
@ -466,34 +470,24 @@ static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) {
Km = c->Km[10]; Kr = c->Kr[10]; QBAR (block, Kr, Km); Km = c->Km[10]; Kr = c->Kr[10]; QBAR (block, Kr, Km);
Km = c->Km[11]; Kr = c->Kr[11]; QBAR (block, Kr, Km); Km = c->Km[11]; Kr = c->Kr[11]; QBAR (block, Kr, Km);
outbuf[0] = (block[0] >> 24) & 0xff; dst[0] = cpu_to_be32(block[0]);
outbuf[1] = (block[0] >> 16) & 0xff; dst[1] = cpu_to_be32(block[1]);
outbuf[2] = (block[0] >> 8) & 0xff; dst[2] = cpu_to_be32(block[2]);
outbuf[3] = block[0] & 0xff; dst[3] = cpu_to_be32(block[3]);
outbuf[4] = (block[1] >> 24) & 0xff;
outbuf[5] = (block[1] >> 16) & 0xff;
outbuf[6] = (block[1] >> 8) & 0xff;
outbuf[7] = block[1] & 0xff;
outbuf[8] = (block[2] >> 24) & 0xff;
outbuf[9] = (block[2] >> 16) & 0xff;
outbuf[10] = (block[2] >> 8) & 0xff;
outbuf[11] = block[2] & 0xff;
outbuf[12] = (block[3] >> 24) & 0xff;
outbuf[13] = (block[3] >> 16) & 0xff;
outbuf[14] = (block[3] >> 8) & 0xff;
outbuf[15] = block[3] & 0xff;
} }
static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) {
struct cast6_ctx * c = (struct cast6_ctx *)ctx; struct cast6_ctx * c = (struct cast6_ctx *)ctx;
const __be32 *src = (const __be32 *)inbuf;
__be32 *dst = (__be32 *)outbuf;
u32 block[4]; u32 block[4];
u32 * Km; u32 * Km;
u8 * Kr; u8 * Kr;
block[0] = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3]; block[0] = be32_to_cpu(src[0]);
block[1] = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7]; block[1] = be32_to_cpu(src[1]);
block[2] = inbuf[8] << 24 | inbuf[9] << 16 | inbuf[10] << 8 | inbuf[11]; block[2] = be32_to_cpu(src[2]);
block[3] = inbuf[12] << 24 | inbuf[13] << 16 | inbuf[14] << 8 | inbuf[15]; block[3] = be32_to_cpu(src[3]);
Km = c->Km[11]; Kr = c->Kr[11]; Q (block, Kr, Km); Km = c->Km[11]; Kr = c->Kr[11]; Q (block, Kr, Km);
Km = c->Km[10]; Kr = c->Kr[10]; Q (block, Kr, Km); Km = c->Km[10]; Kr = c->Kr[10]; Q (block, Kr, Km);
@ -508,22 +502,10 @@ static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) {
Km = c->Km[1]; Kr = c->Kr[1]; QBAR (block, Kr, Km); Km = c->Km[1]; Kr = c->Kr[1]; QBAR (block, Kr, Km);
Km = c->Km[0]; Kr = c->Kr[0]; QBAR (block, Kr, Km); Km = c->Km[0]; Kr = c->Kr[0]; QBAR (block, Kr, Km);
outbuf[0] = (block[0] >> 24) & 0xff; dst[0] = cpu_to_be32(block[0]);
outbuf[1] = (block[0] >> 16) & 0xff; dst[1] = cpu_to_be32(block[1]);
outbuf[2] = (block[0] >> 8) & 0xff; dst[2] = cpu_to_be32(block[2]);
outbuf[3] = block[0] & 0xff; dst[3] = cpu_to_be32(block[3]);
outbuf[4] = (block[1] >> 24) & 0xff;
outbuf[5] = (block[1] >> 16) & 0xff;
outbuf[6] = (block[1] >> 8) & 0xff;
outbuf[7] = block[1] & 0xff;
outbuf[8] = (block[2] >> 24) & 0xff;
outbuf[9] = (block[2] >> 16) & 0xff;
outbuf[10] = (block[2] >> 8) & 0xff;
outbuf[11] = block[2] & 0xff;
outbuf[12] = (block[3] >> 24) & 0xff;
outbuf[13] = (block[3] >> 16) & 0xff;
outbuf[14] = (block[3] >> 8) & 0xff;
outbuf[15] = block[3] & 0xff;
} }
static struct crypto_alg alg = { static struct crypto_alg alg = {
@ -531,6 +513,7 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAST6_BLOCK_SIZE, .cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast6_ctx), .cra_ctxsize = sizeof(struct cast6_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .cra_u = {

View File

@@ -212,9 +212,10 @@ static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
 	struct crypto_tfm *tfm = desc->tfm;
 	void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
 	int bsize = crypto_tfm_alg_blocksize(tfm);
+	unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
 
-	u8 stack[src == dst ? bsize : 0];
-	u8 *buf = stack;
+	u8 stack[src == dst ? bsize + alignmask : 0];
+	u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
 	u8 **dst_p = src == dst ? &buf : &dst;
 
 	void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
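The cipher.c hunk above is the consumer side of the new cra_alignmask field: for an in-place CBC decrypt the temporary block lives on the stack, so the buffer is over-allocated by alignmask bytes and its pointer rounded up to the next (alignmask + 1) boundary before the cipher sees it. Below is a userspace sketch of that pointer arithmetic, assuming alignmask = 3 as most ciphers converted in this commit declare; ALIGN_UP() is a local reimplementation of the kernel's ALIGN() macro.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's ALIGN(): round x up to a multiple of the
 * power-of-two value 'a'. */
#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
	unsigned long alignmask = 3;	/* cra_alignmask = 3, i.e. 4-byte alignment */
	int bsize = 16;			/* cipher block size, e.g. AES */

	/* over-allocate by alignmask, then align, as the hunk above does */
	uint8_t stack[bsize + alignmask];
	uint8_t *buf = (uint8_t *)ALIGN_UP((uintptr_t)stack, alignmask + 1);

	printf("stack=%p buf=%p (skipped %ld bytes)\n",
	       (void *)stack, (void *)buf, (long)(buf - stack));
	return 0;	/* buf is always 4-byte aligned; 0 to 3 bytes are skipped */
}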

View File

@ -16,6 +16,7 @@
#include <linux/string.h> #include <linux/string.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/crc32c.h> #include <linux/crc32c.h>
#include <linux/types.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
#define CHKSUM_BLOCK_SIZE 32 #define CHKSUM_BLOCK_SIZE 32

View File

@ -12,11 +12,13 @@
* *
*/ */
#include <asm/byteorder.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/types.h>
#define DES_KEY_SIZE 8 #define DES_KEY_SIZE 8
#define DES_EXPKEY_WORDS 32 #define DES_EXPKEY_WORDS 32
@ -947,6 +949,7 @@ static struct crypto_alg des_alg = {
.cra_blocksize = DES_BLOCK_SIZE, .cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des_ctx), .cra_ctxsize = sizeof(struct des_ctx),
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_list = LIST_HEAD_INIT(des_alg.cra_list), .cra_list = LIST_HEAD_INIT(des_alg.cra_list),
.cra_u = { .cipher = { .cra_u = { .cipher = {
.cia_min_keysize = DES_KEY_SIZE, .cia_min_keysize = DES_KEY_SIZE,

View File

@ -2,6 +2,7 @@
* Cryptographic API. * Cryptographic API.
* *
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free * under the terms of the GNU General Public License as published by the Free
@ -16,10 +17,15 @@
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <asm/kmap_types.h> #include <asm/kmap_types.h>
extern struct list_head crypto_alg_list;
extern struct rw_semaphore crypto_alg_sem;
extern enum km_type crypto_km_types[]; extern enum km_type crypto_km_types[];
static inline enum km_type crypto_kmap_type(int out) static inline enum km_type crypto_kmap_type(int out)

View File

@ -22,8 +22,10 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/byteorder.h>
#include <asm/scatterlist.h> #include <asm/scatterlist.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/types.h>
#define KHAZAD_KEY_SIZE 16 #define KHAZAD_KEY_SIZE 16
#define KHAZAD_BLOCK_SIZE 8 #define KHAZAD_BLOCK_SIZE 8
@ -755,8 +757,8 @@ static const u64 c[KHAZAD_ROUNDS + 1] = {
static int khazad_setkey(void *ctx_arg, const u8 *in_key, static int khazad_setkey(void *ctx_arg, const u8 *in_key,
unsigned int key_len, u32 *flags) unsigned int key_len, u32 *flags)
{ {
struct khazad_ctx *ctx = ctx_arg; struct khazad_ctx *ctx = ctx_arg;
const __be64 *key = (const __be64 *)in_key;
int r; int r;
const u64 *S = T7; const u64 *S = T7;
u64 K2, K1; u64 K2, K1;
@ -767,22 +769,8 @@ static int khazad_setkey(void *ctx_arg, const u8 *in_key,
return -EINVAL; return -EINVAL;
} }
K2 = ((u64)in_key[ 0] << 56) ^ K2 = be64_to_cpu(key[0]);
((u64)in_key[ 1] << 48) ^ K1 = be64_to_cpu(key[1]);
((u64)in_key[ 2] << 40) ^
((u64)in_key[ 3] << 32) ^
((u64)in_key[ 4] << 24) ^
((u64)in_key[ 5] << 16) ^
((u64)in_key[ 6] << 8) ^
((u64)in_key[ 7] );
K1 = ((u64)in_key[ 8] << 56) ^
((u64)in_key[ 9] << 48) ^
((u64)in_key[10] << 40) ^
((u64)in_key[11] << 32) ^
((u64)in_key[12] << 24) ^
((u64)in_key[13] << 16) ^
((u64)in_key[14] << 8) ^
((u64)in_key[15] );
/* setup the encrypt key */ /* setup the encrypt key */
for (r = 0; r <= KHAZAD_ROUNDS; r++) { for (r = 0; r <= KHAZAD_ROUNDS; r++) {
@ -820,19 +808,12 @@ static int khazad_setkey(void *ctx_arg, const u8 *in_key,
static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
u8 *ciphertext, const u8 *plaintext) u8 *ciphertext, const u8 *plaintext)
{ {
const __be64 *src = (const __be64 *)plaintext;
__be64 *dst = (__be64 *)ciphertext;
int r; int r;
u64 state; u64 state;
state = ((u64)plaintext[0] << 56) ^ state = be64_to_cpu(*src) ^ roundKey[0];
((u64)plaintext[1] << 48) ^
((u64)plaintext[2] << 40) ^
((u64)plaintext[3] << 32) ^
((u64)plaintext[4] << 24) ^
((u64)plaintext[5] << 16) ^
((u64)plaintext[6] << 8) ^
((u64)plaintext[7] ) ^
roundKey[0];
for (r = 1; r < KHAZAD_ROUNDS; r++) { for (r = 1; r < KHAZAD_ROUNDS; r++) {
state = T0[(int)(state >> 56) ] ^ state = T0[(int)(state >> 56) ] ^
@ -856,15 +837,7 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
(T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^ (T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^
roundKey[KHAZAD_ROUNDS]; roundKey[KHAZAD_ROUNDS];
ciphertext[0] = (u8)(state >> 56); *dst = cpu_to_be64(state);
ciphertext[1] = (u8)(state >> 48);
ciphertext[2] = (u8)(state >> 40);
ciphertext[3] = (u8)(state >> 32);
ciphertext[4] = (u8)(state >> 24);
ciphertext[5] = (u8)(state >> 16);
ciphertext[6] = (u8)(state >> 8);
ciphertext[7] = (u8)(state );
} }
static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src) static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
@ -884,6 +857,7 @@ static struct crypto_alg khazad_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = KHAZAD_BLOCK_SIZE, .cra_blocksize = KHAZAD_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct khazad_ctx), .cra_ctxsize = sizeof (struct khazad_ctx),
.cra_alignmask = 7,
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(khazad_alg.cra_list), .cra_list = LIST_HEAD_INIT(khazad_alg.cra_list),
.cra_u = { .cipher = { .cra_u = { .cipher = {

View File

@ -24,6 +24,7 @@
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
#define MD4_DIGEST_SIZE 16 #define MD4_DIGEST_SIZE 16

View File

@ -19,6 +19,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/types.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
#define MD5_DIGEST_SIZE 16 #define MD5_DIGEST_SIZE 16

View File

@ -10,10 +10,12 @@
* published by the Free Software Foundation. * published by the Free Software Foundation.
*/ */
#include <asm/byteorder.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/types.h>
struct michael_mic_ctx { struct michael_mic_ctx {
@ -43,21 +45,6 @@ do { \
} while (0) } while (0)
static inline u32 get_le32(const u8 *p)
{
return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
}
static inline void put_le32(u8 *p, u32 v)
{
p[0] = v;
p[1] = v >> 8;
p[2] = v >> 16;
p[3] = v >> 24;
}
static void michael_init(void *ctx) static void michael_init(void *ctx)
{ {
struct michael_mic_ctx *mctx = ctx; struct michael_mic_ctx *mctx = ctx;
@ -68,6 +55,7 @@ static void michael_init(void *ctx)
static void michael_update(void *ctx, const u8 *data, unsigned int len) static void michael_update(void *ctx, const u8 *data, unsigned int len)
{ {
struct michael_mic_ctx *mctx = ctx; struct michael_mic_ctx *mctx = ctx;
const __le32 *src;
if (mctx->pending_len) { if (mctx->pending_len) {
int flen = 4 - mctx->pending_len; int flen = 4 - mctx->pending_len;
@ -81,21 +69,23 @@ static void michael_update(void *ctx, const u8 *data, unsigned int len)
if (mctx->pending_len < 4) if (mctx->pending_len < 4)
return; return;
mctx->l ^= get_le32(mctx->pending); src = (const __le32 *)mctx->pending;
mctx->l ^= le32_to_cpup(src);
michael_block(mctx->l, mctx->r); michael_block(mctx->l, mctx->r);
mctx->pending_len = 0; mctx->pending_len = 0;
} }
src = (const __le32 *)data;
while (len >= 4) { while (len >= 4) {
mctx->l ^= get_le32(data); mctx->l ^= le32_to_cpup(src++);
michael_block(mctx->l, mctx->r); michael_block(mctx->l, mctx->r);
data += 4;
len -= 4; len -= 4;
} }
if (len > 0) { if (len > 0) {
mctx->pending_len = len; mctx->pending_len = len;
memcpy(mctx->pending, data, len); memcpy(mctx->pending, src, len);
} }
} }
@ -104,6 +94,7 @@ static void michael_final(void *ctx, u8 *out)
{ {
struct michael_mic_ctx *mctx = ctx; struct michael_mic_ctx *mctx = ctx;
u8 *data = mctx->pending; u8 *data = mctx->pending;
__le32 *dst = (__le32 *)out;
/* Last block and padding (0x5a, 4..7 x 0) */ /* Last block and padding (0x5a, 4..7 x 0) */
switch (mctx->pending_len) { switch (mctx->pending_len) {
@ -125,8 +116,8 @@ static void michael_final(void *ctx, u8 *out)
/* l ^= 0; */ /* l ^= 0; */
michael_block(mctx->l, mctx->r); michael_block(mctx->l, mctx->r);
put_le32(out, mctx->l); dst[0] = cpu_to_le32(mctx->l);
put_le32(out + 4, mctx->r); dst[1] = cpu_to_le32(mctx->r);
} }
@ -134,13 +125,16 @@ static int michael_setkey(void *ctx, const u8 *key, unsigned int keylen,
u32 *flags) u32 *flags)
{ {
struct michael_mic_ctx *mctx = ctx; struct michael_mic_ctx *mctx = ctx;
const __le32 *data = (const __le32 *)key;
if (keylen != 8) { if (keylen != 8) {
if (flags) if (flags)
*flags = CRYPTO_TFM_RES_BAD_KEY_LEN; *flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL; return -EINVAL;
} }
mctx->l = get_le32(key);
mctx->r = get_le32(key + 4); mctx->l = le32_to_cpu(data[0]);
mctx->r = le32_to_cpu(data[1]);
return 0; return 0;
} }

View File

@ -4,6 +4,7 @@
* Procfs information. * Procfs information.
* *
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free * under the terms of the GNU General Public License as published by the Free
@@ -18,9 +19,6 @@
 #include <linux/seq_file.h>
 #include "internal.h"
 
-extern struct list_head crypto_alg_list;
-extern struct rw_semaphore crypto_alg_sem;
-
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
 	struct list_head *v;
@@ -53,7 +51,9 @@ static int c_show(struct seq_file *m, void *p)
 	struct crypto_alg *alg = (struct crypto_alg *)p;
 
 	seq_printf(m, "name : %s\n", alg->cra_name);
+	seq_printf(m, "driver : %s\n", alg->cra_driver_name);
 	seq_printf(m, "module : %s\n", module_name(alg->cra_module));
+	seq_printf(m, "priority : %d\n", alg->cra_priority);
 
 	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_CIPHER:
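With the two extra seq_printf() lines above, every /proc/crypto entry now says which implementation backs an algorithm and at what priority. Going only by the format strings in this hunk and the registrations added earlier in the commit, an entry for the i586 AES driver would start out roughly like this (hypothetical output; the module name and field spacing are guesses, and the per-type fields printed later in c_show() are omitted):

name : aes
driver : aes-i586
module : aes_i586
priority : 200
...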

View File

@ -20,6 +20,7 @@
#include <linux/errno.h> #include <linux/errno.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/types.h>
/* Key is padded to the maximum of 256 bits before round key generation. /* Key is padded to the maximum of 256 bits before round key generation.
* Any key length <= 256 bits (32 bytes) is allowed by the algorithm. * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
@ -552,6 +553,7 @@ static struct crypto_alg tnepres_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SERPENT_BLOCK_SIZE, .cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx), .cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_alg.cra_list), .cra_list = LIST_HEAD_INIT(serpent_alg.cra_list),
.cra_u = { .cipher = { .cra_u = { .cipher = {

View File

@@ -21,6 +21,7 @@
 #include <linux/mm.h>
 #include <linux/crypto.h>
 #include <linux/cryptohash.h>
+#include <linux/types.h>
 #include <asm/scatterlist.h>
 #include <asm/byteorder.h>
@@ -48,23 +49,33 @@ static void sha1_init(void *ctx)
 static void sha1_update(void *ctx, const u8 *data, unsigned int len)
 {
 	struct sha1_ctx *sctx = ctx;
-	unsigned int i, j;
+	unsigned int partial, done;
+	const u8 *src;
+
+	partial = sctx->count & 0x3f;
+	sctx->count += len;
+	done = 0;
+	src = data;
+
+	if ((partial + len) > 63) {
 		u32 temp[SHA_WORKSPACE_WORDS];
 
-	j = (sctx->count >> 3) & 0x3f;
-	sctx->count += len << 3;
-
-	if ((j + len) > 63) {
-		memcpy(&sctx->buffer[j], data, (i = 64-j));
-		sha_transform(sctx->state, sctx->buffer, temp);
-		for ( ; i + 63 < len; i += 64) {
-			sha_transform(sctx->state, &data[i], temp);
-		}
-		j = 0;
-	}
-	else i = 0;
+		if (partial) {
+			done = -partial;
+			memcpy(sctx->buffer + partial, data, done + 64);
+			src = sctx->buffer;
+		}
+
+		do {
+			sha_transform(sctx->state, src, temp);
+			done += 64;
+			src = data + done;
+		} while (done + 63 < len);
+
 		memset(temp, 0, sizeof(temp));
-	memcpy(&sctx->buffer[j], &data[i], len - i);
+		partial = 0;
+	}
+	memcpy(sctx->buffer + partial, src, len - done);
 }
@@ -72,37 +83,24 @@ static void sha1_update(void *ctx, const u8 *data, unsigned int len)
 static void sha1_final(void* ctx, u8 *out)
 {
 	struct sha1_ctx *sctx = ctx;
-	u32 i, j, index, padlen;
-	u64 t;
-	u8 bits[8] = { 0, };
+	__be32 *dst = (__be32 *)out;
+	u32 i, index, padlen;
+	__be64 bits;
 	static const u8 padding[64] = { 0x80, };
 
-	t = sctx->count;
-	bits[7] = 0xff & t; t>>=8;
-	bits[6] = 0xff & t; t>>=8;
-	bits[5] = 0xff & t; t>>=8;
-	bits[4] = 0xff & t; t>>=8;
-	bits[3] = 0xff & t; t>>=8;
-	bits[2] = 0xff & t; t>>=8;
-	bits[1] = 0xff & t; t>>=8;
-	bits[0] = 0xff & t;
+	bits = cpu_to_be64(sctx->count << 3);
 
 	/* Pad out to 56 mod 64 */
-	index = (sctx->count >> 3) & 0x3f;
+	index = sctx->count & 0x3f;
 	padlen = (index < 56) ? (56 - index) : ((64+56) - index);
 	sha1_update(sctx, padding, padlen);
 
 	/* Append length */
-	sha1_update(sctx, bits, sizeof bits);
+	sha1_update(sctx, (const u8 *)&bits, sizeof(bits));
 
 	/* Store state in digest */
-	for (i = j = 0; i < 5; i++, j += 4) {
-		u32 t2 = sctx->state[i];
-		out[j+3] = t2 & 0xff; t2>>=8;
-		out[j+2] = t2 & 0xff; t2>>=8;
-		out[j+1] = t2 & 0xff; t2>>=8;
-		out[j ] = t2 & 0xff;
-	}
+	for (i = 0; i < 5; i++)
+		dst[i] = cpu_to_be32(sctx->state[i]);
 
 	/* Wipe context */
 	memset(sctx, 0, sizeof *sctx);
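Two things change in the sha1.c hunks above: sctx->count now counts bytes rather than bits (hence the << 3 when the length is appended in sha1_final()), and sha1_update() replaces the old i/j bookkeeping with a done counter that starts at -partial, so that done + 64 is exactly the number of bytes needed to top up the 64-byte block buffer and, after each sha_transform(), done is the offset of the next unprocessed input byte. The toy program below only traces that arithmetic for one assumed call (24 bytes already buffered, 150 new bytes); it is not the kernel code.

#include <stdio.h>

int main(void)
{
	/* assumed example: 24 bytes already buffered, 150 new bytes arriving */
	unsigned int partial = 24, len = 150, done = 0;

	if (partial + len > 63) {
		if (partial) {
			done = -partial;	/* unsigned wrap-around, as in the patch */
			/* done + 64 == 64 - partial: bytes needed to fill the buffer */
			printf("copy %u bytes to finish the buffered block\n", done + 64);
		}
		do {
			/* a negative offset stands for the refilled private buffer,
			 * a non-negative one indexes straight into the caller's data */
			printf("sha_transform() on block at input offset %d\n", (int)done);
			done += 64;
		} while (done + 63 < len);
		partial = 0;
	}
	printf("stash %u tail bytes (from input offset %u) for next time\n",
	       len - done, done);
	return 0;
}

For these values it reports a 40-byte top-up copy, block transforms at offsets -24 and 40, and 46 stashed tail bytes, which is the same schedule the old i/j version arrived at less directly.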

View File

@ -20,6 +20,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/types.h>
#include <asm/scatterlist.h> #include <asm/scatterlist.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
@ -279,22 +280,15 @@ static void sha256_update(void *ctx, const u8 *data, unsigned int len)
static void sha256_final(void* ctx, u8 *out) static void sha256_final(void* ctx, u8 *out)
{ {
struct sha256_ctx *sctx = ctx; struct sha256_ctx *sctx = ctx;
u8 bits[8]; __be32 *dst = (__be32 *)out;
unsigned int index, pad_len, t; __be32 bits[2];
int i, j; unsigned int index, pad_len;
int i;
static const u8 padding[64] = { 0x80, }; static const u8 padding[64] = { 0x80, };
/* Save number of bits */ /* Save number of bits */
t = sctx->count[0]; bits[1] = cpu_to_be32(sctx->count[0]);
bits[7] = t; t >>= 8; bits[0] = cpu_to_be32(sctx->count[1]);
bits[6] = t; t >>= 8;
bits[5] = t; t >>= 8;
bits[4] = t;
t = sctx->count[1];
bits[3] = t; t >>= 8;
bits[2] = t; t >>= 8;
bits[1] = t; t >>= 8;
bits[0] = t;
/* Pad out to 56 mod 64. */ /* Pad out to 56 mod 64. */
index = (sctx->count[0] >> 3) & 0x3f; index = (sctx->count[0] >> 3) & 0x3f;
@ -302,16 +296,11 @@ static void sha256_final(void* ctx, u8 *out)
sha256_update(sctx, padding, pad_len); sha256_update(sctx, padding, pad_len);
/* Append length (before padding) */ /* Append length (before padding) */
sha256_update(sctx, bits, 8); sha256_update(sctx, (const u8 *)bits, sizeof(bits));
/* Store state in digest */ /* Store state in digest */
for (i = j = 0; i < 8; i++, j += 4) { for (i = 0; i < 8; i++)
t = sctx->state[i]; dst[i] = cpu_to_be32(sctx->state[i]);
out[j+3] = t; t >>= 8;
out[j+2] = t; t >>= 8;
out[j+1] = t; t >>= 8;
out[j ] = t;
}
/* Zeroize sensitive information. */ /* Zeroize sensitive information. */
memset(sctx, 0, sizeof(*sctx)); memset(sctx, 0, sizeof(*sctx));

View File

@ -17,6 +17,7 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/types.h>
#include <asm/scatterlist.h> #include <asm/scatterlist.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
@ -235,39 +236,17 @@ static void
sha512_final(void *ctx, u8 *hash) sha512_final(void *ctx, u8 *hash)
{ {
struct sha512_ctx *sctx = ctx; struct sha512_ctx *sctx = ctx;
static u8 padding[128] = { 0x80, }; static u8 padding[128] = { 0x80, };
__be64 *dst = (__be64 *)hash;
u32 t; __be32 bits[4];
u64 t2;
u8 bits[128];
unsigned int index, pad_len; unsigned int index, pad_len;
int i, j; int i;
index = pad_len = t = i = j = 0;
t2 = 0;
/* Save number of bits */ /* Save number of bits */
t = sctx->count[0]; bits[3] = cpu_to_be32(sctx->count[0]);
bits[15] = t; t>>=8; bits[2] = cpu_to_be32(sctx->count[1]);
bits[14] = t; t>>=8; bits[1] = cpu_to_be32(sctx->count[2]);
bits[13] = t; t>>=8; bits[0] = cpu_to_be32(sctx->count[3]);
bits[12] = t;
t = sctx->count[1];
bits[11] = t; t>>=8;
bits[10] = t; t>>=8;
bits[9 ] = t; t>>=8;
bits[8 ] = t;
t = sctx->count[2];
bits[7 ] = t; t>>=8;
bits[6 ] = t; t>>=8;
bits[5 ] = t; t>>=8;
bits[4 ] = t;
t = sctx->count[3];
bits[3 ] = t; t>>=8;
bits[2 ] = t; t>>=8;
bits[1 ] = t; t>>=8;
bits[0 ] = t;
/* Pad out to 112 mod 128. */ /* Pad out to 112 mod 128. */
index = (sctx->count[0] >> 3) & 0x7f; index = (sctx->count[0] >> 3) & 0x7f;
@ -275,20 +254,11 @@ sha512_final(void *ctx, u8 *hash)
sha512_update(sctx, padding, pad_len); sha512_update(sctx, padding, pad_len);
/* Append length (before padding) */ /* Append length (before padding) */
sha512_update(sctx, bits, 16); sha512_update(sctx, (const u8 *)bits, sizeof(bits));
/* Store state in digest */ /* Store state in digest */
for (i = j = 0; i < 8; i++, j += 8) { for (i = 0; i < 8; i++)
t2 = sctx->state[i]; dst[i] = cpu_to_be64(sctx->state[i]);
hash[j+7] = (char)t2 & 0xff; t2>>=8;
hash[j+6] = (char)t2 & 0xff; t2>>=8;
hash[j+5] = (char)t2 & 0xff; t2>>=8;
hash[j+4] = (char)t2 & 0xff; t2>>=8;
hash[j+3] = (char)t2 & 0xff; t2>>=8;
hash[j+2] = (char)t2 & 0xff; t2>>=8;
hash[j+1] = (char)t2 & 0xff; t2>>=8;
hash[j ] = (char)t2 & 0xff;
}
/* Zeroize sensitive information. */ /* Zeroize sensitive information. */
memset(sctx, 0, sizeof(struct sha512_ctx)); memset(sctx, 0, sizeof(struct sha512_ctx));

View File

@ -22,8 +22,10 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/byteorder.h>
#include <asm/scatterlist.h> #include <asm/scatterlist.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/types.h>
#define TEA_KEY_SIZE 16 #define TEA_KEY_SIZE 16
#define TEA_BLOCK_SIZE 8 #define TEA_BLOCK_SIZE 8
@ -35,9 +37,6 @@
#define XTEA_ROUNDS 32 #define XTEA_ROUNDS 32
#define XTEA_DELTA 0x9e3779b9 #define XTEA_DELTA 0x9e3779b9
#define u32_in(x) le32_to_cpu(*(const __le32 *)(x))
#define u32_out(to, from) (*(__le32 *)(to) = cpu_to_le32(from))
struct tea_ctx { struct tea_ctx {
u32 KEY[4]; u32 KEY[4];
}; };
@ -49,8 +48,8 @@ struct xtea_ctx {
static int tea_setkey(void *ctx_arg, const u8 *in_key, static int tea_setkey(void *ctx_arg, const u8 *in_key,
unsigned int key_len, u32 *flags) unsigned int key_len, u32 *flags)
{ {
struct tea_ctx *ctx = ctx_arg; struct tea_ctx *ctx = ctx_arg;
const __le32 *key = (const __le32 *)in_key;
if (key_len != 16) if (key_len != 16)
{ {
@ -58,10 +57,10 @@ static int tea_setkey(void *ctx_arg, const u8 *in_key,
return -EINVAL; return -EINVAL;
} }
ctx->KEY[0] = u32_in (in_key); ctx->KEY[0] = le32_to_cpu(key[0]);
ctx->KEY[1] = u32_in (in_key + 4); ctx->KEY[1] = le32_to_cpu(key[1]);
ctx->KEY[2] = u32_in (in_key + 8); ctx->KEY[2] = le32_to_cpu(key[2]);
ctx->KEY[3] = u32_in (in_key + 12); ctx->KEY[3] = le32_to_cpu(key[3]);
return 0; return 0;
@ -73,9 +72,11 @@ static void tea_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
u32 k0, k1, k2, k3; u32 k0, k1, k2, k3;
struct tea_ctx *ctx = ctx_arg; struct tea_ctx *ctx = ctx_arg;
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = u32_in (src); y = le32_to_cpu(in[0]);
z = u32_in (src + 4); z = le32_to_cpu(in[1]);
k0 = ctx->KEY[0]; k0 = ctx->KEY[0];
k1 = ctx->KEY[1]; k1 = ctx->KEY[1];
@ -90,19 +91,20 @@ static void tea_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3); z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
} }
u32_out (dst, y); out[0] = cpu_to_le32(y);
u32_out (dst + 4, z); out[1] = cpu_to_le32(z);
} }
static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
{ {
u32 y, z, n, sum; u32 y, z, n, sum;
u32 k0, k1, k2, k3; u32 k0, k1, k2, k3;
struct tea_ctx *ctx = ctx_arg; struct tea_ctx *ctx = ctx_arg;
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = u32_in (src); y = le32_to_cpu(in[0]);
z = u32_in (src + 4); z = le32_to_cpu(in[1]);
k0 = ctx->KEY[0]; k0 = ctx->KEY[0];
k1 = ctx->KEY[1]; k1 = ctx->KEY[1];
@ -119,16 +121,15 @@ static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
sum -= TEA_DELTA; sum -= TEA_DELTA;
} }
u32_out (dst, y); out[0] = cpu_to_le32(y);
u32_out (dst + 4, z); out[1] = cpu_to_le32(z);
} }
static int xtea_setkey(void *ctx_arg, const u8 *in_key, static int xtea_setkey(void *ctx_arg, const u8 *in_key,
unsigned int key_len, u32 *flags) unsigned int key_len, u32 *flags)
{ {
struct xtea_ctx *ctx = ctx_arg; struct xtea_ctx *ctx = ctx_arg;
const __le32 *key = (const __le32 *)in_key;
if (key_len != 16) if (key_len != 16)
{ {
@ -136,10 +137,10 @@ static int xtea_setkey(void *ctx_arg, const u8 *in_key,
return -EINVAL; return -EINVAL;
} }
ctx->KEY[0] = u32_in (in_key); ctx->KEY[0] = le32_to_cpu(key[0]);
ctx->KEY[1] = u32_in (in_key + 4); ctx->KEY[1] = le32_to_cpu(key[1]);
ctx->KEY[2] = u32_in (in_key + 8); ctx->KEY[2] = le32_to_cpu(key[2]);
ctx->KEY[3] = u32_in (in_key + 12); ctx->KEY[3] = le32_to_cpu(key[3]);
return 0; return 0;
@ -147,14 +148,15 @@ static int xtea_setkey(void *ctx_arg, const u8 *in_key,
static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
{ {
u32 y, z, sum = 0; u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS; u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = ctx_arg; struct xtea_ctx *ctx = ctx_arg;
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = u32_in (src); y = le32_to_cpu(in[0]);
z = u32_in (src + 4); z = le32_to_cpu(in[1]);
while (sum != limit) { while (sum != limit) {
y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]); y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]);
@ -162,19 +164,19 @@ static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]); z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]);
} }
u32_out (dst, y); out[0] = cpu_to_le32(y);
u32_out (dst + 4, z); out[1] = cpu_to_le32(z);
} }
static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
{ {
u32 y, z, sum; u32 y, z, sum;
struct tea_ctx *ctx = ctx_arg; struct tea_ctx *ctx = ctx_arg;
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = u32_in (src); y = le32_to_cpu(in[0]);
z = u32_in (src + 4); z = le32_to_cpu(in[1]);
sum = XTEA_DELTA * XTEA_ROUNDS; sum = XTEA_DELTA * XTEA_ROUNDS;
@ -184,22 +186,22 @@ static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]); y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]);
} }
u32_out (dst, y); out[0] = cpu_to_le32(y);
u32_out (dst + 4, z); out[1] = cpu_to_le32(z);
} }
static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src) static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
{ {
u32 y, z, sum = 0; u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS; u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = ctx_arg; struct xtea_ctx *ctx = ctx_arg;
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = u32_in (src); y = le32_to_cpu(in[0]);
z = u32_in (src + 4); z = le32_to_cpu(in[1]);
while (sum != limit) { while (sum != limit) {
y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3]; y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3];
@ -207,19 +209,19 @@ static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3]; z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3];
} }
u32_out (dst, y); out[0] = cpu_to_le32(y);
u32_out (dst + 4, z); out[1] = cpu_to_le32(z);
} }
static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src) static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
{ {
u32 y, z, sum; u32 y, z, sum;
struct tea_ctx *ctx = ctx_arg; struct tea_ctx *ctx = ctx_arg;
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = u32_in (src); y = le32_to_cpu(in[0]);
z = u32_in (src + 4); z = le32_to_cpu(in[1]);
sum = XTEA_DELTA * XTEA_ROUNDS; sum = XTEA_DELTA * XTEA_ROUNDS;
@ -229,9 +231,8 @@ static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3]; y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3];
} }
u32_out (dst, y); out[0] = cpu_to_le32(y);
u32_out (dst + 4, z); out[1] = cpu_to_le32(z);
} }
static struct crypto_alg tea_alg = { static struct crypto_alg tea_alg = {
@ -239,6 +240,7 @@ static struct crypto_alg tea_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TEA_BLOCK_SIZE, .cra_blocksize = TEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct tea_ctx), .cra_ctxsize = sizeof (struct tea_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(tea_alg.cra_list), .cra_list = LIST_HEAD_INIT(tea_alg.cra_list),
.cra_u = { .cipher = { .cra_u = { .cipher = {
@ -254,6 +256,7 @@ static struct crypto_alg xtea_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE, .cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx), .cra_ctxsize = sizeof (struct xtea_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(xtea_alg.cra_list), .cra_list = LIST_HEAD_INIT(xtea_alg.cra_list),
.cra_u = { .cipher = { .cra_u = { .cipher = {
@ -269,6 +272,7 @@ static struct crypto_alg xeta_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE, .cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx), .cra_ctxsize = sizeof (struct xtea_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(xtea_alg.cra_list), .cra_list = LIST_HEAD_INIT(xtea_alg.cra_list),
.cra_u = { .cipher = { .cra_u = { .cipher = {
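Note: the new .cra_alignmask = 3 entries matter because the ciphers above now cast u8 pointers to __le32 pointers; the mask asks the crypto layer to hand the cipher 4-byte-aligned buffers. A small userspace sketch of the rounding a caller honouring such a mask would perform (align_up() is illustrative, not a kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Round addr up to the next (mask + 1)-byte boundary, as a caller
 * honouring an alignmask of `mask` has to do. */
static uintptr_t align_up(uintptr_t addr, uintptr_t mask)
{
	return (addr + mask) & ~mask;
}

int main(void)
{
	const uintptr_t alignmask = 3;	/* 4-byte alignment, as for tea/xtea */
	uintptr_t addr;

	for (addr = 0x1000; addr < 0x1006; addr++)
		printf("%#lx -> %#lx (already aligned: %s)\n",
		       (unsigned long)addr,
		       (unsigned long)align_up(addr, alignmask),
		       (addr & alignmask) ? "no" : "yes");
	return 0;
}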

View File

@ -24,8 +24,10 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/byteorder.h>
#include <asm/scatterlist.h> #include <asm/scatterlist.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/types.h>
#define TGR192_DIGEST_SIZE 24 #define TGR192_DIGEST_SIZE 24
#define TGR160_DIGEST_SIZE 20 #define TGR160_DIGEST_SIZE 20
@ -467,18 +469,10 @@ static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data)
u64 a, b, c, aa, bb, cc; u64 a, b, c, aa, bb, cc;
u64 x[8]; u64 x[8];
int i; int i;
const u8 *ptr = data; const __le64 *ptr = (const __le64 *)data;
for (i = 0; i < 8; i++, ptr += 8) { for (i = 0; i < 8; i++)
x[i] = (((u64)ptr[7] ) << 56) ^ x[i] = le64_to_cpu(ptr[i]);
(((u64)ptr[6] & 0xffL) << 48) ^
(((u64)ptr[5] & 0xffL) << 40) ^
(((u64)ptr[4] & 0xffL) << 32) ^
(((u64)ptr[3] & 0xffL) << 24) ^
(((u64)ptr[2] & 0xffL) << 16) ^
(((u64)ptr[1] & 0xffL) << 8) ^
(((u64)ptr[0] & 0xffL) );
}
/* save */ /* save */
a = aa = tctx->a; a = aa = tctx->a;
@ -558,9 +552,10 @@ static void tgr192_update(void *ctx, const u8 * inbuf, unsigned int len)
static void tgr192_final(void *ctx, u8 * out) static void tgr192_final(void *ctx, u8 * out)
{ {
struct tgr192_ctx *tctx = ctx; struct tgr192_ctx *tctx = ctx;
__be64 *dst = (__be64 *)out;
__be64 *be64p;
__le32 *le32p;
u32 t, msb, lsb; u32 t, msb, lsb;
u8 *p;
int i, j;
tgr192_update(tctx, NULL, 0); /* flush */ ; tgr192_update(tctx, NULL, 0); /* flush */ ;
@ -594,41 +589,16 @@ static void tgr192_final(void *ctx, u8 * out)
memset(tctx->hash, 0, 56); /* fill next block with zeroes */ memset(tctx->hash, 0, 56); /* fill next block with zeroes */
} }
/* append the 64 bit count */ /* append the 64 bit count */
tctx->hash[56] = lsb; le32p = (__le32 *)&tctx->hash[56];
tctx->hash[57] = lsb >> 8; le32p[0] = cpu_to_le32(lsb);
tctx->hash[58] = lsb >> 16; le32p[1] = cpu_to_le32(msb);
tctx->hash[59] = lsb >> 24;
tctx->hash[60] = msb;
tctx->hash[61] = msb >> 8;
tctx->hash[62] = msb >> 16;
tctx->hash[63] = msb >> 24;
tgr192_transform(tctx, tctx->hash); tgr192_transform(tctx, tctx->hash);
p = tctx->hash; be64p = (__be64 *)tctx->hash;
*p++ = tctx->a >> 56; *p++ = tctx->a >> 48; *p++ = tctx->a >> 40; dst[0] = be64p[0] = cpu_to_be64(tctx->a);
*p++ = tctx->a >> 32; *p++ = tctx->a >> 24; *p++ = tctx->a >> 16; dst[1] = be64p[1] = cpu_to_be64(tctx->b);
*p++ = tctx->a >> 8; *p++ = tctx->a;\ dst[2] = be64p[2] = cpu_to_be64(tctx->c);
*p++ = tctx->b >> 56; *p++ = tctx->b >> 48; *p++ = tctx->b >> 40;
*p++ = tctx->b >> 32; *p++ = tctx->b >> 24; *p++ = tctx->b >> 16;
*p++ = tctx->b >> 8; *p++ = tctx->b;
*p++ = tctx->c >> 56; *p++ = tctx->c >> 48; *p++ = tctx->c >> 40;
*p++ = tctx->c >> 32; *p++ = tctx->c >> 24; *p++ = tctx->c >> 16;
*p++ = tctx->c >> 8; *p++ = tctx->c;
/* unpack the hash */
j = 7;
for (i = 0; i < 8; i++) {
out[j--] = (tctx->a >> 8 * i) & 0xff;
}
j = 15;
for (i = 0; i < 8; i++) {
out[j--] = (tctx->b >> 8 * i) & 0xff;
}
j = 23;
for (i = 0; i < 8; i++) {
out[j--] = (tctx->c >> 8 * i) & 0xff;
}
} }
static void tgr160_final(void *ctx, u8 * out) static void tgr160_final(void *ctx, u8 * out)

View File

@ -37,6 +37,8 @@
* Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
* Third Edition. * Third Edition.
*/ */
#include <asm/byteorder.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/types.h> #include <linux/types.h>
@ -621,13 +623,11 @@ static const u8 calc_sb_tbl[512] = {
* whitening subkey number m. */ * whitening subkey number m. */
#define INPACK(n, x, m) \ #define INPACK(n, x, m) \
x = in[4 * (n)] ^ (in[4 * (n) + 1] << 8) \ x = le32_to_cpu(src[n]) ^ ctx->w[m]
^ (in[4 * (n) + 2] << 16) ^ (in[4 * (n) + 3] << 24) ^ ctx->w[m]
#define OUTUNPACK(n, x, m) \ #define OUTUNPACK(n, x, m) \
x ^= ctx->w[m]; \ x ^= ctx->w[m]; \
out[4 * (n)] = x; out[4 * (n) + 1] = x >> 8; \ dst[n] = cpu_to_le32(x)
out[4 * (n) + 2] = x >> 16; out[4 * (n) + 3] = x >> 24
#define TF_MIN_KEY_SIZE 16 #define TF_MIN_KEY_SIZE 16
#define TF_MAX_KEY_SIZE 32 #define TF_MAX_KEY_SIZE 32
@ -804,6 +804,8 @@ static int twofish_setkey(void *cx, const u8 *key,
static void twofish_encrypt(void *cx, u8 *out, const u8 *in) static void twofish_encrypt(void *cx, u8 *out, const u8 *in)
{ {
struct twofish_ctx *ctx = cx; struct twofish_ctx *ctx = cx;
const __le32 *src = (const __le32 *)in;
__le32 *dst = (__le32 *)out;
/* The four 32-bit chunks of the text. */ /* The four 32-bit chunks of the text. */
u32 a, b, c, d; u32 a, b, c, d;
@ -839,6 +841,8 @@ static void twofish_encrypt(void *cx, u8 *out, const u8 *in)
static void twofish_decrypt(void *cx, u8 *out, const u8 *in) static void twofish_decrypt(void *cx, u8 *out, const u8 *in)
{ {
struct twofish_ctx *ctx = cx; struct twofish_ctx *ctx = cx;
const __le32 *src = (const __le32 *)in;
__le32 *dst = (__le32 *)out;
/* The four 32-bit chunks of the text. */ /* The four 32-bit chunks of the text. */
u32 a, b, c, d; u32 a, b, c, d;
@ -875,6 +879,7 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TF_BLOCK_SIZE, .cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx), .cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .cipher = { .cra_u = { .cipher = {

View File

@ -22,8 +22,10 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/byteorder.h>
#include <asm/scatterlist.h> #include <asm/scatterlist.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/types.h>
#define WP512_DIGEST_SIZE 64 #define WP512_DIGEST_SIZE 64
#define WP384_DIGEST_SIZE 48 #define WP384_DIGEST_SIZE 48
@ -778,19 +780,10 @@ static void wp512_process_buffer(struct wp512_ctx *wctx) {
u64 block[8]; /* mu(buffer) */ u64 block[8]; /* mu(buffer) */
u64 state[8]; /* the cipher state */ u64 state[8]; /* the cipher state */
u64 L[8]; u64 L[8];
u8 *buffer = wctx->buffer; const __be64 *buffer = (const __be64 *)wctx->buffer;
for (i = 0; i < 8; i++, buffer += 8) { for (i = 0; i < 8; i++)
block[i] = block[i] = be64_to_cpu(buffer[i]);
(((u64)buffer[0] ) << 56) ^
(((u64)buffer[1] & 0xffL) << 48) ^
(((u64)buffer[2] & 0xffL) << 40) ^
(((u64)buffer[3] & 0xffL) << 32) ^
(((u64)buffer[4] & 0xffL) << 24) ^
(((u64)buffer[5] & 0xffL) << 16) ^
(((u64)buffer[6] & 0xffL) << 8) ^
(((u64)buffer[7] & 0xffL) );
}
state[0] = block[0] ^ (K[0] = wctx->hash[0]); state[0] = block[0] ^ (K[0] = wctx->hash[0]);
state[1] = block[1] ^ (K[1] = wctx->hash[1]); state[1] = block[1] ^ (K[1] = wctx->hash[1]);
@ -1069,7 +1062,7 @@ static void wp512_final(void *ctx, u8 *out)
u8 *bitLength = wctx->bitLength; u8 *bitLength = wctx->bitLength;
int bufferBits = wctx->bufferBits; int bufferBits = wctx->bufferBits;
int bufferPos = wctx->bufferPos; int bufferPos = wctx->bufferPos;
u8 *digest = out; __be64 *digest = (__be64 *)out;
buffer[bufferPos] |= 0x80U >> (bufferBits & 7); buffer[bufferPos] |= 0x80U >> (bufferBits & 7);
bufferPos++; bufferPos++;
@ -1088,17 +1081,8 @@ static void wp512_final(void *ctx, u8 *out)
memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES], memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES],
bitLength, WP512_LENGTHBYTES); bitLength, WP512_LENGTHBYTES);
wp512_process_buffer(wctx); wp512_process_buffer(wctx);
for (i = 0; i < WP512_DIGEST_SIZE/8; i++) { for (i = 0; i < WP512_DIGEST_SIZE/8; i++)
digest[0] = (u8)(wctx->hash[i] >> 56); digest[i] = cpu_to_be64(wctx->hash[i]);
digest[1] = (u8)(wctx->hash[i] >> 48);
digest[2] = (u8)(wctx->hash[i] >> 40);
digest[3] = (u8)(wctx->hash[i] >> 32);
digest[4] = (u8)(wctx->hash[i] >> 24);
digest[5] = (u8)(wctx->hash[i] >> 16);
digest[6] = (u8)(wctx->hash[i] >> 8);
digest[7] = (u8)(wctx->hash[i] );
digest += 8;
}
wctx->bufferBits = bufferBits; wctx->bufferBits = bufferBits;
wctx->bufferPos = bufferPos; wctx->bufferPos = bufferPos;
} }

View File

@ -99,9 +99,6 @@ byte(const uint32_t x, const unsigned n)
return x >> (n << 3); return x >> (n << 3);
} }
#define uint32_t_in(x) le32_to_cpu(*(const uint32_t *)(x))
#define uint32_t_out(to, from) (*(uint32_t *)(to) = cpu_to_le32(from))
#define E_KEY ctx->E #define E_KEY ctx->E
#define D_KEY ctx->D #define D_KEY ctx->D
@ -294,6 +291,7 @@ static int
aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags) aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags)
{ {
struct aes_ctx *ctx = aes_ctx(ctx_arg); struct aes_ctx *ctx = aes_ctx(ctx_arg);
const __le32 *key = (const __le32 *)in_key;
uint32_t i, t, u, v, w; uint32_t i, t, u, v, w;
uint32_t P[AES_EXTENDED_KEY_SIZE]; uint32_t P[AES_EXTENDED_KEY_SIZE];
uint32_t rounds; uint32_t rounds;
@ -313,10 +311,10 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t
ctx->E = ctx->e_data; ctx->E = ctx->e_data;
ctx->D = ctx->e_data; ctx->D = ctx->e_data;
E_KEY[0] = uint32_t_in (in_key); E_KEY[0] = le32_to_cpu(key[0]);
E_KEY[1] = uint32_t_in (in_key + 4); E_KEY[1] = le32_to_cpu(key[1]);
E_KEY[2] = uint32_t_in (in_key + 8); E_KEY[2] = le32_to_cpu(key[2]);
E_KEY[3] = uint32_t_in (in_key + 12); E_KEY[3] = le32_to_cpu(key[3]);
/* Prepare control words. */ /* Prepare control words. */
memset(&ctx->cword, 0, sizeof(ctx->cword)); memset(&ctx->cword, 0, sizeof(ctx->cword));
@ -343,17 +341,17 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t
break; break;
case 24: case 24:
E_KEY[4] = uint32_t_in (in_key + 16); E_KEY[4] = le32_to_cpu(key[4]);
t = E_KEY[5] = uint32_t_in (in_key + 20); t = E_KEY[5] = le32_to_cpu(key[5]);
for (i = 0; i < 8; ++i) for (i = 0; i < 8; ++i)
loop6 (i); loop6 (i);
break; break;
case 32: case 32:
E_KEY[4] = uint32_t_in (in_key + 16); E_KEY[4] = le32_to_cpu(key[4]);
E_KEY[5] = uint32_t_in (in_key + 20); E_KEY[5] = le32_to_cpu(key[5]);
E_KEY[6] = uint32_t_in (in_key + 24); E_KEY[6] = le32_to_cpu(key[6]);
t = E_KEY[7] = uint32_t_in (in_key + 28); t = E_KEY[7] = le32_to_cpu(key[7]);
for (i = 0; i < 7; ++i) for (i = 0; i < 7; ++i)
loop8 (i); loop8 (i);
break; break;
@ -468,6 +466,8 @@ static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
static struct crypto_alg aes_alg = { static struct crypto_alg aes_alg = {
.cra_name = "aes", .cra_name = "aes",
.cra_driver_name = "aes-padlock",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE, .cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx), .cra_ctxsize = sizeof(struct aes_ctx),

View File

@ -17,7 +17,7 @@
/* Control word. */ /* Control word. */
struct cword { struct cword {
int __attribute__ ((__packed__)) unsigned int __attribute__ ((__packed__))
rounds:4, rounds:4,
algo:3, algo:3,
keygen:1, keygen:1,
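Note: switching the cword bitfields from int to unsigned int matters because whether a plain-int bitfield is signed is implementation-defined, and a signed 4-bit rounds field cannot hold the AES round counts 10/12/14. A small userspace sketch of the difference; the struct names and the reduced field set are illustrative only:

#include <stdio.h>

struct cword_plain    { int          rounds:4, algo:3, keygen:1; };
struct cword_unsigned { unsigned int rounds:4, algo:3, keygen:1; };

int main(void)
{
	int aes256_rounds = 14;		/* AES-256 uses 14 rounds */
	struct cword_plain    s = { 0 };
	struct cword_unsigned u = { 0 };

	s.rounds = aes256_rounds;
	u.rounds = aes256_rounds;

	/* With GCC/Clang defaults the plain-int field is signed, so the
	 * 4-bit pattern 1110 reads back as -2; the unsigned field keeps 14. */
	printf("int bitfield:          %d\n", s.rounds);
	printf("unsigned int bitfield: %d\n", u.rounds);
	return 0;
}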

View File

@ -27,6 +27,19 @@ config NETDEVICES
# that for each of the symbols. # that for each of the symbols.
if NETDEVICES if NETDEVICES
config IFB
tristate "Intermediate Functional Block support"
depends on NET_CLS_ACT
---help---
This is an intermediate driver that allows sharing of
resources.
To compile this driver as a module, choose M here: the module
will be called ifb. If you want to use more than one ifb
device at a time, you need to compile this driver as a module.
Instead of 'ifb', the devices will then be called 'ifb0',
'ifb1' etc.
Look at the iproute2 documentation directory for usage etc.
config DUMMY config DUMMY
tristate "Dummy net driver support" tristate "Dummy net driver support"
---help--- ---help---

View File

@ -125,6 +125,7 @@ ifeq ($(CONFIG_SLIP_COMPRESSED),y)
endif endif
obj-$(CONFIG_DUMMY) += dummy.o obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_DE600) += de600.o obj-$(CONFIG_DE600) += de600.o
obj-$(CONFIG_DE620) += de620.o obj-$(CONFIG_DE620) += de620.o
obj-$(CONFIG_LANCE) += lance.o obj-$(CONFIG_LANCE) += lance.o

View File

@ -515,6 +515,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
count = kiss_esc(p, (unsigned char *)ax->xbuff, len); count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
} }
} }
spin_unlock_bh(&ax->buflock);
set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
actual = ax->tty->driver->write(ax->tty, ax->xbuff, count); actual = ax->tty->driver->write(ax->tty, ax->xbuff, count);
@ -524,7 +525,6 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
ax->dev->trans_start = jiffies; ax->dev->trans_start = jiffies;
ax->xleft = count - actual; ax->xleft = count - actual;
ax->xhead = ax->xbuff + actual; ax->xhead = ax->xbuff + actual;
spin_unlock_bh(&ax->buflock);
} }
/* Encapsulate an AX.25 packet and kick it into a TTY queue. */ /* Encapsulate an AX.25 packet and kick it into a TTY queue. */

drivers/net/ifb.c (new file, 294 lines)
View File

@ -0,0 +1,294 @@
/* drivers/net/ifb.c:
The purpose of this driver is to provide a device that allows
for sharing of resources:
1) qdiscs/policies that are per device as opposed to system wide.
ifb allows for a device which can be redirected to thus providing
an impression of sharing.
2) Allows for queueing incoming traffic for shaping instead of
dropping.
The original concept is based on what is known as the IMQ
driver initially written by Martin Devera, later rewritten
by Patrick McHardy and then maintained by Andre Correa.
You need the tc action mirror or redirect to feed this device
packets.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version
2 of the License, or (at your option) any later version.
Authors: Jamal Hadi Salim (2005)
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>
#define TX_TIMEOUT (2*HZ)
#define TX_Q_LIMIT 32
struct ifb_private {
struct net_device_stats stats;
struct tasklet_struct ifb_tasklet;
int tasklet_pending;
/* mostly debug stats leave in for now */
unsigned long st_task_enter; /* tasklet entered */
unsigned long st_txq_refl_try; /* transmit queue refill attempt */
unsigned long st_rxq_enter; /* receive queue entered */
unsigned long st_rx2tx_tran; /* receive to transmit transfers */
unsigned long st_rxq_notenter; /* receiveQ not entered, resched */
unsigned long st_rx_frm_egr; /* received from egress path */
unsigned long st_rx_frm_ing; /* received from ingress path */
unsigned long st_rxq_check;
unsigned long st_rxq_rsch;
struct sk_buff_head rq;
struct sk_buff_head tq;
};
static int numifbs = 1;
static void ri_tasklet(unsigned long dev);
static int ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *ifb_get_stats(struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);
static void ri_tasklet(unsigned long dev)
{
struct net_device *_dev = (struct net_device *)dev;
struct ifb_private *dp = netdev_priv(_dev);
struct net_device_stats *stats = &dp->stats;
struct sk_buff *skb;
dp->st_task_enter++;
if ((skb = skb_peek(&dp->tq)) == NULL) {
dp->st_txq_refl_try++;
if (spin_trylock(&_dev->xmit_lock)) {
dp->st_rxq_enter++;
while ((skb = skb_dequeue(&dp->rq)) != NULL) {
skb_queue_tail(&dp->tq, skb);
dp->st_rx2tx_tran++;
}
spin_unlock(&_dev->xmit_lock);
} else {
/* reschedule */
dp->st_rxq_notenter++;
goto resched;
}
}
while ((skb = skb_dequeue(&dp->tq)) != NULL) {
u32 from = G_TC_FROM(skb->tc_verd);
skb->tc_verd = 0;
skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
stats->tx_packets++;
stats->tx_bytes +=skb->len;
if (from & AT_EGRESS) {
dp->st_rx_frm_egr++;
dev_queue_xmit(skb);
} else if (from & AT_INGRESS) {
dp->st_rx_frm_ing++;
netif_rx(skb);
} else {
dev_kfree_skb(skb);
stats->tx_dropped++;
}
}
if (spin_trylock(&_dev->xmit_lock)) {
dp->st_rxq_check++;
if ((skb = skb_peek(&dp->rq)) == NULL) {
dp->tasklet_pending = 0;
if (netif_queue_stopped(_dev))
netif_wake_queue(_dev);
} else {
dp->st_rxq_rsch++;
spin_unlock(&_dev->xmit_lock);
goto resched;
}
spin_unlock(&_dev->xmit_lock);
} else {
resched:
dp->tasklet_pending = 1;
tasklet_schedule(&dp->ifb_tasklet);
}
}
static void __init ifb_setup(struct net_device *dev)
{
/* Initialize the device structure. */
dev->get_stats = ifb_get_stats;
dev->hard_start_xmit = ifb_xmit;
dev->open = &ifb_open;
dev->stop = &ifb_close;
/* Fill in device structure with ethernet-generic values. */
ether_setup(dev);
dev->tx_queue_len = TX_Q_LIMIT;
dev->change_mtu = NULL;
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
SET_MODULE_OWNER(dev);
random_ether_addr(dev->dev_addr);
}
static int ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ifb_private *dp = netdev_priv(dev);
struct net_device_stats *stats = &dp->stats;
int ret = 0;
u32 from = G_TC_FROM(skb->tc_verd);
stats->tx_packets++;
stats->tx_bytes+=skb->len;
if (!from || !skb->input_dev) {
dropped:
dev_kfree_skb(skb);
stats->rx_dropped++;
return ret;
} else {
/*
* note we could be going
* ingress -> egress or
* egress -> ingress
*/
skb->dev = skb->input_dev;
skb->input_dev = dev;
if (from & AT_INGRESS) {
skb_pull(skb, skb->dev->hard_header_len);
} else {
if (!(from & AT_EGRESS)) {
goto dropped;
}
}
}
if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
netif_stop_queue(dev);
}
dev->trans_start = jiffies;
skb_queue_tail(&dp->rq, skb);
if (!dp->tasklet_pending) {
dp->tasklet_pending = 1;
tasklet_schedule(&dp->ifb_tasklet);
}
return ret;
}
static struct net_device_stats *ifb_get_stats(struct net_device *dev)
{
struct ifb_private *dp = netdev_priv(dev);
struct net_device_stats *stats = &dp->stats;
pr_debug("tasklets stats %ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld \n",
dp->st_task_enter, dp->st_txq_refl_try, dp->st_rxq_enter,
dp->st_rx2tx_tran, dp->st_rxq_notenter, dp->st_rx_frm_egr,
dp->st_rx_frm_ing, dp->st_rxq_check, dp->st_rxq_rsch );
return stats;
}
static struct net_device **ifbs;
/* Number of ifb devices to be set up by this module. */
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");
static int ifb_close(struct net_device *dev)
{
struct ifb_private *dp = netdev_priv(dev);
tasklet_kill(&dp->ifb_tasklet);
netif_stop_queue(dev);
skb_queue_purge(&dp->rq);
skb_queue_purge(&dp->tq);
return 0;
}
static int ifb_open(struct net_device *dev)
{
struct ifb_private *dp = netdev_priv(dev);
tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
skb_queue_head_init(&dp->rq);
skb_queue_head_init(&dp->tq);
netif_start_queue(dev);
return 0;
}
static int __init ifb_init_one(int index)
{
struct net_device *dev_ifb;
int err;
dev_ifb = alloc_netdev(sizeof(struct ifb_private),
"ifb%d", ifb_setup);
if (!dev_ifb)
return -ENOMEM;
if ((err = register_netdev(dev_ifb))) {
free_netdev(dev_ifb);
dev_ifb = NULL;
} else {
ifbs[index] = dev_ifb;
}
return err;
}
static void ifb_free_one(int index)
{
unregister_netdev(ifbs[index]);
free_netdev(ifbs[index]);
}
static int __init ifb_init_module(void)
{
int i, err = 0;
ifbs = kmalloc(numifbs * sizeof(void *), GFP_KERNEL);
if (!ifbs)
return -ENOMEM;
for (i = 0; i < numifbs && !err; i++)
err = ifb_init_one(i);
if (err) {
while (--i >= 0)
ifb_free_one(i);
}
return err;
}
static void __exit ifb_cleanup_module(void)
{
int i;
for (i = 0; i < numifbs; i++)
ifb_free_one(i);
kfree(ifbs);
}
module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");

View File

@ -3,6 +3,7 @@
* *
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2002 David S. Miller (davem@redhat.com) * Copyright (c) 2002 David S. Miller (davem@redhat.com)
* Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
* *
* Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
* and Nettle, by Niels Möller. * and Nettle, by Niels Möller.
@ -126,7 +127,11 @@ struct crypto_alg {
unsigned int cra_blocksize; unsigned int cra_blocksize;
unsigned int cra_ctxsize; unsigned int cra_ctxsize;
unsigned int cra_alignmask; unsigned int cra_alignmask;
int cra_priority;
const char cra_name[CRYPTO_MAX_ALG_NAME]; const char cra_name[CRYPTO_MAX_ALG_NAME];
const char cra_driver_name[CRYPTO_MAX_ALG_NAME];
union { union {
struct cipher_alg cipher; struct cipher_alg cipher;

View File

@ -63,7 +63,7 @@ struct tc_action_ops
__u32 type; /* TBD to match kind */ __u32 type; /* TBD to match kind */
__u32 capab; /* capabilities includes 4 bit version */ __u32 capab; /* capabilities includes 4 bit version */
struct module *owner; struct module *owner;
int (*act)(struct sk_buff **, struct tc_action *, struct tcf_result *); int (*act)(struct sk_buff *, struct tc_action *, struct tcf_result *);
int (*get_stats)(struct sk_buff *, struct tc_action *); int (*get_stats)(struct sk_buff *, struct tc_action *);
int (*dump)(struct sk_buff *, struct tc_action *,int , int); int (*dump)(struct sk_buff *, struct tc_action *,int , int);
int (*cleanup)(struct tc_action *, int bind); int (*cleanup)(struct tc_action *, int bind);

View File

@ -1,6 +1,7 @@
#ifndef __NET_PKT_SCHED_H #ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H #define __NET_PKT_SCHED_H
#include <linux/jiffies.h>
#include <net/sch_generic.h> #include <net/sch_generic.h>
struct qdisc_walker struct qdisc_walker
@ -59,8 +60,8 @@ typedef struct timeval psched_time_t;
typedef long psched_tdiff_t; typedef long psched_tdiff_t;
#define PSCHED_GET_TIME(stamp) do_gettimeofday(&(stamp)) #define PSCHED_GET_TIME(stamp) do_gettimeofday(&(stamp))
#define PSCHED_US2JIFFIE(usecs) (((usecs)+(1000000/HZ-1))/(1000000/HZ)) #define PSCHED_US2JIFFIE(usecs) usecs_to_jiffies(usecs)
#define PSCHED_JIFFIE2US(delay) ((delay)*(1000000/HZ)) #define PSCHED_JIFFIE2US(delay) jiffies_to_usecs(delay)
#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */ #else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
@ -123,9 +124,9 @@ do { \
default: \ default: \
__delta = 0; \ __delta = 0; \
case 2: \ case 2: \
__delta += 1000000; \ __delta += USEC_PER_SEC; \
case 1: \ case 1: \
__delta += 1000000; \ __delta += USEC_PER_SEC; \
} \ } \
} \ } \
__delta; \ __delta; \
@ -136,9 +137,9 @@ psched_tod_diff(int delta_sec, int bound)
{ {
int delta; int delta;
if (bound <= 1000000 || delta_sec > (0x7FFFFFFF/1000000)-1) if (bound <= USEC_PER_SEC || delta_sec > (0x7FFFFFFF/USEC_PER_SEC)-1)
return bound; return bound;
delta = delta_sec * 1000000; delta = delta_sec * USEC_PER_SEC;
if (delta > bound || delta < 0) if (delta > bound || delta < 0)
delta = bound; delta = bound;
return delta; return delta;
@ -152,9 +153,9 @@ psched_tod_diff(int delta_sec, int bound)
default: \ default: \
__delta = psched_tod_diff(__delta_sec, bound); break; \ __delta = psched_tod_diff(__delta_sec, bound); break; \
case 2: \ case 2: \
__delta += 1000000; \ __delta += USEC_PER_SEC; \
case 1: \ case 1: \
__delta += 1000000; \ __delta += USEC_PER_SEC; \
case 0: \ case 0: \
if (__delta > bound || __delta < 0) \ if (__delta > bound || __delta < 0) \
__delta = bound; \ __delta = bound; \
@ -170,15 +171,15 @@ psched_tod_diff(int delta_sec, int bound)
({ \ ({ \
int __delta = (tv).tv_usec + (delta); \ int __delta = (tv).tv_usec + (delta); \
(tv_res).tv_sec = (tv).tv_sec; \ (tv_res).tv_sec = (tv).tv_sec; \
if (__delta > 1000000) { (tv_res).tv_sec++; __delta -= 1000000; } \ if (__delta > USEC_PER_SEC) { (tv_res).tv_sec++; __delta -= USEC_PER_SEC; } \
(tv_res).tv_usec = __delta; \ (tv_res).tv_usec = __delta; \
}) })
#define PSCHED_TADD(tv, delta) \ #define PSCHED_TADD(tv, delta) \
({ \ ({ \
(tv).tv_usec += (delta); \ (tv).tv_usec += (delta); \
if ((tv).tv_usec > 1000000) { (tv).tv_sec++; \ if ((tv).tv_usec > USEC_PER_SEC) { (tv).tv_sec++; \
(tv).tv_usec -= 1000000; } \ (tv).tv_usec -= USEC_PER_SEC; } \
}) })
/* Set/check that time is in the "past perfect"; /* Set/check that time is in the "past perfect";
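Note: PSCHED_US2JIFFIE used to open-code a round-up division by the microseconds-per-tick, and usecs_to_jiffies() is expected to preserve that rounding. A standalone check of the arithmetic, with HZ fixed at 1000 purely as an assumption of the example:

#include <stdio.h>

#define HZ           1000
#define USEC_PER_SEC 1000000L

/* The rounding the old PSCHED_US2JIFFIE macro performed: round
 * microseconds up to whole ticks. */
static long us_to_ticks(long usecs)
{
	const long us_per_tick = USEC_PER_SEC / HZ;	/* 1000 at HZ=1000 */

	return (usecs + us_per_tick - 1) / us_per_tick;
}

int main(void)
{
	long samples[] = { 0, 1, 999, 1000, 1001, 2500 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%5ld us -> %ld jiffies\n", samples[i],
		       us_to_ticks(samples[i]));
	return 0;
}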

View File

@ -1092,15 +1092,12 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
goto out; goto out;
} }
if (offset > (int)skb->len) BUG_ON(offset > (int)skb->len);
BUG();
csum = skb_checksum(skb, offset, skb->len-offset, 0); csum = skb_checksum(skb, offset, skb->len-offset, 0);
offset = skb->tail - skb->h.raw; offset = skb->tail - skb->h.raw;
if (offset <= 0) BUG_ON(offset <= 0);
BUG(); BUG_ON(skb->csum + 2 > offset);
if (skb->csum + 2 > offset)
BUG();
*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
skb->ip_summed = CHECKSUM_NONE; skb->ip_summed = CHECKSUM_NONE;
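Note: here and in the skbuff, icmp and inetpeer hunks below, the `if (cond) BUG();` pairs collapse into BUG_ON(cond), the idiomatic one-line spelling. A userspace stand-in that aborts where the kernel would oops, just to show the shape of the check (the real macro is more involved):

#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for the kernel's BUG_ON(). */
#define BUG_ON(cond)						\
	do {							\
		if (cond) {					\
			fprintf(stderr, "BUG at %s:%d\n",	\
				__FILE__, __LINE__);		\
			abort();				\
		}						\
	} while (0)

int main(void)
{
	int offset = 4, len = 16;

	BUG_ON(offset > len);	/* passes: no output, execution continues */
	printf("offset %d within len %d\n", offset, len);

	BUG_ON(len < 0);	/* also passes */
	return 0;
}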

View File

@ -791,8 +791,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
int end = offset + skb_shinfo(skb)->frags[i].size; int end = offset + skb_shinfo(skb)->frags[i].size;
if (end > len) { if (end > len) {
if (skb_cloned(skb)) { if (skb_cloned(skb)) {
if (!realloc) BUG_ON(!realloc);
BUG();
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
return -ENOMEM; return -ENOMEM;
} }
@ -894,8 +893,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
struct sk_buff *insp = NULL; struct sk_buff *insp = NULL;
do { do {
if (!list) BUG_ON(!list);
BUG();
if (list->len <= eat) { if (list->len <= eat) {
/* Eaten as whole. */ /* Eaten as whole. */
@ -1199,8 +1197,7 @@ unsigned int skb_checksum(const struct sk_buff *skb, int offset,
start = end; start = end;
} }
} }
if (len) BUG_ON(len);
BUG();
return csum; return csum;
} }
@ -1282,8 +1279,7 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
start = end; start = end;
} }
} }
if (len) BUG_ON(len);
BUG();
return csum; return csum;
} }
@ -1297,8 +1293,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
else else
csstart = skb_headlen(skb); csstart = skb_headlen(skb);
if (csstart > skb_headlen(skb)) BUG_ON(csstart > skb_headlen(skb));
BUG();
memcpy(to, skb->data, csstart); memcpy(to, skb->data, csstart);

View File

@ -899,8 +899,7 @@ static void icmp_address_reply(struct sk_buff *skb)
u32 _mask, *mp; u32 _mask, *mp;
mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask); mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask);
if (mp == NULL) BUG_ON(mp == NULL);
BUG();
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
if (*mp == ifa->ifa_mask && if (*mp == ifa->ifa_mask &&
inet_ifa_match(rt->rt_src, ifa)) inet_ifa_match(rt->rt_src, ifa))

View File

@ -50,7 +50,8 @@ static struct sock *idiagnl;
#define INET_DIAG_PUT(skb, attrtype, attrlen) \ #define INET_DIAG_PUT(skb, attrtype, attrlen) \
RTA_DATA(__RTA_PUT(skb, attrtype, attrlen)) RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
static int inet_diag_fill(struct sk_buff *skb, struct sock *sk, static int inet_csk_diag_fill(struct sock *sk,
struct sk_buff *skb,
int ext, u32 pid, u32 seq, u16 nlmsg_flags, int ext, u32 pid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh) const struct nlmsghdr *unlh)
{ {
@ -70,20 +71,22 @@ static int inet_diag_fill(struct sk_buff *skb, struct sock *sk,
nlh->nlmsg_flags = nlmsg_flags; nlh->nlmsg_flags = nlmsg_flags;
r = NLMSG_DATA(nlh); r = NLMSG_DATA(nlh);
if (sk->sk_state != TCP_TIME_WAIT) { BUG_ON(sk->sk_state == TCP_TIME_WAIT);
if (ext & (1 << (INET_DIAG_MEMINFO - 1))) if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));
sizeof(*minfo));
if (ext & (1 << (INET_DIAG_INFO - 1))) if (ext & (1 << (INET_DIAG_INFO - 1)))
info = INET_DIAG_PUT(skb, INET_DIAG_INFO, info = INET_DIAG_PUT(skb, INET_DIAG_INFO,
handler->idiag_info_size); handler->idiag_info_size);
if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) { if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
size_t len = strlen(icsk->icsk_ca_ops->name); const size_t len = strlen(icsk->icsk_ca_ops->name);
strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1), strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
icsk->icsk_ca_ops->name); icsk->icsk_ca_ops->name);
} }
}
r->idiag_family = sk->sk_family; r->idiag_family = sk->sk_family;
r->idiag_state = sk->sk_state; r->idiag_state = sk->sk_state;
r->idiag_timer = 0; r->idiag_timer = 0;
@ -93,37 +96,6 @@ static int inet_diag_fill(struct sk_buff *skb, struct sock *sk,
r->id.idiag_cookie[0] = (u32)(unsigned long)sk; r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
if (r->idiag_state == TCP_TIME_WAIT) {
const struct inet_timewait_sock *tw = inet_twsk(sk);
long tmo = tw->tw_ttd - jiffies;
if (tmo < 0)
tmo = 0;
r->id.idiag_sport = tw->tw_sport;
r->id.idiag_dport = tw->tw_dport;
r->id.idiag_src[0] = tw->tw_rcv_saddr;
r->id.idiag_dst[0] = tw->tw_daddr;
r->idiag_state = tw->tw_substate;
r->idiag_timer = 3;
r->idiag_expires = (tmo * 1000 + HZ - 1) / HZ;
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
r->idiag_uid = 0;
r->idiag_inode = 0;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
if (r->idiag_family == AF_INET6) {
const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
&tw6->tw_v6_rcv_saddr);
ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
&tw6->tw_v6_daddr);
}
#endif
nlh->nlmsg_len = skb->tail - b;
return skb->len;
}
r->id.idiag_sport = inet->sport; r->id.idiag_sport = inet->sport;
r->id.idiag_dport = inet->dport; r->id.idiag_dport = inet->dport;
r->id.idiag_src[0] = inet->rcv_saddr; r->id.idiag_src[0] = inet->rcv_saddr;
@ -185,7 +157,75 @@ static int inet_diag_fill(struct sk_buff *skb, struct sock *sk,
return -1; return -1;
} }
static int inet_diag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nlh) static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
struct sk_buff *skb, int ext, u32 pid,
u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
long tmo;
struct inet_diag_msg *r;
const unsigned char *previous_tail = skb->tail;
struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq,
unlh->nlmsg_type, sizeof(*r));
r = NLMSG_DATA(nlh);
BUG_ON(tw->tw_state != TCP_TIME_WAIT);
nlh->nlmsg_flags = nlmsg_flags;
tmo = tw->tw_ttd - jiffies;
if (tmo < 0)
tmo = 0;
r->idiag_family = tw->tw_family;
r->idiag_state = tw->tw_state;
r->idiag_timer = 0;
r->idiag_retrans = 0;
r->id.idiag_if = tw->tw_bound_dev_if;
r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
r->id.idiag_sport = tw->tw_sport;
r->id.idiag_dport = tw->tw_dport;
r->id.idiag_src[0] = tw->tw_rcv_saddr;
r->id.idiag_dst[0] = tw->tw_daddr;
r->idiag_state = tw->tw_substate;
r->idiag_timer = 3;
r->idiag_expires = (tmo * 1000 + HZ - 1) / HZ;
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
r->idiag_uid = 0;
r->idiag_inode = 0;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
if (tw->tw_family == AF_INET6) {
const struct inet6_timewait_sock *tw6 =
inet6_twsk((struct sock *)tw);
ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
&tw6->tw_v6_rcv_saddr);
ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
&tw6->tw_v6_daddr);
}
#endif
nlh->nlmsg_len = skb->tail - previous_tail;
return skb->len;
nlmsg_failure:
skb_trim(skb, previous_tail - skb->data);
return -1;
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
int ext, u32 pid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
if (sk->sk_state == TCP_TIME_WAIT)
return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
skb, ext, pid, seq, nlmsg_flags,
unlh);
return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh);
}
static int inet_diag_get_exact(struct sk_buff *in_skb,
const struct nlmsghdr *nlh)
{ {
int err; int err;
struct sock *sk; struct sock *sk;
@ -235,7 +275,7 @@ static int inet_diag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nl
if (!rep) if (!rep)
goto out; goto out;
if (inet_diag_fill(rep, sk, req->idiag_ext, if (sk_diag_fill(sk, rep, req->idiag_ext,
NETLINK_CB(in_skb).pid, NETLINK_CB(in_skb).pid,
nlh->nlmsg_seq, 0, nlh) <= 0) nlh->nlmsg_seq, 0, nlh) <= 0)
BUG(); BUG();
@ -331,7 +371,8 @@ static int inet_diag_bc_run(const void *bc, int len,
else else
addr = entry->daddr; addr = entry->daddr;
if (bitstring_match(addr, cond->addr, cond->prefix_len)) if (bitstring_match(addr, cond->addr,
cond->prefix_len))
break; break;
if (entry->family == AF_INET6 && if (entry->family == AF_INET6 &&
cond->family == AF_INET) { cond->family == AF_INET) {
@ -413,7 +454,8 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
return len == 0 ? 0 : -EINVAL; return len == 0 ? 0 : -EINVAL;
} }
static int inet_diag_dump_sock(struct sk_buff *skb, struct sock *sk, static int inet_csk_diag_dump(struct sock *sk,
struct sk_buff *skb,
struct netlink_callback *cb) struct netlink_callback *cb)
{ {
struct inet_diag_req *r = NLMSG_DATA(cb->nlh); struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
@ -444,13 +486,49 @@ static int inet_diag_dump_sock(struct sk_buff *skb, struct sock *sk,
return 0; return 0;
} }
return inet_diag_fill(skb, sk, r->idiag_ext, NETLINK_CB(cb->skb).pid, return inet_csk_diag_fill(sk, skb, r->idiag_ext,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
struct sk_buff *skb,
struct netlink_callback *cb)
{
struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
struct inet_diag_entry entry;
struct rtattr *bc = (struct rtattr *)(r + 1);
entry.family = tw->tw_family;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
if (tw->tw_family == AF_INET6) {
struct inet6_timewait_sock *tw6 =
inet6_twsk((struct sock *)tw);
entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
entry.daddr = tw6->tw_v6_daddr.s6_addr32;
} else
#endif
{
entry.saddr = &tw->tw_rcv_saddr;
entry.daddr = &tw->tw_daddr;
}
entry.sport = tw->tw_num;
entry.dport = ntohs(tw->tw_dport);
entry.userlocks = 0;
if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
return 0;
}
return inet_twsk_diag_fill(tw, skb, r->idiag_ext,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
} }
static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
struct request_sock *req, struct request_sock *req, u32 pid, u32 seq,
u32 pid, u32 seq,
const struct nlmsghdr *unlh) const struct nlmsghdr *unlh)
{ {
const struct inet_request_sock *ireq = inet_rsk(req); const struct inet_request_sock *ireq = inet_rsk(req);
@ -630,7 +708,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[3] > 0) cb->args[3] > 0)
goto syn_recv; goto syn_recv;
if (inet_diag_dump_sock(skb, sk, cb) < 0) { if (inet_csk_diag_dump(sk, skb, cb) < 0) {
inet_listen_unlock(hashinfo); inet_listen_unlock(hashinfo);
goto done; goto done;
} }
@ -672,7 +750,6 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
s_num = 0; s_num = 0;
read_lock_bh(&head->lock); read_lock_bh(&head->lock);
num = 0; num = 0;
sk_for_each(sk, node, &head->chain) { sk_for_each(sk, node, &head->chain) {
struct inet_sock *inet = inet_sk(sk); struct inet_sock *inet = inet_sk(sk);
@ -684,9 +761,10 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (r->id.idiag_sport != inet->sport && if (r->id.idiag_sport != inet->sport &&
r->id.idiag_sport) r->id.idiag_sport)
goto next_normal; goto next_normal;
if (r->id.idiag_dport != inet->dport && r->id.idiag_dport) if (r->id.idiag_dport != inet->dport &&
r->id.idiag_dport)
goto next_normal; goto next_normal;
if (inet_diag_dump_sock(skb, sk, cb) < 0) { if (inet_csk_diag_dump(sk, skb, cb) < 0) {
read_unlock_bh(&head->lock); read_unlock_bh(&head->lock);
goto done; goto done;
} }
@ -695,19 +773,20 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
} }
if (r->idiag_states & TCPF_TIME_WAIT) { if (r->idiag_states & TCPF_TIME_WAIT) {
sk_for_each(sk, node, struct inet_timewait_sock *tw;
inet_twsk_for_each(tw, node,
&hashinfo->ehash[i + hashinfo->ehash_size].chain) { &hashinfo->ehash[i + hashinfo->ehash_size].chain) {
struct inet_sock *inet = inet_sk(sk);
if (num < s_num) if (num < s_num)
goto next_dying; goto next_dying;
if (r->id.idiag_sport != inet->sport && if (r->id.idiag_sport != tw->tw_sport &&
r->id.idiag_sport) r->id.idiag_sport)
goto next_dying; goto next_dying;
if (r->id.idiag_dport != inet->dport && if (r->id.idiag_dport != tw->tw_dport &&
r->id.idiag_dport) r->id.idiag_dport)
goto next_dying; goto next_dying;
if (inet_diag_dump_sock(skb, sk, cb) < 0) { if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
read_unlock_bh(&head->lock); read_unlock_bh(&head->lock);
goto done; goto done;
} }
@ -724,8 +803,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len; return skb->len;
} }
static __inline__ int static inline int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{ {
if (!(nlh->nlmsg_flags&NLM_F_REQUEST)) if (!(nlh->nlmsg_flags&NLM_F_REQUEST))
return 0; return 0;
@ -755,9 +833,8 @@ inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
} }
return netlink_dump_start(idiagnl, skb, nlh, return netlink_dump_start(idiagnl, skb, nlh,
inet_diag_dump, NULL); inet_diag_dump, NULL);
} else { } else
return inet_diag_get_exact(skb, nlh); return inet_diag_get_exact(skb, nlh);
}
err_inval: err_inval:
return -EINVAL; return -EINVAL;
@ -766,12 +843,12 @@ inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
static inline void inet_diag_rcv_skb(struct sk_buff *skb) static inline void inet_diag_rcv_skb(struct sk_buff *skb)
{ {
int err;
struct nlmsghdr * nlh;
if (skb->len >= NLMSG_SPACE(0)) { if (skb->len >= NLMSG_SPACE(0)) {
nlh = (struct nlmsghdr *)skb->data; int err;
if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
if (nlh->nlmsg_len < sizeof(*nlh) ||
skb->len < nlh->nlmsg_len)
return; return;
err = inet_diag_rcv_msg(skb, nlh); err = inet_diag_rcv_msg(skb, nlh);
if (err || nlh->nlmsg_flags & NLM_F_ACK) if (err || nlh->nlmsg_flags & NLM_F_ACK)

View File

@ -304,8 +304,7 @@ static void unlink_from_pool(struct inet_peer *p)
/* look for a node to insert instead of p */ /* look for a node to insert instead of p */
struct inet_peer *t; struct inet_peer *t;
t = lookup_rightempty(p); t = lookup_rightempty(p);
if (*stackptr[-1] != t) BUG_ON(*stackptr[-1] != t);
BUG();
**--stackptr = t->avl_left; **--stackptr = t->avl_left;
/* t is removed, t->v4daddr > x->v4daddr for any /* t is removed, t->v4daddr > x->v4daddr for any
* x in p->avl_left subtree. * x in p->avl_left subtree.
@ -314,8 +313,7 @@ static void unlink_from_pool(struct inet_peer *p)
t->avl_left = p->avl_left; t->avl_left = p->avl_left;
t->avl_right = p->avl_right; t->avl_right = p->avl_right;
t->avl_height = p->avl_height; t->avl_height = p->avl_height;
if (delp[1] != &p->avl_left) BUG_ON(delp[1] != &p->avl_left);
BUG();
delp[1] = &t->avl_left; /* was &p->avl_left */ delp[1] = &t->avl_left; /* was &p->avl_left */
} }
peer_avl_rebalance(stack, stackptr); peer_avl_rebalance(stack, stackptr);

View File

@ -188,7 +188,7 @@ static struct ip_tunnel * ipgre_tunnel_lookup(u32 remote, u32 local, u32 key)
} }
if (ipgre_fb_tunnel_dev->flags&IFF_UP) if (ipgre_fb_tunnel_dev->flags&IFF_UP)
return ipgre_fb_tunnel_dev->priv; return netdev_priv(ipgre_fb_tunnel_dev);
return NULL; return NULL;
} }
@ -278,7 +278,7 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int
return NULL; return NULL;
dev->init = ipgre_tunnel_init; dev->init = ipgre_tunnel_init;
nt = dev->priv; nt = netdev_priv(dev);
nt->parms = *parms; nt->parms = *parms;
if (register_netdevice(dev) < 0) { if (register_netdevice(dev) < 0) {
@ -286,9 +286,6 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int
goto failed; goto failed;
} }
nt = dev->priv;
nt->parms = *parms;
dev_hold(dev); dev_hold(dev);
ipgre_tunnel_link(nt); ipgre_tunnel_link(nt);
return nt; return nt;
@ -299,7 +296,7 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int
static void ipgre_tunnel_uninit(struct net_device *dev) static void ipgre_tunnel_uninit(struct net_device *dev)
{ {
ipgre_tunnel_unlink((struct ip_tunnel*)dev->priv); ipgre_tunnel_unlink(netdev_priv(dev));
dev_put(dev); dev_put(dev);
} }
@ -518,7 +515,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
skb2->dst->ops->update_pmtu(skb2->dst, rel_info); skb2->dst->ops->update_pmtu(skb2->dst, rel_info);
rel_info = htonl(rel_info); rel_info = htonl(rel_info);
} else if (type == ICMP_TIME_EXCEEDED) { } else if (type == ICMP_TIME_EXCEEDED) {
struct ip_tunnel *t = (struct ip_tunnel*)skb2->dev->priv; struct ip_tunnel *t = netdev_priv(skb2->dev);
if (t->parms.iph.ttl) { if (t->parms.iph.ttl) {
rel_type = ICMP_DEST_UNREACH; rel_type = ICMP_DEST_UNREACH;
rel_code = ICMP_HOST_UNREACH; rel_code = ICMP_HOST_UNREACH;
@ -669,7 +666,7 @@ static int ipgre_rcv(struct sk_buff *skb)
static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; struct ip_tunnel *tunnel = netdev_priv(dev);
struct net_device_stats *stats = &tunnel->stat; struct net_device_stats *stats = &tunnel->stat;
struct iphdr *old_iph = skb->nh.iph; struct iphdr *old_iph = skb->nh.iph;
struct iphdr *tiph; struct iphdr *tiph;
@ -915,7 +912,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
t = ipgre_tunnel_locate(&p, 0); t = ipgre_tunnel_locate(&p, 0);
} }
if (t == NULL) if (t == NULL)
t = (struct ip_tunnel*)dev->priv; t = netdev_priv(dev);
memcpy(&p, &t->parms, sizeof(p)); memcpy(&p, &t->parms, sizeof(p));
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT; err = -EFAULT;
@ -955,7 +952,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
} else { } else {
unsigned nflags=0; unsigned nflags=0;
t = (struct ip_tunnel*)dev->priv; t = netdev_priv(dev);
if (MULTICAST(p.iph.daddr)) if (MULTICAST(p.iph.daddr))
nflags = IFF_BROADCAST; nflags = IFF_BROADCAST;
@ -1004,7 +1001,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
if ((t = ipgre_tunnel_locate(&p, 0)) == NULL) if ((t = ipgre_tunnel_locate(&p, 0)) == NULL)
goto done; goto done;
err = -EPERM; err = -EPERM;
if (t == ipgre_fb_tunnel_dev->priv) if (t == netdev_priv(ipgre_fb_tunnel_dev))
goto done; goto done;
dev = t->dev; dev = t->dev;
} }
@ -1021,12 +1018,12 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev) static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev)
{ {
return &(((struct ip_tunnel*)dev->priv)->stat); return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
} }
static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{ {
struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; struct ip_tunnel *tunnel = netdev_priv(dev);
if (new_mtu < 68 || new_mtu > 0xFFF8 - tunnel->hlen) if (new_mtu < 68 || new_mtu > 0xFFF8 - tunnel->hlen)
return -EINVAL; return -EINVAL;
dev->mtu = new_mtu; dev->mtu = new_mtu;
@ -1066,7 +1063,7 @@ static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
void *daddr, void *saddr, unsigned len) void *daddr, void *saddr, unsigned len)
{ {
struct ip_tunnel *t = (struct ip_tunnel*)dev->priv; struct ip_tunnel *t = netdev_priv(dev);
struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen); struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
u16 *p = (u16*)(iph+1); u16 *p = (u16*)(iph+1);
@ -1093,7 +1090,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned sh
static int ipgre_open(struct net_device *dev) static int ipgre_open(struct net_device *dev)
{ {
struct ip_tunnel *t = (struct ip_tunnel*)dev->priv; struct ip_tunnel *t = netdev_priv(dev);
if (MULTICAST(t->parms.iph.daddr)) { if (MULTICAST(t->parms.iph.daddr)) {
struct flowi fl = { .oif = t->parms.link, struct flowi fl = { .oif = t->parms.link,
@ -1117,7 +1114,7 @@ static int ipgre_open(struct net_device *dev)
static int ipgre_close(struct net_device *dev) static int ipgre_close(struct net_device *dev)
{ {
struct ip_tunnel *t = (struct ip_tunnel*)dev->priv; struct ip_tunnel *t = netdev_priv(dev);
if (MULTICAST(t->parms.iph.daddr) && t->mlink) { if (MULTICAST(t->parms.iph.daddr) && t->mlink) {
struct in_device *in_dev = inetdev_by_index(t->mlink); struct in_device *in_dev = inetdev_by_index(t->mlink);
if (in_dev) { if (in_dev) {
@ -1157,7 +1154,7 @@ static int ipgre_tunnel_init(struct net_device *dev)
int mtu = ETH_DATA_LEN; int mtu = ETH_DATA_LEN;
int addend = sizeof(struct iphdr) + 4; int addend = sizeof(struct iphdr) + 4;
tunnel = (struct ip_tunnel*)dev->priv; tunnel = netdev_priv(dev);
iph = &tunnel->parms.iph; iph = &tunnel->parms.iph;
tunnel->dev = dev; tunnel->dev = dev;
@ -1221,7 +1218,7 @@ static int ipgre_tunnel_init(struct net_device *dev)
static int __init ipgre_fb_tunnel_init(struct net_device *dev) static int __init ipgre_fb_tunnel_init(struct net_device *dev)
{ {
struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph; struct iphdr *iph = &tunnel->parms.iph;
tunnel->dev = dev; tunnel->dev = dev;

View File

@ -69,6 +69,7 @@
#include <net/ip.h> #include <net/ip.h>
#include <net/protocol.h> #include <net/protocol.h>
#include <net/route.h> #include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <net/sock.h> #include <net/sock.h>
#include <net/arp.h> #include <net/arp.h>

View File

@ -244,7 +244,7 @@ static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int c
if (dev == NULL) if (dev == NULL)
return NULL; return NULL;
nt = dev->priv; nt = netdev_priv(dev);
SET_MODULE_OWNER(dev); SET_MODULE_OWNER(dev);
dev->init = ipip_tunnel_init; dev->init = ipip_tunnel_init;
nt->parms = *parms; nt->parms = *parms;
@ -269,7 +269,7 @@ static void ipip_tunnel_uninit(struct net_device *dev)
tunnels_wc[0] = NULL; tunnels_wc[0] = NULL;
write_unlock_bh(&ipip_lock); write_unlock_bh(&ipip_lock);
} else } else
ipip_tunnel_unlink((struct ip_tunnel*)dev->priv); ipip_tunnel_unlink(netdev_priv(dev));
dev_put(dev); dev_put(dev);
} }
@ -443,7 +443,7 @@ static void ipip_err(struct sk_buff *skb, u32 info)
skb2->dst->ops->update_pmtu(skb2->dst, rel_info); skb2->dst->ops->update_pmtu(skb2->dst, rel_info);
rel_info = htonl(rel_info); rel_info = htonl(rel_info);
} else if (type == ICMP_TIME_EXCEEDED) { } else if (type == ICMP_TIME_EXCEEDED) {
struct ip_tunnel *t = (struct ip_tunnel*)skb2->dev->priv; struct ip_tunnel *t = netdev_priv(skb2->dev);
if (t->parms.iph.ttl) { if (t->parms.iph.ttl) {
rel_type = ICMP_DEST_UNREACH; rel_type = ICMP_DEST_UNREACH;
rel_code = ICMP_HOST_UNREACH; rel_code = ICMP_HOST_UNREACH;
@ -514,7 +514,7 @@ static int ipip_rcv(struct sk_buff *skb)
static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; struct ip_tunnel *tunnel = netdev_priv(dev);
struct net_device_stats *stats = &tunnel->stat; struct net_device_stats *stats = &tunnel->stat;
struct iphdr *tiph = &tunnel->parms.iph; struct iphdr *tiph = &tunnel->parms.iph;
u8 tos = tunnel->parms.iph.tos; u8 tos = tunnel->parms.iph.tos;
@ -674,7 +674,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
t = ipip_tunnel_locate(&p, 0); t = ipip_tunnel_locate(&p, 0);
} }
if (t == NULL) if (t == NULL)
t = (struct ip_tunnel*)dev->priv; t = netdev_priv(dev);
memcpy(&p, &t->parms, sizeof(p)); memcpy(&p, &t->parms, sizeof(p));
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT; err = -EFAULT;
@ -711,7 +711,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
err = -EINVAL; err = -EINVAL;
break; break;
} }
t = (struct ip_tunnel*)dev->priv; t = netdev_priv(dev);
ipip_tunnel_unlink(t); ipip_tunnel_unlink(t);
t->parms.iph.saddr = p.iph.saddr; t->parms.iph.saddr = p.iph.saddr;
t->parms.iph.daddr = p.iph.daddr; t->parms.iph.daddr = p.iph.daddr;
@ -765,7 +765,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
static struct net_device_stats *ipip_tunnel_get_stats(struct net_device *dev) static struct net_device_stats *ipip_tunnel_get_stats(struct net_device *dev)
{ {
return &(((struct ip_tunnel*)dev->priv)->stat); return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
} }
static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu) static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
@ -800,7 +800,7 @@ static int ipip_tunnel_init(struct net_device *dev)
struct ip_tunnel *tunnel; struct ip_tunnel *tunnel;
struct iphdr *iph; struct iphdr *iph;
tunnel = (struct ip_tunnel*)dev->priv; tunnel = netdev_priv(dev);
iph = &tunnel->parms.iph; iph = &tunnel->parms.iph;
tunnel->dev = dev; tunnel->dev = dev;
@ -838,7 +838,7 @@ static int ipip_tunnel_init(struct net_device *dev)
static int __init ipip_fb_tunnel_init(struct net_device *dev) static int __init ipip_fb_tunnel_init(struct net_device *dev)
{ {
struct ip_tunnel *tunnel = dev->priv; struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph; struct iphdr *iph = &tunnel->parms.iph;
tunnel->dev = dev; tunnel->dev = dev;

View File

@ -178,8 +178,8 @@ static int reg_vif_num = -1;
static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
read_lock(&mrt_lock); read_lock(&mrt_lock);
((struct net_device_stats*)dev->priv)->tx_bytes += skb->len; ((struct net_device_stats*)netdev_priv(dev))->tx_bytes += skb->len;
((struct net_device_stats*)dev->priv)->tx_packets++; ((struct net_device_stats*)netdev_priv(dev))->tx_packets++;
ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT); ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
read_unlock(&mrt_lock); read_unlock(&mrt_lock);
kfree_skb(skb); kfree_skb(skb);
@ -188,7 +188,7 @@ static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
static struct net_device_stats *reg_vif_get_stats(struct net_device *dev) static struct net_device_stats *reg_vif_get_stats(struct net_device *dev)
{ {
return (struct net_device_stats*)dev->priv; return (struct net_device_stats*)netdev_priv(dev);
} }
static void reg_vif_setup(struct net_device *dev) static void reg_vif_setup(struct net_device *dev)
@ -1149,8 +1149,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
if (vif->flags & VIFF_REGISTER) { if (vif->flags & VIFF_REGISTER) {
vif->pkt_out++; vif->pkt_out++;
vif->bytes_out+=skb->len; vif->bytes_out+=skb->len;
((struct net_device_stats*)vif->dev->priv)->tx_bytes += skb->len; ((struct net_device_stats*)netdev_priv(vif->dev))->tx_bytes += skb->len;
((struct net_device_stats*)vif->dev->priv)->tx_packets++; ((struct net_device_stats*)netdev_priv(vif->dev))->tx_packets++;
ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
kfree_skb(skb); kfree_skb(skb);
return; return;
@ -1210,8 +1210,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
if (vif->flags & VIFF_TUNNEL) { if (vif->flags & VIFF_TUNNEL) {
ip_encap(skb, vif->local, vif->remote); ip_encap(skb, vif->local, vif->remote);
/* FIXME: extra output firewall step used to be here. --RR */ /* FIXME: extra output firewall step used to be here. --RR */
((struct ip_tunnel *)vif->dev->priv)->stat.tx_packets++; ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_packets++;
((struct ip_tunnel *)vif->dev->priv)->stat.tx_bytes+=skb->len; ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_bytes+=skb->len;
} }
IPCB(skb)->flags |= IPSKB_FORWARDED; IPCB(skb)->flags |= IPSKB_FORWARDED;
@ -1467,8 +1467,8 @@ int pim_rcv_v1(struct sk_buff * skb)
skb->pkt_type = PACKET_HOST; skb->pkt_type = PACKET_HOST;
dst_release(skb->dst); dst_release(skb->dst);
skb->dst = NULL; skb->dst = NULL;
((struct net_device_stats*)reg_dev->priv)->rx_bytes += skb->len; ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len;
((struct net_device_stats*)reg_dev->priv)->rx_packets++; ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++;
nf_reset(skb); nf_reset(skb);
netif_rx(skb); netif_rx(skb);
dev_put(reg_dev); dev_put(reg_dev);
@ -1522,8 +1522,8 @@ static int pim_rcv(struct sk_buff * skb)
skb->ip_summed = 0; skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST; skb->pkt_type = PACKET_HOST;
dst_release(skb->dst); dst_release(skb->dst);
((struct net_device_stats*)reg_dev->priv)->rx_bytes += skb->len; ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len;
((struct net_device_stats*)reg_dev->priv)->rx_packets++; ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++;
skb->dst = NULL; skb->dst = NULL;
nf_reset(skb); nf_reset(skb);
netif_rx(skb); netif_rx(skb);
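
For the PIM register vif the private area is simply the struct net_device_stats itself, so the same netdev_priv() conversion applies. A hedged sketch of how the repeated casts in the hunks above could be cached locally (helper name is hypothetical):

#include <linux/netdevice.h>

static void reg_vif_count_rx(struct net_device *dev, unsigned int len)
{
        struct net_device_stats *stats = netdev_priv(dev);

        stats->rx_bytes += len;
        stats->rx_packets++;
}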

View File

@ -3347,7 +3347,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
int offset = start - TCP_SKB_CB(skb)->seq; int offset = start - TCP_SKB_CB(skb)->seq;
int size = TCP_SKB_CB(skb)->end_seq - start; int size = TCP_SKB_CB(skb)->end_seq - start;
if (offset < 0) BUG(); BUG_ON(offset < 0);
if (size > 0) { if (size > 0) {
size = min(copy, size); size = min(copy, size);
if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
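
This hunk, and several further down, fold open-coded "if (cond) BUG();" checks into BUG_ON(). A minimal standalone illustration of the conversion (not from the commit):

#include <linux/kernel.h>       /* BUG(), BUG_ON() via asm/bug.h */

static void check_offset(int offset)
{
        /* before: if (offset < 0) BUG(); */
        BUG_ON(offset < 0);     /* same trap, reads as an assertion */
}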

View File

@ -226,6 +226,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
ipv6_addr_copy(&hdr->saddr, &fl->fl6_src); ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
ipv6_addr_copy(&hdr->daddr, first_hop); ipv6_addr_copy(&hdr->daddr, first_hop);
skb->priority = sk->sk_priority;
mtu = dst_mtu(dst); mtu = dst_mtu(dst);
if ((skb->len <= mtu) || ipfragok) { if ((skb->len <= mtu) || ipfragok) {
IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
@ -1182,6 +1184,8 @@ int ip6_push_pending_frames(struct sock *sk)
ipv6_addr_copy(&hdr->saddr, &fl->fl6_src); ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
ipv6_addr_copy(&hdr->daddr, final_dst); ipv6_addr_copy(&hdr->daddr, final_dst);
skb->priority = sk->sk_priority;
skb->dst = dst_clone(&rt->u.dst); skb->dst = dst_clone(&rt->u.dst);
IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output); err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
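
The two hunks above copy the socket priority onto locally generated IPv6 packets before they enter the netfilter and output path, presumably bringing ip6_xmit() and ip6_push_pending_frames() in line with IPv4, where qdiscs can already classify on skb->priority. The added assignment in isolation (helper name is hypothetical):

#include <linux/skbuff.h>
#include <net/sock.h>

static void inherit_sk_priority(struct sk_buff *skb, struct sock *sk)
{
        skb->priority = sk->sk_priority;        /* value set via SO_PRIORITY */
}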

View File

@ -243,7 +243,7 @@ ip6_tnl_create(struct ip6_tnl_parm *p, struct ip6_tnl **pt)
if (dev == NULL) if (dev == NULL)
return -ENOMEM; return -ENOMEM;
t = dev->priv; t = netdev_priv(dev);
dev->init = ip6ip6_tnl_dev_init; dev->init = ip6ip6_tnl_dev_init;
t->parms = *p; t->parms = *p;
@ -308,7 +308,7 @@ ip6ip6_tnl_locate(struct ip6_tnl_parm *p, struct ip6_tnl **pt, int create)
static void static void
ip6ip6_tnl_dev_uninit(struct net_device *dev) ip6ip6_tnl_dev_uninit(struct net_device *dev)
{ {
struct ip6_tnl *t = dev->priv; struct ip6_tnl *t = netdev_priv(dev);
if (dev == ip6ip6_fb_tnl_dev) { if (dev == ip6ip6_fb_tnl_dev) {
write_lock_bh(&ip6ip6_lock); write_lock_bh(&ip6ip6_lock);
@ -623,7 +623,7 @@ ip6ip6_tnl_addr_conflict(struct ip6_tnl *t, struct ipv6hdr *hdr)
static int static int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
struct ip6_tnl *t = (struct ip6_tnl *) dev->priv; struct ip6_tnl *t = netdev_priv(dev);
struct net_device_stats *stats = &t->stat; struct net_device_stats *stats = &t->stat;
struct ipv6hdr *ipv6h = skb->nh.ipv6h; struct ipv6hdr *ipv6h = skb->nh.ipv6h;
struct ipv6_txoptions *opt = NULL; struct ipv6_txoptions *opt = NULL;
@ -933,11 +933,11 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break; break;
} }
if ((err = ip6ip6_tnl_locate(&p, &t, 0)) == -ENODEV) if ((err = ip6ip6_tnl_locate(&p, &t, 0)) == -ENODEV)
t = (struct ip6_tnl *) dev->priv; t = netdev_priv(dev);
else if (err) else if (err)
break; break;
} else } else
t = (struct ip6_tnl *) dev->priv; t = netdev_priv(dev);
memcpy(&p, &t->parms, sizeof (p)); memcpy(&p, &t->parms, sizeof (p));
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) { if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
@ -955,7 +955,7 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break; break;
} }
if (!create && dev != ip6ip6_fb_tnl_dev) { if (!create && dev != ip6ip6_fb_tnl_dev) {
t = (struct ip6_tnl *) dev->priv; t = netdev_priv(dev);
} }
if (!t && (err = ip6ip6_tnl_locate(&p, &t, create))) { if (!t && (err = ip6ip6_tnl_locate(&p, &t, create))) {
break; break;
@ -991,12 +991,12 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
err = ip6ip6_tnl_locate(&p, &t, 0); err = ip6ip6_tnl_locate(&p, &t, 0);
if (err) if (err)
break; break;
if (t == ip6ip6_fb_tnl_dev->priv) { if (t == netdev_priv(ip6ip6_fb_tnl_dev)) {
err = -EPERM; err = -EPERM;
break; break;
} }
} else { } else {
t = (struct ip6_tnl *) dev->priv; t = netdev_priv(dev);
} }
err = unregister_netdevice(t->dev); err = unregister_netdevice(t->dev);
break; break;
@ -1016,7 +1016,7 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static struct net_device_stats * static struct net_device_stats *
ip6ip6_tnl_get_stats(struct net_device *dev) ip6ip6_tnl_get_stats(struct net_device *dev)
{ {
return &(((struct ip6_tnl *) dev->priv)->stat); return &(((struct ip6_tnl *)netdev_priv(dev))->stat);
} }
/** /**
@ -1073,7 +1073,7 @@ static void ip6ip6_tnl_dev_setup(struct net_device *dev)
static inline void static inline void
ip6ip6_tnl_dev_init_gen(struct net_device *dev) ip6ip6_tnl_dev_init_gen(struct net_device *dev)
{ {
struct ip6_tnl *t = (struct ip6_tnl *) dev->priv; struct ip6_tnl *t = netdev_priv(dev);
t->fl.proto = IPPROTO_IPV6; t->fl.proto = IPPROTO_IPV6;
t->dev = dev; t->dev = dev;
strcpy(t->parms.name, dev->name); strcpy(t->parms.name, dev->name);
@ -1087,7 +1087,7 @@ ip6ip6_tnl_dev_init_gen(struct net_device *dev)
static int static int
ip6ip6_tnl_dev_init(struct net_device *dev) ip6ip6_tnl_dev_init(struct net_device *dev)
{ {
struct ip6_tnl *t = (struct ip6_tnl *) dev->priv; struct ip6_tnl *t = netdev_priv(dev);
ip6ip6_tnl_dev_init_gen(dev); ip6ip6_tnl_dev_init_gen(dev);
ip6ip6_tnl_link_config(t); ip6ip6_tnl_link_config(t);
return 0; return 0;
@ -1103,7 +1103,7 @@ ip6ip6_tnl_dev_init(struct net_device *dev)
static int static int
ip6ip6_fb_tnl_dev_init(struct net_device *dev) ip6ip6_fb_tnl_dev_init(struct net_device *dev)
{ {
struct ip6_tnl *t = dev->priv; struct ip6_tnl *t = netdev_priv(dev);
ip6ip6_tnl_dev_init_gen(dev); ip6ip6_tnl_dev_init_gen(dev);
dev_hold(dev); dev_hold(dev);
tnls_wc[0] = t; tnls_wc[0] = t;

View File

@ -184,7 +184,7 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int
if (dev == NULL) if (dev == NULL)
return NULL; return NULL;
nt = dev->priv; nt = netdev_priv(dev);
dev->init = ipip6_tunnel_init; dev->init = ipip6_tunnel_init;
nt->parms = *parms; nt->parms = *parms;
@ -210,7 +210,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
write_unlock_bh(&ipip6_lock); write_unlock_bh(&ipip6_lock);
dev_put(dev); dev_put(dev);
} else { } else {
ipip6_tunnel_unlink((struct ip_tunnel*)dev->priv); ipip6_tunnel_unlink(netdev_priv(dev));
dev_put(dev); dev_put(dev);
} }
} }
@ -346,7 +346,7 @@ static void ipip6_err(struct sk_buff *skb, u32 info)
rt6i = rt6_lookup(&iph6->daddr, &iph6->saddr, NULL, 0); rt6i = rt6_lookup(&iph6->daddr, &iph6->saddr, NULL, 0);
if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) { if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) {
struct ip_tunnel * t = (struct ip_tunnel*)rt6i->rt6i_dev->priv; struct ip_tunnel *t = netdev_priv(rt6i->rt6i_dev);
if (rel_type == ICMPV6_TIME_EXCEED && t->parms.iph.ttl) { if (rel_type == ICMPV6_TIME_EXCEED && t->parms.iph.ttl) {
rel_type = ICMPV6_DEST_UNREACH; rel_type = ICMPV6_DEST_UNREACH;
rel_code = ICMPV6_ADDR_UNREACH; rel_code = ICMPV6_ADDR_UNREACH;
@ -424,7 +424,7 @@ static inline u32 try_6to4(struct in6_addr *v6dst)
static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; struct ip_tunnel *tunnel = netdev_priv(dev);
struct net_device_stats *stats = &tunnel->stat; struct net_device_stats *stats = &tunnel->stat;
struct iphdr *tiph = &tunnel->parms.iph; struct iphdr *tiph = &tunnel->parms.iph;
struct ipv6hdr *iph6 = skb->nh.ipv6h; struct ipv6hdr *iph6 = skb->nh.ipv6h;
@ -610,7 +610,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
t = ipip6_tunnel_locate(&p, 0); t = ipip6_tunnel_locate(&p, 0);
} }
if (t == NULL) if (t == NULL)
t = (struct ip_tunnel*)dev->priv; t = netdev_priv(dev);
memcpy(&p, &t->parms, sizeof(p)); memcpy(&p, &t->parms, sizeof(p));
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT; err = -EFAULT;
@ -647,7 +647,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
err = -EINVAL; err = -EINVAL;
break; break;
} }
t = (struct ip_tunnel*)dev->priv; t = netdev_priv(dev);
ipip6_tunnel_unlink(t); ipip6_tunnel_unlink(t);
t->parms.iph.saddr = p.iph.saddr; t->parms.iph.saddr = p.iph.saddr;
t->parms.iph.daddr = p.iph.daddr; t->parms.iph.daddr = p.iph.daddr;
@ -683,7 +683,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
if ((t = ipip6_tunnel_locate(&p, 0)) == NULL) if ((t = ipip6_tunnel_locate(&p, 0)) == NULL)
goto done; goto done;
err = -EPERM; err = -EPERM;
if (t == ipip6_fb_tunnel_dev->priv) if (t == netdev_priv(ipip6_fb_tunnel_dev))
goto done; goto done;
dev = t->dev; dev = t->dev;
} }
@ -700,7 +700,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
static struct net_device_stats *ipip6_tunnel_get_stats(struct net_device *dev) static struct net_device_stats *ipip6_tunnel_get_stats(struct net_device *dev)
{ {
return &(((struct ip_tunnel*)dev->priv)->stat); return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
} }
static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
@ -735,7 +735,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
struct ip_tunnel *tunnel; struct ip_tunnel *tunnel;
struct iphdr *iph; struct iphdr *iph;
tunnel = (struct ip_tunnel*)dev->priv; tunnel = netdev_priv(dev);
iph = &tunnel->parms.iph; iph = &tunnel->parms.iph;
tunnel->dev = dev; tunnel->dev = dev;
@ -775,7 +775,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
static int __init ipip6_fb_tunnel_init(struct net_device *dev) static int __init ipip6_fb_tunnel_init(struct net_device *dev)
{ {
struct ip_tunnel *tunnel = dev->priv; struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph; struct iphdr *iph = &tunnel->parms.iph;
tunnel->dev = dev; tunnel->dev = dev;

View File

@ -297,8 +297,7 @@ static int pfkey_error(struct sadb_msg *orig, int err, struct sock *sk)
err = EINTR; err = EINTR;
if (err >= 512) if (err >= 512)
err = EINVAL; err = EINVAL;
if (err <= 0 || err >= 256) BUG_ON(err <= 0 || err >= 256);
BUG();
hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg)); hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
pfkey_hdr_dup(hdr, orig); pfkey_hdr_dup(hdr, orig);

View File

@ -7,13 +7,13 @@ obj-y := sch_generic.o
obj-$(CONFIG_NET_SCHED) += sch_api.o sch_fifo.o sch_blackhole.o obj-$(CONFIG_NET_SCHED) += sch_api.o sch_fifo.o sch_blackhole.o
obj-$(CONFIG_NET_CLS) += cls_api.o obj-$(CONFIG_NET_CLS) += cls_api.o
obj-$(CONFIG_NET_CLS_ACT) += act_api.o obj-$(CONFIG_NET_CLS_ACT) += act_api.o
obj-$(CONFIG_NET_ACT_POLICE) += police.o obj-$(CONFIG_NET_ACT_POLICE) += act_police.o
obj-$(CONFIG_NET_CLS_POLICE) += police.o obj-$(CONFIG_NET_CLS_POLICE) += act_police.o
obj-$(CONFIG_NET_ACT_GACT) += gact.o obj-$(CONFIG_NET_ACT_GACT) += act_gact.o
obj-$(CONFIG_NET_ACT_MIRRED) += mirred.o obj-$(CONFIG_NET_ACT_MIRRED) += act_mirred.o
obj-$(CONFIG_NET_ACT_IPT) += ipt.o obj-$(CONFIG_NET_ACT_IPT) += act_ipt.o
obj-$(CONFIG_NET_ACT_PEDIT) += pedit.o obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o
obj-$(CONFIG_NET_ACT_SIMP) += simple.o obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o
obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o

View File

@ -165,7 +165,7 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
while ((a = act) != NULL) { while ((a = act) != NULL) {
repeat: repeat:
if (a->ops && a->ops->act) { if (a->ops && a->ops->act) {
ret = a->ops->act(&skb, a, res); ret = a->ops->act(skb, a, res);
if (TC_MUNGED & skb->tc_verd) { if (TC_MUNGED & skb->tc_verd) {
/* copied already, allow trampling */ /* copied already, allow trampling */
skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
@ -290,7 +290,7 @@ struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est,
if (a_o == NULL) { if (a_o == NULL) {
#ifdef CONFIG_KMOD #ifdef CONFIG_KMOD
rtnl_unlock(); rtnl_unlock();
request_module(act_name); request_module("act_%s", act_name);
rtnl_lock(); rtnl_lock();
a_o = tc_lookup_action_n(act_name); a_o = tc_lookup_action_n(act_name);
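
With the Makefile above renaming the action objects to act_*.o, autoloading has to request the prefixed module name; as a side effect the action name is no longer passed to request_module() as a printf format string. A sketch of the resulting call (wrapper name is hypothetical):

#include <linux/kmod.h>

static int load_act_module(const char *act_name)
{
        /* modules are now named act_<kind>, e.g. act_gact, act_mirred */
        return request_module("act_%s", act_name);
}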

View File

@ -135,10 +135,9 @@ tcf_gact_cleanup(struct tc_action *a, int bind)
} }
static int static int
tcf_gact(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
{ {
struct tcf_gact *p = PRIV(a, gact); struct tcf_gact *p = PRIV(a, gact);
struct sk_buff *skb = *pskb;
int action = TC_ACT_SHOT; int action = TC_ACT_SHOT;
spin_lock(&p->lock); spin_lock(&p->lock);
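
The action hooks changed in this series (gact, ipt, mirred, pedit, police, simple) all move from a struct sk_buff ** argument to a plain struct sk_buff *, since the callbacks never replace the skb. A skeleton of the new callback shape, with placeholder names and headers mirroring the in-tree action modules:

#include <linux/skbuff.h>
#include <linux/pkt_cls.h>      /* TC_ACT_* verdicts */
#include <net/pkt_sched.h>
#include <net/act_api.h>

static int example_act(struct sk_buff *skb, struct tc_action *a,
                       struct tcf_result *res)
{
        /* inspect or edit the skb in place and return a verdict */
        return TC_ACT_OK;
}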

View File

@ -201,11 +201,10 @@ tcf_ipt_cleanup(struct tc_action *a, int bind)
} }
static int static int
tcf_ipt(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) tcf_ipt(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
{ {
int ret = 0, result = 0; int ret = 0, result = 0;
struct tcf_ipt *p = PRIV(a, ipt); struct tcf_ipt *p = PRIV(a, ipt);
struct sk_buff *skb = *pskb;
if (skb_cloned(skb)) { if (skb_cloned(skb)) {
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
@ -222,6 +221,9 @@ tcf_ipt(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res)
worry later - danger - this API seems to have changed worry later - danger - this API seems to have changed
from earlier kernels */ from earlier kernels */
/* iptables targets take a double skb pointer in case the skb
* needs to be replaced. We don't own the skb, so this must not
* happen. The pskb_expand_head above should make sure of this */
ret = p->t->u.kernel.target->target(&skb, skb->dev, NULL, ret = p->t->u.kernel.target->target(&skb, skb->dev, NULL,
p->hook, p->t->data, NULL); p->hook, p->t->data, NULL);
switch (ret) { switch (ret) {
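
The added comment documents why handing &skb to the iptables target is safe here; the guard it refers to is the usual unclone-before-modify idiom, sketched below (not from the commit):

#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int make_skb_writable(struct sk_buff *skb)
{
        /* take a private copy of the data so in-place edits are legal;
         * pskb_expand_head() keeps the same struct sk_buff, so callers
         * holding the pointer are unaffected */
        if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                return -ENOMEM;
        return 0;
}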

View File

@ -158,12 +158,11 @@ tcf_mirred_cleanup(struct tc_action *a, int bind)
} }
static int static int
tcf_mirred(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) tcf_mirred(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
{ {
struct tcf_mirred *p = PRIV(a, mirred); struct tcf_mirred *p = PRIV(a, mirred);
struct net_device *dev; struct net_device *dev;
struct sk_buff *skb2 = NULL; struct sk_buff *skb2 = NULL;
struct sk_buff *skb = *pskb;
u32 at = G_TC_AT(skb->tc_verd); u32 at = G_TC_AT(skb->tc_verd);
spin_lock(&p->lock); spin_lock(&p->lock);

View File

@ -130,10 +130,9 @@ tcf_pedit_cleanup(struct tc_action *a, int bind)
} }
static int static int
tcf_pedit(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) tcf_pedit(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
{ {
struct tcf_pedit *p = PRIV(a, pedit); struct tcf_pedit *p = PRIV(a, pedit);
struct sk_buff *skb = *pskb;
int i, munged = 0; int i, munged = 0;
u8 *pptr; u8 *pptr;
@ -246,10 +245,12 @@ tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,int bind, int ref)
t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse);
t.expires = jiffies_to_clock_t(p->tm.expires); t.expires = jiffies_to_clock_t(p->tm.expires);
RTA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t); RTA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t);
kfree(opt);
return skb->len; return skb->len;
rtattr_failure: rtattr_failure:
skb_trim(skb, b - skb->data); skb_trim(skb, b - skb->data);
kfree(opt);
return -1; return -1;
} }
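
The two kfree(opt) additions close a leak of the temporary key buffer in tcf_pedit_dump(): it is now released on the normal return and on the rtattr_failure exit alike. A simplified sketch of the fixed shape (fill_attrs() is a placeholder for the real RTA_PUT() sequence):

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int fill_attrs(struct sk_buff *skb, void *opt)
{
        return 0;       /* placeholder for the RTA_PUT() calls */
}

static int dump_with_tmp_buffer(struct sk_buff *skb, size_t size)
{
        void *opt = kmalloc(size, GFP_KERNEL);
        int err;

        if (opt == NULL)
                return -ENOBUFS;

        err = fill_attrs(skb, opt);
        kfree(opt);             /* freed on success and failure alike */
        return err < 0 ? -1 : skb->len;
}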

View File

@ -284,11 +284,10 @@ static int tcf_act_police_cleanup(struct tc_action *a, int bind)
return 0; return 0;
} }
static int tcf_act_police(struct sk_buff **pskb, struct tc_action *a, static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
struct tcf_result *res) struct tcf_result *res)
{ {
psched_time_t now; psched_time_t now;
struct sk_buff *skb = *pskb;
struct tcf_police *p = PRIV(a); struct tcf_police *p = PRIV(a);
long toks; long toks;
long ptoks = 0; long ptoks = 0;
@ -408,7 +407,7 @@ police_cleanup_module(void)
module_init(police_init_module); module_init(police_init_module);
module_exit(police_cleanup_module); module_exit(police_cleanup_module);
#endif #else /* CONFIG_NET_CLS_ACT */
struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est) struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
{ {
@ -545,6 +544,7 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *p)
spin_unlock(&p->lock); spin_unlock(&p->lock);
return p->action; return p->action;
} }
EXPORT_SYMBOL(tcf_police);
int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p) int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p)
{ {
@ -601,13 +601,4 @@ int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *p)
return -1; return -1;
} }
#endif /* CONFIG_NET_CLS_ACT */
EXPORT_SYMBOL(tcf_police);
EXPORT_SYMBOL(tcf_police_destroy);
EXPORT_SYMBOL(tcf_police_dump);
EXPORT_SYMBOL(tcf_police_dump_stats);
EXPORT_SYMBOL(tcf_police_hash);
EXPORT_SYMBOL(tcf_police_ht);
EXPORT_SYMBOL(tcf_police_locate);
EXPORT_SYMBOL(tcf_police_lookup);
EXPORT_SYMBOL(tcf_police_new_index);
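
EXPORT_SYMBOL(tcf_police) now sits directly after the function it exports, inside the branch where that function is built, and the list of exports collected at the bottom of the file is dropped; presumably the remaining symbols no longer need to be visible to other modules. The placement follows the usual kernel convention, sketched with a placeholder:

#include <linux/module.h>

int example_exported(void)              /* placeholder, not a real kernel symbol */
{
        return 0;
}
EXPORT_SYMBOL(example_exported);        /* export immediately follows the definition */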

View File

@ -44,9 +44,8 @@ static DEFINE_RWLOCK(simp_lock);
#include <net/pkt_act.h> #include <net/pkt_act.h>
#include <net/act_generic.h> #include <net/act_generic.h>
static int tcf_simp(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
{ {
struct sk_buff *skb = *pskb;
struct tcf_defact *p = PRIV(a, defact); struct tcf_defact *p = PRIV(a, defact);
spin_lock(&p->lock); spin_lock(&p->lock);

View File

@ -257,7 +257,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
(cl = cbq_class_lookup(q, prio)) != NULL) (cl = cbq_class_lookup(q, prio)) != NULL)
return cl; return cl;
*qerr = NET_XMIT_DROP; *qerr = NET_XMIT_BYPASS;
for (;;) { for (;;) {
int result = 0; int result = 0;
defmap = head->defaults; defmap = head->defaults;
@ -413,7 +413,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->rx_class = cl; q->rx_class = cl;
#endif #endif
if (cl == NULL) { if (cl == NULL) {
if (ret == NET_XMIT_DROP) if (ret == NET_XMIT_BYPASS)
sch->qstats.drops++; sch->qstats.drops++;
kfree_skb(skb); kfree_skb(skb);
return ret; return ret;
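
cbq here, and hfsc, htb and prio below, all make the same change: when classification yields no class, the classify helper reports NET_XMIT_BYPASS instead of NET_XMIT_DROP, and the enqueue path tests for that value before bumping qstats.drops, presumably so a packet consumed or refused during classification is not reported upward as an ordinary transmit drop. A simplified sketch of the pattern (the example_* names are placeholders):

#include <linux/skbuff.h>
#include <linux/netdevice.h>    /* NET_XMIT_* verdicts */
#include <net/pkt_sched.h>      /* struct Qdisc */

static void *example_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
        *qerr = NET_XMIT_BYPASS;        /* no class found in this sketch */
        return NULL;
}

static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        int ret;
        void *cl = example_classify(skb, sch, &ret);

        if (cl == NULL) {
                if (ret == NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;
        }
        /* ... hand the skb to the class's inner qdisc ... */
        return NET_XMIT_SUCCESS;
}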

View File

@ -208,7 +208,7 @@ struct hfsc_sched
do { \ do { \
struct timeval tv; \ struct timeval tv; \
do_gettimeofday(&tv); \ do_gettimeofday(&tv); \
(stamp) = 1000000ULL * tv.tv_sec + tv.tv_usec; \ (stamp) = 1ULL * USEC_PER_SEC * tv.tv_sec + tv.tv_usec; \
} while (0) } while (0)
#endif #endif
@ -502,8 +502,8 @@ d2dx(u32 d)
u64 dx; u64 dx;
dx = ((u64)d * PSCHED_JIFFIE2US(HZ)); dx = ((u64)d * PSCHED_JIFFIE2US(HZ));
dx += 1000000 - 1; dx += USEC_PER_SEC - 1;
do_div(dx, 1000000); do_div(dx, USEC_PER_SEC);
return dx; return dx;
} }
@ -523,7 +523,7 @@ dx2d(u64 dx)
{ {
u64 d; u64 d;
d = dx * 1000000; d = dx * USEC_PER_SEC;
do_div(d, PSCHED_JIFFIE2US(HZ)); do_div(d, PSCHED_JIFFIE2US(HZ));
return (u32)d; return (u32)d;
} }
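
The 1000000 literals in the clock macro and the d2dx()/dx2d() converters are spelled USEC_PER_SEC from here on; the arithmetic is unchanged. Restated as a standalone sketch (assumes the scheduler's PSCHED_JIFFIE2US() macro, as the original code does):

#include <linux/time.h>         /* USEC_PER_SEC */
#include <net/pkt_sched.h>      /* PSCHED_JIFFIE2US() */
#include <asm/div64.h>          /* do_div() */

static u64 usec_delay_to_dx(u32 d)
{
        u64 dx = (u64)d * PSCHED_JIFFIE2US(HZ);

        dx += USEC_PER_SEC - 1;         /* round up to a whole dx unit */
        do_div(dx, USEC_PER_SEC);
        return dx;
}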
@ -1227,7 +1227,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
if (cl->level == 0) if (cl->level == 0)
return cl; return cl;
*qerr = NET_XMIT_DROP; *qerr = NET_XMIT_BYPASS;
tcf = q->root.filter_list; tcf = q->root.filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
@ -1643,7 +1643,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl = hfsc_classify(skb, sch, &err); cl = hfsc_classify(skb, sch, &err);
if (cl == NULL) { if (cl == NULL) {
if (err == NET_XMIT_DROP) if (err == NET_XMIT_BYPASS)
sch->qstats.drops++; sch->qstats.drops++;
kfree_skb(skb); kfree_skb(skb);
return err; return err;

View File

@ -321,7 +321,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, in
if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0) if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0)
return cl; return cl;
*qerr = NET_XMIT_DROP; *qerr = NET_XMIT_BYPASS;
tcf = q->filter_list; tcf = q->filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
@ -724,7 +724,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
} }
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
} else if (!cl) { } else if (!cl) {
if (ret == NET_XMIT_DROP) if (ret == NET_XMIT_BYPASS)
sch->qstats.drops++; sch->qstats.drops++;
kfree_skb (skb); kfree_skb (skb);
return ret; return ret;

View File

@ -54,7 +54,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
u32 band = skb->priority; u32 band = skb->priority;
struct tcf_result res; struct tcf_result res;
*qerr = NET_XMIT_DROP; *qerr = NET_XMIT_BYPASS;
if (TC_H_MAJ(skb->priority) != sch->handle) { if (TC_H_MAJ(skb->priority) != sch->handle) {
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
switch (tc_classify(skb, q->filter_list, &res)) { switch (tc_classify(skb, q->filter_list, &res)) {
@ -91,7 +91,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
qdisc = prio_classify(skb, sch, &ret); qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
if (qdisc == NULL) { if (qdisc == NULL) {
if (ret == NET_XMIT_DROP) if (ret == NET_XMIT_BYPASS)
sch->qstats.drops++; sch->qstats.drops++;
kfree_skb(skb); kfree_skb(skb);
return ret; return ret;
@ -118,7 +119,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
qdisc = prio_classify(skb, sch, &ret); qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
if (qdisc == NULL) { if (qdisc == NULL) {
if (ret == NET_XMIT_DROP) if (ret == NET_XMIT_BYPASS)
sch->qstats.drops++; sch->qstats.drops++;
kfree_skb(skb); kfree_skb(skb);
return ret; return ret;

View File

@ -274,7 +274,7 @@ teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *de
static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
struct teql_master *master = (void*)dev->priv; struct teql_master *master = netdev_priv(dev);
struct Qdisc *start, *q; struct Qdisc *start, *q;
int busy; int busy;
int nores; int nores;
@ -350,7 +350,7 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
static int teql_master_open(struct net_device *dev) static int teql_master_open(struct net_device *dev)
{ {
struct Qdisc * q; struct Qdisc * q;
struct teql_master *m = (void*)dev->priv; struct teql_master *m = netdev_priv(dev);
int mtu = 0xFFFE; int mtu = 0xFFFE;
unsigned flags = IFF_NOARP|IFF_MULTICAST; unsigned flags = IFF_NOARP|IFF_MULTICAST;
@ -397,13 +397,13 @@ static int teql_master_close(struct net_device *dev)
static struct net_device_stats *teql_master_stats(struct net_device *dev) static struct net_device_stats *teql_master_stats(struct net_device *dev)
{ {
struct teql_master *m = (void*)dev->priv; struct teql_master *m = netdev_priv(dev);
return &m->stats; return &m->stats;
} }
static int teql_master_mtu(struct net_device *dev, int new_mtu) static int teql_master_mtu(struct net_device *dev, int new_mtu)
{ {
struct teql_master *m = (void*)dev->priv; struct teql_master *m = netdev_priv(dev);
struct Qdisc *q; struct Qdisc *q;
if (new_mtu < 68) if (new_mtu < 68)
@ -423,7 +423,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
static __init void teql_master_setup(struct net_device *dev) static __init void teql_master_setup(struct net_device *dev)
{ {
struct teql_master *master = dev->priv; struct teql_master *master = netdev_priv(dev);
struct Qdisc_ops *ops = &master->qops; struct Qdisc_ops *ops = &master->qops;
master->dev = dev; master->dev = dev;
@ -476,7 +476,7 @@ static int __init teql_init(void)
break; break;
} }
master = dev->priv; master = netdev_priv(dev);
strlcpy(master->qops.id, dev->name, IFNAMSIZ); strlcpy(master->qops.id, dev->name, IFNAMSIZ);
err = register_qdisc(&master->qops); err = register_qdisc(&master->qops);

View File

@ -1250,8 +1250,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_TIMER_START: case SCTP_CMD_TIMER_START:
timer = &asoc->timers[cmd->obj.to]; timer = &asoc->timers[cmd->obj.to];
timeout = asoc->timeouts[cmd->obj.to]; timeout = asoc->timeouts[cmd->obj.to];
if (!timeout) BUG_ON(!timeout);
BUG();
timer->expires = jiffies + timeout; timer->expires = jiffies + timeout;
sctp_association_hold(asoc); sctp_association_hold(asoc);

View File

@ -575,12 +575,11 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
if (rp->q.list.next == &cd->queue) { if (rp->q.list.next == &cd->queue) {
spin_unlock(&queue_lock); spin_unlock(&queue_lock);
up(&queue_io_sem); up(&queue_io_sem);
if (rp->offset) BUG_ON(rp->offset);
BUG();
return 0; return 0;
} }
rq = container_of(rp->q.list.next, struct cache_request, q.list); rq = container_of(rp->q.list.next, struct cache_request, q.list);
if (rq->q.reader) BUG(); BUG_ON(rq->q.reader);
if (rp->offset == 0) if (rp->offset == 0)
rq->readers++; rq->readers++;
spin_unlock(&queue_lock); spin_unlock(&queue_lock);

View File

@ -122,8 +122,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
rqstp->rq_argused = 0; rqstp->rq_argused = 0;
rqstp->rq_resused = 0; rqstp->rq_resused = 0;
arghi = 0; arghi = 0;
if (pages > RPCSVC_MAXPAGES) BUG_ON(pages > RPCSVC_MAXPAGES);
BUG();
while (pages) { while (pages) {
struct page *p = alloc_page(GFP_KERNEL); struct page *p = alloc_page(GFP_KERNEL);
if (!p) if (!p)

View File

@ -540,8 +540,7 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
start = end; start = end;
} }
} }
if (len) BUG_ON(len);
BUG();
} }
EXPORT_SYMBOL_GPL(skb_icv_walk); EXPORT_SYMBOL_GPL(skb_icv_walk);
@ -610,8 +609,7 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
start = end; start = end;
} }
} }
if (len) BUG_ON(len);
BUG();
return elt; return elt;
} }
EXPORT_SYMBOL_GPL(skb_to_sgvec); EXPORT_SYMBOL_GPL(skb_to_sgvec);

View File

@ -248,11 +248,9 @@ EXPORT_SYMBOL(xfrm_policy_alloc);
void __xfrm_policy_destroy(struct xfrm_policy *policy) void __xfrm_policy_destroy(struct xfrm_policy *policy)
{ {
if (!policy->dead) BUG_ON(!policy->dead);
BUG();
if (policy->bundles) BUG_ON(policy->bundles);
BUG();
if (del_timer(&policy->timer)) if (del_timer(&policy->timer))
BUG(); BUG();
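
Note that the del_timer() check above keeps its explicit if (...) BUG() form. That is presumably deliberate: in kernels of this vintage BUG_ON() can compile away its argument when CONFIG_BUG is disabled, so expressions with side effects must not be placed inside it. A safe equivalent, sketched:

#include <linux/timer.h>
#include <linux/kernel.h>

static void stop_policy_timer(struct timer_list *timer)
{
        int was_pending = del_timer(timer);     /* side effect always runs */

        BUG_ON(was_pending);                    /* plain value, safe in BUG_ON() */
}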