mirror of https://gitee.com/openkylin/linux.git
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [NET]: Fix networking compilation errors
  [AF_RXRPC/AFS]: Arch-specific fixes.
  [AFS]: Fix VLocation record update wakeup
  [NET]: Revert sk_buff walker cleanups.
commit 42fae7fb1c
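Most of the diff below is the sk_buff walker revert, which restores the start/end bookkeeping used to walk an skb's linear header, its paged fragments and its frag_list. For orientation only, here is a small, self-contained userspace sketch of that walking pattern; toybuf, frag_t and toybuf_copy_bits() are made-up stand-ins for the kernel's sk_buff structures and skb_copy_bits(), not code from this commit.

/*
 * Toy model of the "start/end" walker pattern used by the reverted code
 * below.  Each fragment covers the byte range [start, end) of the whole
 * buffer, and offsets into a fragment are rebased with "offset - start".
 */
#include <stdio.h>
#include <string.h>

typedef struct { const char *data; int size; } frag_t;

typedef struct {
	const char *head;	/* linear area, like skb->data */
	int headlen;		/* like skb_headlen(skb) */
	frag_t frags[4];	/* like skb_shinfo(skb)->frags[] */
	int nr_frags;
} toybuf;

/* Copy len bytes starting at byte `offset` of the buffer into `to`. */
static int toybuf_copy_bits(const toybuf *b, int offset, char *to, int len)
{
	int start = b->headlen;
	int i, copy = start - offset;

	if (copy > 0) {			/* part of the request is in the head */
		if (copy > len)
			copy = len;
		memcpy(to, b->head + offset, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < b->nr_frags; i++) {
		int end = start + b->frags[i].size;	/* frag covers [start, end) */

		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			/* rebase the buffer offset into this fragment */
			memcpy(to, b->frags[i].data + offset - start, copy);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}
	return len ? -1 : 0;
}

int main(void)
{
	toybuf b = { "HEAD", 4, { { "FRAG1", 5 }, { "FRAG2", 5 } }, 2 };
	char out[8] = { 0 };

	/* bytes 2..8 span the head and the first fragment: "ADFRAG1" */
	if (toybuf_copy_bits(&b, 2, out, 7) == 0)
		printf("%.7s\n", out);
	return 0;
}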
@@ -128,6 +128,8 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
 	return (__force __wsum)result;
 }
 
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+
 __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {

@@ -2020,6 +2020,7 @@ config AFS_FS
 	tristate "Andrew File System support (AFS) (EXPERIMENTAL)"
 	depends on INET && EXPERIMENTAL
 	select AF_RXRPC
+	select KEYS
 	help
 	  If you say Y here, you will get an experimental Andrew File System
 	  driver. It currently only supports unsecured read-only AFS access.

@@ -367,7 +367,7 @@ struct afs_uuid {
 	u32		time_low;			/* low part of timestamp */
 	u16		time_mid;			/* mid part of timestamp */
 	u16		time_hi_and_version;		/* high part of timestamp and version */
-#define AFS_UUID_TO_UNIX_TIME	0x01b21dd213814000
+#define AFS_UUID_TO_UNIX_TIME	0x01b21dd213814000ULL
 #define AFS_UUID_TIMEHI_MASK	0x0fff
 #define AFS_UUID_VERSION_TIME	0x1000	/* time-based UUID */
 #define AFS_UUID_VERSION_NAME	0x3000	/* name-based UUID */

@@ -772,7 +772,7 @@ int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
 
 	if (call->offset < count) {
 		if (last) {
-			_leave(" = -EBADMSG [%d < %lu]", call->offset, count);
+			_leave(" = -EBADMSG [%d < %zu]", call->offset, count);
 			return -EBADMSG;
 		}
 		_leave(" = -EAGAIN");

@@ -243,7 +243,7 @@ static int afs_read_rtm(struct afs_rtm_desc *desc)
 	desc->datalen = kernel_recvmsg(desc->nlsock, &msg, iov, 1,
 				       desc->datamax, 0);
 	if (desc->datalen < 0) {
-		_leave(" = %ld [recv]", desc->datalen);
+		_leave(" = %zd [recv]", desc->datalen);
 		return desc->datalen;
 	}
 

@@ -416,8 +416,8 @@ struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *cell,
 		goto error_abandon;
 	spin_lock(&vl->lock);
 	vl->state = AFS_VL_VALID;
-	wake_up(&vl->waitq);
 	spin_unlock(&vl->lock);
+	wake_up(&vl->waitq);
 
 	/* schedule for regular updates */
 	afs_vlocation_queue_for_updates(vl);
@@ -442,7 +442,7 @@ struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *cell,
 
 	_debug("invalid [state %d]", state);
 
-	if ((state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME)) {
+	if (state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME) {
 		vl->state = AFS_VL_CREATING;
 		spin_unlock(&vl->lock);
 		goto fill_in_record;
@@ -453,11 +453,10 @@ struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *cell,
 	_debug("wait");
 
 	spin_unlock(&vl->lock);
-	ret = wait_event_interruptible(
-		vl->waitq,
-		vl->state == AFS_VL_NEW ||
-		vl->state == AFS_VL_VALID ||
-		vl->state == AFS_VL_NO_VOLUME);
+	ret = wait_event_interruptible(vl->waitq,
+				       vl->state == AFS_VL_NEW ||
+				       vl->state == AFS_VL_VALID ||
+				       vl->state == AFS_VL_NO_VOLUME);
 	if (ret < 0)
 		goto error;
 	spin_lock(&vl->lock);
@@ -471,8 +470,8 @@ struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *cell,
 error_abandon:
 	spin_lock(&vl->lock);
 	vl->state = AFS_VL_NEW;
-	wake_up(&vl->waitq);
 	spin_unlock(&vl->lock);
+	wake_up(&vl->waitq);
 error:
 	ASSERT(vl != NULL);
 	afs_put_vlocation(vl);
@@ -675,7 +674,6 @@ static void afs_vlocation_updater(struct work_struct *work)
 	case 0:
 		afs_vlocation_apply_update(vl, &vldb);
 		vl->state = AFS_VL_VALID;
-		wake_up(&vl->waitq);
 		break;
 	case -ENOMEDIUM:
 		vl->state = AFS_VL_VOLUME_DELETED;
@@ -685,6 +683,7 @@ static void afs_vlocation_updater(struct work_struct *work)
 		break;
 	}
 	spin_unlock(&vl->lock);
+	wake_up(&vl->waitq);
 
 	/* and then reschedule */
 	_debug("reschedule");

@@ -10,7 +10,7 @@ extern int wext_proc_init(void);
 extern int wext_handle_ioctl(struct ifreq *ifr, unsigned int cmd,
 			     void __user *arg);
 #else
-static inline int wext_proc_init()
+static inline int wext_proc_init(void)
 {
 	return 0;
 }

@@ -279,6 +279,8 @@ ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
 
 	return ktime_add(kt, tmp);
 }
+
+EXPORT_SYMBOL_GPL(ktime_add_ns);
 # endif /* !CONFIG_KTIME_SCALAR */
 
 /*

@@ -937,11 +937,11 @@ static unsigned long atalk_sum_partial(const unsigned char *data,
 static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 				   int len, unsigned long sum)
 {
-	int end = skb_headlen(skb);
+	int start = skb_headlen(skb);
 	int i, copy;
 
 	/* checksum stuff in header space */
-	if ((copy = end - offset) > 0) {
+	if ( (copy = start - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		sum = atalk_sum_partial(skb->data + offset, copy, sum);
@@ -953,9 +953,11 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 
 	/* checksum stuff in frags */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -963,31 +965,36 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 			if (copy > len)
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
-			sum = atalk_sum_partial(vaddr + frag->page_offset,
-						copy, sum);
+			sum = atalk_sum_partial(vaddr + frag->page_offset +
+						offset - start, copy, sum);
 			kunmap_skb_frag(vaddr);
 
 			if (!(len -= copy))
 				return sum;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				sum = atalk_sum_skb(list, 0, copy, sum);
+				sum = atalk_sum_skb(list, offset - start,
						    copy, sum);
 				if ((len -= copy) == 0)
 					return sum;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 

@@ -247,8 +247,8 @@ EXPORT_SYMBOL(skb_kill_datagram);
 int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 			    struct iovec *to, int len)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 
 	/* Copy header. */
 	if (copy > 0) {
@@ -263,9 +263,11 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 
 	/* Copy paged appendix. Hmm... why does this look so complicated? */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			int err;
 			u8 *vaddr;
@@ -275,8 +277,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 			if (copy > len)
 				copy = len;
 			vaddr = kmap(page);
-			err = memcpy_toiovec(to, vaddr + frag->page_offset,
-					     copy);
+			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
 			kunmap(page);
 			if (err)
 				goto fault;
@@ -284,24 +286,30 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 				return 0;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_copy_datagram_iovec(list, 0, to, copy))
+				if (skb_copy_datagram_iovec(list,
							    offset - start,
							    to, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 	if (!len)
@@ -315,9 +323,9 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				      u8 __user *to, int len,
 				      __wsum *csump)
 {
-	int end = skb_headlen(skb);
+	int start = skb_headlen(skb);
 	int pos = 0;
-	int i, copy = end - offset;
+	int i, copy = start - offset;
 
 	/* Copy header. */
 	if (copy > 0) {
@@ -336,9 +344,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			int err = 0;
@@ -350,7 +360,8 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				copy = len;
 			vaddr = kmap(page);
 			csum2 = csum_and_copy_to_user(vaddr +
-							frag->page_offset,
+							frag->page_offset +
+							offset - start,
 							to, copy, 0, &err);
 			kunmap(page);
 			if (err)
@@ -362,20 +373,24 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 			to += copy;
 			pos += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list=list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				__wsum csum2 = 0;
 				if (copy > len)
 					copy = len;
-				if (skb_copy_and_csum_datagram(list, 0,
+				if (skb_copy_and_csum_datagram(list,
							       offset - start,
							       to, copy,
							       &csum2))
 					goto fault;
@@ -386,6 +401,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				to += copy;
 				pos += copy;
 			}
+			start = end;
 		}
 	}
 	if (!len)

@@ -1045,13 +1045,13 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 {
 	int i, copy;
-	int end = skb_headlen(skb);
+	int start = skb_headlen(skb);
 
 	if (offset > (int)skb->len - len)
 		goto fault;
 
 	/* Copy header. */
-	if ((copy = end - offset) > 0) {
+	if ((copy = start - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
@@ -1062,9 +1062,11 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
@@ -1073,8 +1075,8 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 
 			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
 			memcpy(to,
-			       vaddr + skb_shinfo(skb)->frags[i].page_offset,
-			       copy);
+			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
			       offset - start, copy);
 			kunmap_skb_frag(vaddr);
 
 			if ((len -= copy) == 0)
@@ -1082,25 +1084,30 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 			offset += copy;
 			to += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_copy_bits(list, 0, to, copy))
+				if (skb_copy_bits(list, offset - start,
						  to, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
 				offset += copy;
 				to += copy;
 			}
+			start = end;
 		}
 	}
 	if (!len)
@@ -1125,12 +1132,12 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 {
 	int i, copy;
-	int end = skb_headlen(skb);
+	int start = skb_headlen(skb);
 
 	if (offset > (int)skb->len - len)
 		goto fault;
 
-	if ((copy = end - offset) > 0) {
+	if ((copy = start - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
@@ -1142,9 +1149,11 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + frag->size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + frag->size;
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
@@ -1152,7 +1161,8 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 				copy = len;
 
 			vaddr = kmap_skb_frag(frag);
-			memcpy(vaddr + frag->page_offset, from, copy);
+			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
 			kunmap_skb_frag(vaddr);
 
 			if ((len -= copy) == 0)
@@ -1160,25 +1170,30 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 			offset += copy;
 			from += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_store_bits(list, 0, from, copy))
+				if (skb_store_bits(list, offset - start,
						   from, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
 				offset += copy;
 				from += copy;
 			}
+			start = end;
 		}
 	}
 	if (!len)
@@ -1195,8 +1210,8 @@ EXPORT_SYMBOL(skb_store_bits);
 __wsum skb_checksum(const struct sk_buff *skb, int offset,
 		    int len, __wsum csum)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	int pos = 0;
 
 	/* Checksum header. */
@@ -1211,9 +1226,11 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
@@ -1222,8 +1239,8 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 			if (copy > len)
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
-			csum2 = csum_partial(vaddr + frag->page_offset,
-					     copy, 0);
+			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
 			kunmap_skb_frag(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
@@ -1231,26 +1248,31 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 			offset += copy;
 			pos += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				__wsum csum2;
 				if (copy > len)
 					copy = len;
-				csum2 = skb_checksum(list, 0, copy, 0);
+				csum2 = skb_checksum(list, offset - start,
						     copy, 0);
 				csum = csum_block_add(csum, csum2, pos);
 				if ((len -= copy) == 0)
 					return csum;
 				offset += copy;
 				pos += copy;
 			}
+			start = end;
 		}
 	}
 	BUG_ON(len);
@@ -1263,8 +1285,8 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 			      u8 *to, int len, __wsum csum)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	int pos = 0;
 
 	/* Copy header. */
@@ -1281,9 +1303,11 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
@@ -1293,8 +1317,9 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
 			csum2 = csum_partial_copy_nocheck(vaddr +
-							  frag->page_offset,
-							  to, copy, 0);
+							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
 			kunmap_skb_frag(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
@@ -1303,6 +1328,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 			to += copy;
 			pos += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
@@ -1310,13 +1336,16 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 
 		for (; list; list = list->next) {
 			__wsum csum2;
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				csum2 = skb_copy_and_csum_bits(list, 0,
+				csum2 = skb_copy_and_csum_bits(list,
							       offset - start,
							       to, copy, 0);
 				csum = csum_block_add(csum, csum2, pos);
 				if ((len -= copy) == 0)
@@ -1325,6 +1354,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 				to += copy;
 				pos += copy;
 			}
+			start = end;
 		}
 	}
 	BUG_ON(len);
@@ -1996,8 +2026,8 @@ void __init skb_init(void)
 int
 skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	int elt = 0;
 
 	if (copy > 0) {
@@ -2013,39 +2043,45 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			if (copy > len)
 				copy = len;
 			sg[elt].page = frag->page;
-			sg[elt].offset = frag->page_offset;
+			sg[elt].offset = frag->page_offset+offset-start;
 			sg[elt].length = copy;
 			elt++;
 			if (!(len -= copy))
 				return elt;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				elt += skb_to_sgvec(list, sg+elt, 0, copy);
+				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
 				if ((len -= copy) == 0)
 					return elt;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 	BUG_ON(len);

@@ -49,8 +49,8 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 			struct sk_buff *skb, int offset, struct iovec *to,
 			size_t len, struct dma_pinned_list *pinned_list)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	dma_cookie_t cookie = 0;
 
 	/* Copy header. */
@@ -69,9 +69,11 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 
 	/* Copy paged appendix. Hmm... why does this look so complicated? */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		copy = end - offset;
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -80,8 +82,8 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 			if (copy > len)
 				copy = len;
 
-			cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list,
-					page, frag->page_offset, copy);
+			cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list, page,
					frag->page_offset + offset - start, copy);
 			if (cookie < 0)
 				goto fault;
 			len -= copy;
@@ -89,21 +91,25 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 				goto end;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			copy = end - offset;
 			if (copy > 0) {
 				if (copy > len)
 					copy = len;
 				cookie = dma_skb_copy_datagram_iovec(chan, list,
-						0, to, copy, pinned_list);
+						offset - start, to, copy,
						pinned_list);
 				if (cookie < 0)
 					goto fault;
 				len -= copy;
@@ -111,6 +117,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 					goto end;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 

@@ -30,6 +30,11 @@ config AF_RXRPC_DEBUG
 config RXKAD
 	tristate "RxRPC Kerberos security"
 	depends on AF_RXRPC && KEYS
+	select CRYPTO
+	select CRYPTO_MANAGER
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_PCBC
+	select CRYPTO_FCRYPT
 	help
 	  Provide kerberos 4 and AFS kaserver security handling for AF_RXRPC
 	  through the use of the key retention service.

@@ -18,6 +18,7 @@
 #include <linux/ctype.h>
 #include <net/sock.h>
 #include <net/af_rxrpc.h>
+#define rxrpc_debug rxkad_debug
 #include "ar-internal.h"
 
 #define RXKAD_VERSION			2

@@ -532,8 +532,8 @@ EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
 int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 		 int offset, int len, icv_update_fn_t icv_update)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	int err;
 	struct scatterlist sg;
 
@@ -556,9 +556,11 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
@@ -566,7 +568,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 				copy = len;
 
 			sg.page = frag->page;
-			sg.offset = frag->page_offset;
+			sg.offset = frag->page_offset + offset-start;
 			sg.length = copy;
 
 			err = icv_update(desc, &sg, copy);
@@ -577,19 +579,22 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 				return 0;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				err = skb_icv_walk(list, desc, 0,
+				err = skb_icv_walk(list, desc, offset-start,
						   copy, icv_update);
 				if (unlikely(err))
 					return err;
@@ -597,6 +602,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 					return 0;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 	BUG_ON(len);