[IP]: Simplify and consolidate MSG_PEEK error handling
When a packet is obtained from skb_recv_datagram with MSG_PEEK enabled,
it is left on the socket receive queue.  This means that when we detect
a checksum error we have to be careful when trying to free the packet,
as someone could have dequeued it in the meantime.

Currently this delicate logic is duplicated three times between UDPv4,
UDPv6 and RAWv6.  This patch moves it into one place and simplifies the
code somewhat.

This is based on a suggestion by Eric Dumazet.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 57cca05af1
commit 3305b80c21
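Before the hunks themselves, a brief orientation: a datagram received with MSG_PEEK keeps an extra reference and stays linked on sk->sk_receive_queue, so a checksum-error path may only unlink it under the queue lock, and only if it is still at the head of the queue. Below is a condensed before/after sketch of that pattern, lifted from the hunks that follow (kernel context assumed; this fragment is illustrative, not standalone code):

    /* Before: open-coded in udp_recvmsg(), udpv6_recvmsg() and rawv6_recvmsg(). */
    if (flags & MSG_PEEK) {
            int clear = 0;

            spin_lock_bh(&sk->sk_receive_queue.lock);
            if (skb == skb_peek(&sk->sk_receive_queue)) {
                    /* Nobody dequeued it in the meantime: unlink it ourselves. */
                    __skb_unlink(skb, &sk->sk_receive_queue);
                    clear = 1;
            }
            spin_unlock_bh(&sk->sk_receive_queue.lock);
            if (clear)
                    kfree_skb(skb);         /* drop the queue's reference */
    }
    skb_free_datagram(sk, skb);             /* drop the reference skb_recv_datagram() gave us */

    /* After: the same logic lives in one helper. */
    skb_kill_datagram(sk, skb, flags);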
include/linux/skbuff.h
@@ -1239,6 +1239,8 @@ extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
 							int hlen,
 							struct iovec *iov);
 extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+extern void	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
+					 unsigned int flags);
 extern unsigned int    skb_checksum(const struct sk_buff *skb, int offset,
 				    int len, unsigned int csum);
 extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
net/core/datagram.c
@@ -47,6 +47,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/poll.h>
 #include <linux/highmem.h>
+#include <linux/spinlock.h>
 
 #include <net/protocol.h>
 #include <linux/skbuff.h>
@@ -199,6 +200,41 @@ void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
 	kfree_skb(skb);
 }
 
+/**
+ *	skb_kill_datagram - Free a datagram skbuff forcibly
+ *	@sk: socket
+ *	@skb: datagram skbuff
+ *	@flags: MSG_ flags
+ *
+ *	This function frees a datagram skbuff that was received by
+ *	skb_recv_datagram.  The flags argument must match the one
+ *	used for skb_recv_datagram.
+ *
+ *	If the MSG_PEEK flag is set, and the packet is still on the
+ *	receive queue of the socket, it will be taken off the queue
+ *	before it is freed.
+ *
+ *	This function currently only disables BH when acquiring the
+ *	sk_receive_queue lock.  Therefore it must not be used in a
+ *	context where that lock is acquired in an IRQ context.
+ */
+
+void skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
+{
+	if (flags & MSG_PEEK) {
+		spin_lock_bh(&sk->sk_receive_queue.lock);
+		if (skb == skb_peek(&sk->sk_receive_queue)) {
+			__skb_unlink(skb, &sk->sk_receive_queue);
+			atomic_dec(&skb->users);
+		}
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
+	}
+
+	kfree_skb(skb);
+}
+
+EXPORT_SYMBOL(skb_kill_datagram);
+
 /**
  *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
  *	@skb: buffer to copy
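A short usage sketch for the new helper (hypothetical caller and label name; it simply mirrors the call-site conversions in the hunks below): the atomic_dec() above releases the reference the receive queue held on a still-queued, peeked skb, and the final kfree_skb() releases the reference handed out by skb_recv_datagram(), so the skb is freed exactly once whether or not MSG_PEEK was used.

    /* Hypothetical checksum-error tail for a datagram protocol's recvmsg();
     * the label name and the blocking-case return value are illustrative. */
    csum_err:
            skb_kill_datagram(sk, skb, flags);

            /* Do not also call skb_free_datagram() here: the helper has
             * already dropped this function's reference to the skb. */
            if (flags & MSG_DONTWAIT)
                    return -EAGAIN;
            return -EHOSTUNREACH;   /* rawv6_recvmsg() masquerades the error this way */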
net/ipv4/udp.c
@@ -846,20 +846,7 @@ static int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 csum_copy_err:
 	UDP_INC_STATS_BH(UDP_MIB_INERRORS);
 
-	/* Clear queue. */
-	if (flags&MSG_PEEK) {
-		int clear = 0;
-		spin_lock_bh(&sk->sk_receive_queue.lock);
-		if (skb == skb_peek(&sk->sk_receive_queue)) {
-			__skb_unlink(skb, &sk->sk_receive_queue);
-			clear = 1;
-		}
-		spin_unlock_bh(&sk->sk_receive_queue.lock);
-		if (clear)
-			kfree_skb(skb);
-	}
-
-	skb_free_datagram(sk, skb);
+	skb_kill_datagram(sk, skb, flags);
 
 	if (noblock)
 		return -EAGAIN;
net/ipv6/raw.c
@@ -32,6 +32,7 @@
 #include <linux/icmpv6.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
+#include <linux/skbuff.h>
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
 #include <asm/bug.h>
@@ -433,25 +434,14 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 	return err;
 
 csum_copy_err:
-	/* Clear queue. */
-	if (flags&MSG_PEEK) {
-		int clear = 0;
-		spin_lock_bh(&sk->sk_receive_queue.lock);
-		if (skb == skb_peek(&sk->sk_receive_queue)) {
-			__skb_unlink(skb, &sk->sk_receive_queue);
-			clear = 1;
-		}
-		spin_unlock_bh(&sk->sk_receive_queue.lock);
-		if (clear)
-			kfree_skb(skb);
-	}
+	skb_kill_datagram(sk, skb, flags);
 
 	/* Error for blocking case is chosen to masquerade
 	   as some normal condition.
 	 */
 	err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
 	/* FIXME: increment a raw6 drops counter here */
-	goto out_free;
+	goto out;
 }
 
 static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
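A note on the second rawv6 hunk above: the error path now jumps to out rather than out_free. Assuming the usual label layout at the end of rawv6_recvmsg() (reconstructed for illustration; the labels themselves lie outside the hunk), out_free would release the datagram a second time after skb_kill_datagram() has already done so:

    /* Reconstructed sketch of the function tail, not part of the diff: */
    out_free:
            skb_free_datagram(sk, skb);     /* would drop an skb reference we no longer own */
    out:
            return err;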
net/ipv6/udp.c
@@ -36,6 +36,7 @@
 #include <linux/ipv6.h>
 #include <linux/icmpv6.h>
 #include <linux/init.h>
+#include <linux/skbuff.h>
 #include <asm/uaccess.h>
 
 #include <net/sock.h>
@@ -300,20 +301,7 @@ static int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 	return err;
 
 csum_copy_err:
-	/* Clear queue. */
-	if (flags&MSG_PEEK) {
-		int clear = 0;
-		spin_lock_bh(&sk->sk_receive_queue.lock);
-		if (skb == skb_peek(&sk->sk_receive_queue)) {
-			__skb_unlink(skb, &sk->sk_receive_queue);
-			clear = 1;
-		}
-		spin_unlock_bh(&sk->sk_receive_queue.lock);
-		if (clear)
-			kfree_skb(skb);
-	}
-
-	skb_free_datagram(sk, skb);
+	skb_kill_datagram(sk, skb, flags);
 
 	if (flags & MSG_DONTWAIT) {
 		UDP6_INC_STATS_USER(UDP_MIB_INERRORS);