mirror of https://gitee.com/openkylin/linux.git
Merge of rsync://rsync.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git/
commit 8800cea620
@@ -87,8 +87,7 @@ static void z_comp_free(void *arg)
 	if (state) {
 		zlib_deflateEnd(&state->strm);
-		if (state->strm.workspace)
-			vfree(state->strm.workspace);
+		vfree(state->strm.workspace);
 		kfree(state);
 	}
 }

@@ -308,8 +307,7 @@ static void z_decomp_free(void *arg)
 	if (state) {
 		zlib_inflateEnd(&state->strm);
-		if (state->strm.workspace)
-			kfree(state->strm.workspace);
+		kfree(state->strm.workspace);
 		kfree(state);
 	}
 }
@@ -2467,14 +2467,10 @@ static void ppp_destroy_interface(struct ppp *ppp)
 	skb_queue_purge(&ppp->mrq);
 #endif /* CONFIG_PPP_MULTILINK */
 #ifdef CONFIG_PPP_FILTER
-	if (ppp->pass_filter) {
-		kfree(ppp->pass_filter);
-		ppp->pass_filter = NULL;
-	}
-	if (ppp->active_filter) {
-		kfree(ppp->active_filter);
-		ppp->active_filter = NULL;
-	}
+	kfree(ppp->pass_filter);
+	ppp->pass_filter = NULL;
+	kfree(ppp->active_filter);
+	ppp->active_filter = NULL;
 #endif /* CONFIG_PPP_FILTER */
 
 	kfree(ppp);
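Note: kfree(NULL) and vfree(NULL) are defined to be no-ops, which is what makes the simplifications in the hunks above and in the driver hunks that follow safe. A minimal sketch of the before/after, using a hypothetical state structure rather than the real driver types:

	#include <linux/slab.h>

	/* Hypothetical state, for illustration only. */
	struct example_state {
		void *pass_filter;
		void *active_filter;
	};

	static void example_free_filters(struct example_state *st)
	{
		/* No NULL checks needed: kfree(NULL) simply returns. */
		kfree(st->pass_filter);
		st->pass_filter = NULL;
		kfree(st->active_filter);
		st->active_filter = NULL;
	}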
@@ -436,9 +436,7 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
 	}
 
 	if (err) {
-		if (chan->local_addr)
-			kfree(chan->local_addr);
-
+		kfree(chan->local_addr);
 		kfree(chan);
 		return err;
 	}

@@ -458,9 +456,7 @@ static int cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev)
 	struct cycx_x25_channel *chan = dev->priv;
 
 	if (chan->svc) {
-		if (chan->local_addr)
-			kfree(chan->local_addr);
-
+		kfree(chan->local_addr);
 		if (chan->state == WAN_CONNECTED)
 			del_timer(&chan->timer);
 	}
@@ -400,10 +400,8 @@ static void cpc_tty_close(struct tty_struct *tty, struct file *flip)
 		cpc_tty->buf_rx.last = NULL;
 	}
 
-	if (cpc_tty->buf_tx) {
-		kfree(cpc_tty->buf_tx);
-		cpc_tty->buf_tx = NULL;
-	}
+	kfree(cpc_tty->buf_tx);
+	cpc_tty->buf_tx = NULL;
 
 	CPC_TTY_DBG("%s: TTY closed\n",cpc_tty->name);
 

@@ -666,7 +664,7 @@ static void cpc_tty_rx_work(void * data)
 	unsigned long port;
 	int i, j;
 	st_cpc_tty_area *cpc_tty;
-	volatile st_cpc_rx_buf * buf;
+	volatile st_cpc_rx_buf *buf;
 	char flags=0,flg_rx=1;
 	struct tty_ldisc *ld;

@@ -680,9 +678,9 @@ static void cpc_tty_rx_work(void * data)
 			cpc_tty = &cpc_tty_area[port];
 
 			if ((buf=cpc_tty->buf_rx.first) != 0) {
-				if(cpc_tty->tty) {
+				if (cpc_tty->tty) {
 					ld = tty_ldisc_ref(cpc_tty->tty);
-					if(ld) {
+					if (ld) {
 						if (ld->receive_buf) {
 							CPC_TTY_DBG("%s: call line disc. receive_buf\n",cpc_tty->name);
 							ld->receive_buf(cpc_tty->tty, (char *)(buf->data), &flags, buf->size);

@@ -691,7 +689,7 @@ static void cpc_tty_rx_work(void * data)
 					}
 				}
 				cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next;
-				kfree((unsigned char *)buf);
+				kfree(buf);
 				buf = cpc_tty->buf_rx.first;
 				flg_rx = 1;
 			}

@@ -733,7 +731,7 @@ static void cpc_tty_rx_disc_frame(pc300ch_t *pc300chan)
 
 void cpc_tty_receive(pc300dev_t *pc300dev)
 {
-	st_cpc_tty_area *cpc_tty; 
+	st_cpc_tty_area *cpc_tty;
 	pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan;
 	pc300_t *card = (pc300_t *)pc300chan->card;
 	int ch = pc300chan->channel;

@@ -742,7 +740,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
 	int rx_len, rx_aux;
 	volatile unsigned char status;
 	unsigned short first_bd = pc300chan->rx_first_bd;
-	st_cpc_rx_buf *new=NULL;
+	st_cpc_rx_buf *new = NULL;
 	unsigned char dsr_rx;
 
 	if (pc300dev->cpc_tty == NULL) {

@@ -762,7 +760,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
 		if (status & DST_EOM) {
 			break;
 		}
-		ptdescr=(pcsca_bd_t __iomem *)(card->hw.rambase+cpc_readl(&ptdescr->next));
+		ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase+cpc_readl(&ptdescr->next));
 	}
 
 	if (!rx_len) {

@@ -771,10 +769,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
 			cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch),
 				   RX_BD_ADDR(ch, pc300chan->rx_last_bd));
 		}
-		if (new) {
-			kfree(new);
-			new = NULL;
-		}
+		kfree(new);
 		return;
 	}
 

@@ -787,7 +782,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
 			continue;
 		}
 
-		new = (st_cpc_rx_buf *) kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC);
+		new = (st_cpc_rx_buf *)kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC);
 		if (new == 0) {
 			cpc_tty_rx_disc_frame(pc300chan);
 			continue;
@@ -3664,15 +3664,10 @@ static void wanpipe_tty_close(struct tty_struct *tty, struct file * filp)
 		chdlc_disable_comm_shutdown(card);
 		unlock_adapter_irq(&card->wandev.lock,&smp_flags);
 
-		if (card->tty_buf){
-			kfree(card->tty_buf);
-			card->tty_buf=NULL;
-		}
-
-		if (card->tty_rx){
-			kfree(card->tty_rx);
-			card->tty_rx=NULL;
-		}
+		kfree(card->tty_buf);
+		card->tty_buf = NULL;
+		kfree(card->tty_rx);
+		card->tty_rx = NULL;
 	}
 	return;
 }
@@ -107,13 +107,9 @@ static struct x25_asy *x25_asy_alloc(void)
 static void x25_asy_free(struct x25_asy *sl)
 {
 	/* Free all X.25 frame buffers. */
-	if (sl->rbuff) {
-		kfree(sl->rbuff);
-	}
+	kfree(sl->rbuff);
 	sl->rbuff = NULL;
-	if (sl->xbuff) {
-		kfree(sl->xbuff);
-	}
+	kfree(sl->xbuff);
 	sl->xbuff = NULL;
 
 	if (!test_and_clear_bit(SLF_INUSE, &sl->flags)) {

@@ -134,10 +130,8 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
 	{
 		printk("%s: unable to grow X.25 buffers, MTU change cancelled.\n",
 		       dev->name);
-		if (xbuff != NULL)
-			kfree(xbuff);
-		if (rbuff != NULL)
-			kfree(rbuff);
+		kfree(xbuff);
+		kfree(rbuff);
 		return -ENOMEM;
 	}
 

@@ -169,10 +163,8 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
 
 	spin_unlock_bh(&sl->lock);
 
-	if (xbuff != NULL)
-		kfree(xbuff);
-	if (rbuff != NULL)
-		kfree(rbuff);
+	kfree(xbuff);
+	kfree(rbuff);
 	return 0;
 }
 
@@ -89,10 +89,14 @@ enum {
 	RTM_GETANYCAST	= 62,
 #define RTM_GETANYCAST	RTM_GETANYCAST
 
-	RTM_MAX,
-#define RTM_MAX		RTM_MAX
+	__RTM_MAX,
+#define RTM_MAX		(((__RTM_MAX + 3) & ~3) - 1)
 };
 
+#define RTM_NR_MSGTYPES	(RTM_MAX + 1 - RTM_BASE)
+#define RTM_NR_FAMILIES	(RTM_NR_MSGTYPES >> 2)
+#define RTM_FAM(cmd)	(((cmd) - RTM_BASE) >> 2)
+
 /*
    Generic structure for encapsulation of optional route information.
    It is reminiscent of sockaddr, but with sa_family replaced
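Note on the arithmetic above: rtnetlink message types are allocated in blocks of four per object class (NEW/DEL/GET plus one spare), so rounding __RTM_MAX up to the next multiple of four keeps the last block complete, and RTM_FAM() maps any command to its block index. A small sketch with hypothetical EX_* names, mirroring the real values (RTM_BASE is 16 in this header):

	/* Assume EX_RTM_BASE mirrors RTM_BASE == 16. */
	#define EX_RTM_BASE		16
	#define EX_RTM_NEWROUTE		(EX_RTM_BASE + 8)	/* third block of four */
	#define EX_RTM_FAM(cmd)		(((cmd) - EX_RTM_BASE) >> 2)

	/* EX_RTM_FAM(EX_RTM_NEWROUTE) == 2: the per-family slot used by the
	 * rtm_min[] and rta_max[] tables later in this diff. */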
@@ -140,8 +140,11 @@ enum {
 	XFRM_MSG_FLUSHPOLICY,
 #define XFRM_MSG_FLUSHPOLICY XFRM_MSG_FLUSHPOLICY
 
-	XFRM_MSG_MAX
+	__XFRM_MSG_MAX
 };
+#define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1)
+
+#define XFRM_NR_MSGTYPES (XFRM_MSG_MAX + 1 - XFRM_MSG_BASE)
 
 struct xfrm_user_tmpl {
 	struct xfrm_id		id;
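Note: this is the usual sentinel-enum idiom, so XFRM_MSG_MAX keeps tracking the last real message type as new ones are appended before __XFRM_MSG_MAX. A sketch with made-up names:

	enum {
		EX_MSG_BASE = 0x10,
		EX_MSG_FIRST = EX_MSG_BASE,
		EX_MSG_SECOND,
		__EX_MSG_MAX				/* one past the last real type */
	};
	#define EX_MSG_MAX	(__EX_MSG_MAX - 1)
	#define EX_NR_MSGTYPES	(EX_MSG_MAX + 1 - EX_MSG_BASE)	/* == 2 here */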
@@ -46,6 +46,7 @@ struct prefix_info {
 #include <linux/in6.h>
 #include <linux/netdevice.h>
 #include <net/if_inet6.h>
+#include <net/ipv6.h>
 
 #define IN6_ADDR_HSIZE		16
 
@@ -157,7 +157,8 @@ psched_tod_diff(int delta_sec, int bound)
 	   case 1: \
 		   __delta += 1000000; \
 	   case 0: \
+		   __delta = abs(__delta); \
 		   if (__delta > bound || __delta < 0) \
 			   __delta = bound; \
 	   } \
 	   __delta; \
 })
@@ -1,6 +1,7 @@
 #ifndef _NET_XFRM_H
 #define _NET_XFRM_H
 
+#include <linux/compiler.h>
 #include <linux/xfrm.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>

@@ -516,6 +517,15 @@ struct xfrm_dst
 	u32 child_mtu_cached;
 };
 
+static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
+{
+	dst_release(xdst->route);
+	if (likely(xdst->u.dst.xfrm))
+		xfrm_state_put(xdst->u.dst.xfrm);
+}
+
+extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
+
 /* Decapsulation state, used by the input to store data during
  * decapsulation procedure, to be used later (during the policy
  * check
@@ -427,7 +427,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 /* Get message from skb (based on rtnetlink_rcv_skb).  Each message is
  * processed by audit_receive_msg.  Malformed skbs with wrong length are
  * discarded silently.  */
-static int audit_receive_skb(struct sk_buff *skb)
+static void audit_receive_skb(struct sk_buff *skb)
 {
 	int		err;
 	struct nlmsghdr	*nlh;

@@ -436,7 +436,7 @@ static int audit_receive_skb(struct sk_buff *skb)
 	while (skb->len >= NLMSG_SPACE(0)) {
 		nlh = (struct nlmsghdr *)skb->data;
 		if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
-			return 0;
+			return;
 		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
 		if (rlen > skb->len)
 			rlen = skb->len;

@@ -446,23 +446,20 @@ static int audit_receive_skb(struct sk_buff *skb)
 			netlink_ack(skb, nlh, 0);
 		skb_pull(skb, rlen);
 	}
-	return 0;
 }
 
 /* Receive messages from netlink socket. */
 static void audit_receive(struct sock *sk, int length)
 {
 	struct sk_buff  *skb;
+	unsigned int qlen;
 
-	if (down_trylock(&audit_netlink_sem))
-		return;
+	down(&audit_netlink_sem);
 
-	/* FIXME: this must not cause starvation */
-	while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
-		if (audit_receive_skb(skb) && skb->len)
-			skb_queue_head(&sk->sk_receive_queue, skb);
-		else
-			kfree_skb(skb);
+	for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
+		skb = skb_dequeue(&sk->sk_receive_queue);
+		audit_receive_skb(skb);
+		kfree_skb(skb);
 	}
 	up(&audit_netlink_sem);
 }
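Note: the audit change above is the same bounded-drain pattern the later netlink receive hunks (rtnetlink, ip_queue, ip6_queue, tcp_diag, dn_rtmsg, xfrm_user) converge on: snapshot the queue length once, then drain at most that many skbs, so a sender that keeps queueing messages cannot keep the callback looping forever. A self-contained sketch with hypothetical handler names:

	#include <linux/skbuff.h>
	#include <net/sock.h>

	/* Hypothetical per-message handler, stubbed for illustration. */
	static void example_receive_skb(struct sk_buff *skb)
	{
		/* ... parse and act on one message ... */
	}

	static void example_netlink_data_ready(struct sock *sk, int len)
	{
		struct sk_buff *skb;
		unsigned int qlen;

		for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
			skb = skb_dequeue(&sk->sk_receive_queue);
			if (!skb)
				break;	/* drained early by another context */
			example_receive_skb(skb);
			kfree_skb(skb);
		}
	}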
@@ -16,6 +16,7 @@
 #include <linux/netdevice.h>
 #include <linux/if.h>
 #include <net/sock.h>
+#include <net/pkt_sched.h>
 #include <linux/rtnetlink.h>
 #include <linux/jiffies.h>
 #include <linux/spinlock.h>

@@ -74,6 +75,12 @@ void linkwatch_run_queue(void)
 		clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
 
 		if (dev->flags & IFF_UP) {
+			if (netif_carrier_ok(dev)) {
+				WARN_ON(dev->qdisc_sleeping == &noop_qdisc);
+				dev_activate(dev);
+			} else
+				dev_deactivate(dev);
+
 			netdev_state_change(dev);
 		}
 
@@ -217,21 +217,10 @@ void nf_debug_ip_local_deliver(struct sk_buff *skb)
 	 * NF_IP_RAW_INPUT and NF_IP_PRE_ROUTING.  */
 	if (!skb->dev) {
 		printk("ip_local_deliver: skb->dev is NULL.\n");
-	}
-	else if (strcmp(skb->dev->name, "lo") == 0) {
-		if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
-				      | (1 << NF_IP_POST_ROUTING)
-				      | (1 << NF_IP_PRE_ROUTING)
-				      | (1 << NF_IP_LOCAL_IN))) {
-			printk("ip_local_deliver: bad loopback skb: ");
-			debug_print_hooks_ip(skb->nf_debug);
-			nf_dump_skb(PF_INET, skb);
-		}
-	}
-	else {
+	} else {
 		if (skb->nf_debug != ((1<<NF_IP_PRE_ROUTING)
 				      | (1<<NF_IP_LOCAL_IN))) {
-			printk("ip_local_deliver: bad non-lo skb: ");
+			printk("ip_local_deliver: bad skb: ");
 			debug_print_hooks_ip(skb->nf_debug);
 			nf_dump_skb(PF_INET, skb);
 		}

@@ -247,8 +236,6 @@ void nf_debug_ip_loopback_xmit(struct sk_buff *newskb)
 		debug_print_hooks_ip(newskb->nf_debug);
 		nf_dump_skb(PF_INET, newskb);
 	}
-	/* Clear to avoid confusing input check */
-	newskb->nf_debug = 0;
 }
 
 void nf_debug_ip_finish_output2(struct sk_buff *skb)
@@ -86,30 +86,33 @@ struct sock *rtnl;
 
 struct rtnetlink_link * rtnetlink_links[NPROTO];
 
-static const int rtm_min[(RTM_MAX+1-RTM_BASE)/4] =
+static const int rtm_min[RTM_NR_FAMILIES] =
 {
-	NLMSG_LENGTH(sizeof(struct ifinfomsg)),
-	NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
-	NLMSG_LENGTH(sizeof(struct rtmsg)),
-	NLMSG_LENGTH(sizeof(struct ndmsg)),
-	NLMSG_LENGTH(sizeof(struct rtmsg)),
-	NLMSG_LENGTH(sizeof(struct tcmsg)),
-	NLMSG_LENGTH(sizeof(struct tcmsg)),
-	NLMSG_LENGTH(sizeof(struct tcmsg)),
-	NLMSG_LENGTH(sizeof(struct tcamsg))
+	[RTM_FAM(RTM_NEWLINK)]      = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+	[RTM_FAM(RTM_NEWADDR)]      = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
+	[RTM_FAM(RTM_NEWROUTE)]     = NLMSG_LENGTH(sizeof(struct rtmsg)),
+	[RTM_FAM(RTM_NEWNEIGH)]     = NLMSG_LENGTH(sizeof(struct ndmsg)),
+	[RTM_FAM(RTM_NEWRULE)]      = NLMSG_LENGTH(sizeof(struct rtmsg)),
+	[RTM_FAM(RTM_NEWQDISC)]     = NLMSG_LENGTH(sizeof(struct tcmsg)),
+	[RTM_FAM(RTM_NEWTCLASS)]    = NLMSG_LENGTH(sizeof(struct tcmsg)),
+	[RTM_FAM(RTM_NEWTFILTER)]   = NLMSG_LENGTH(sizeof(struct tcmsg)),
+	[RTM_FAM(RTM_NEWACTION)]    = NLMSG_LENGTH(sizeof(struct tcamsg)),
+	[RTM_FAM(RTM_NEWPREFIX)]    = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
+	[RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
+	[RTM_FAM(RTM_GETANYCAST)]   = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
 };
 
-static const int rta_max[(RTM_MAX+1-RTM_BASE)/4] =
+static const int rta_max[RTM_NR_FAMILIES] =
 {
-	IFLA_MAX,
-	IFA_MAX,
-	RTA_MAX,
-	NDA_MAX,
-	RTA_MAX,
-	TCA_MAX,
-	TCA_MAX,
-	TCA_MAX,
-	TCAA_MAX
+	[RTM_FAM(RTM_NEWLINK)]      = IFLA_MAX,
+	[RTM_FAM(RTM_NEWADDR)]      = IFA_MAX,
+	[RTM_FAM(RTM_NEWROUTE)]     = RTA_MAX,
+	[RTM_FAM(RTM_NEWNEIGH)]     = NDA_MAX,
+	[RTM_FAM(RTM_NEWRULE)]      = RTA_MAX,
+	[RTM_FAM(RTM_NEWQDISC)]     = TCA_MAX,
+	[RTM_FAM(RTM_NEWTCLASS)]    = TCA_MAX,
+	[RTM_FAM(RTM_NEWTFILTER)]   = TCA_MAX,
+	[RTM_FAM(RTM_NEWACTION)]    = TCAA_MAX,
 };
 
 void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)

@@ -606,27 +609,33 @@ static inline int rtnetlink_rcv_skb(struct sk_buff *skb)
 
 /*
  *  rtnetlink input queue processing routine:
- *	- try to acquire shared lock. If it is failed, defer processing.
+ *	- process as much as there was in the queue upon entry.
  *	- feed skbs to rtnetlink_rcv_skb, until it refuse a message,
- *	  that will occur, when a dump started and/or acquisition of
- *	  exclusive lock failed.
+ *	  that will occur, when a dump started.
  */
 
 static void rtnetlink_rcv(struct sock *sk, int len)
 {
+	unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
+
 	do {
 		struct sk_buff *skb;
 
-		if (rtnl_shlock_nowait())
-			return;
+		rtnl_lock();
+
+		if (qlen > skb_queue_len(&sk->sk_receive_queue))
+			qlen = skb_queue_len(&sk->sk_receive_queue);
 
-		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+		for (; qlen; qlen--) {
+			skb = skb_dequeue(&sk->sk_receive_queue);
 			if (rtnetlink_rcv_skb(skb)) {
 				if (skb->len)
 					skb_queue_head(&sk->sk_receive_queue,
 						       skb);
-				else
+				else {
 					kfree_skb(skb);
+					qlen--;
+				}
 				break;
 			}
 			kfree_skb(skb);

@@ -635,10 +644,10 @@ static void rtnetlink_rcv(struct sock *sk, int len)
 		up(&rtnl_sem);
 
 		netdev_run_todo();
-	} while (rtnl && rtnl->sk_receive_queue.qlen);
+	} while (qlen);
 }
 
-static struct rtnetlink_link link_rtnetlink_table[RTM_MAX-RTM_BASE+1] =
+static struct rtnetlink_link link_rtnetlink_table[RTM_NR_MSGTYPES] =
 {
 	[RTM_GETLINK - RTM_BASE] = { .dumpit = rtnetlink_dump_ifinfo },
 	[RTM_SETLINK - RTM_BASE] = { .doit   = do_setlink },
@@ -1411,21 +1411,22 @@ static struct file_operations dn_dev_seq_fops = {
 
 #endif /* CONFIG_PROC_FS */
 
-static struct rtnetlink_link dnet_rtnetlink_table[RTM_MAX-RTM_BASE+1] =
+static struct rtnetlink_link dnet_rtnetlink_table[RTM_NR_MSGTYPES] =
 {
-	 [4] = { .doit   = dn_dev_rtm_newaddr,	},
-	 [5] = { .doit   = dn_dev_rtm_deladdr,	},
-	 [6] = { .dumpit = dn_dev_dump_ifaddr,	},
-
+	[RTM_NEWADDR  - RTM_BASE] = { .doit	= dn_dev_rtm_newaddr,	},
+	[RTM_DELADDR  - RTM_BASE] = { .doit	= dn_dev_rtm_deladdr,	},
+	[RTM_GETADDR  - RTM_BASE] = { .dumpit	= dn_dev_dump_ifaddr,	},
 #ifdef CONFIG_DECNET_ROUTER
-	 [8] = { .doit   = dn_fib_rtm_newroute,	},
-	 [9] = { .doit   = dn_fib_rtm_delroute,	},
-	[10] = { .doit   = dn_cache_getroute, .dumpit = dn_fib_dump,	},
-	[16] = { .doit   = dn_fib_rtm_newrule,	},
-	[17] = { .doit   = dn_fib_rtm_delrule,	},
-	[18] = { .dumpit = dn_fib_dump_rules,	},
+	[RTM_NEWROUTE - RTM_BASE] = { .doit	= dn_fib_rtm_newroute,	},
+	[RTM_DELROUTE - RTM_BASE] = { .doit	= dn_fib_rtm_delroute,	},
+	[RTM_GETROUTE - RTM_BASE] = { .doit	= dn_cache_getroute,
+				      .dumpit	= dn_fib_dump,		},
+	[RTM_NEWRULE  - RTM_BASE] = { .doit	= dn_fib_rtm_newrule,	},
+	[RTM_DELRULE  - RTM_BASE] = { .doit	= dn_fib_rtm_delrule,	},
+	[RTM_GETRULE  - RTM_BASE] = { .dumpit	= dn_fib_dump_rules,	},
 #else
-	[10] = { .doit   = dn_cache_getroute, .dumpit = dn_cache_dump,	},
+	[RTM_GETROUTE - RTM_BASE] = { .doit	= dn_cache_getroute,
+				      .dumpit	= dn_cache_dump,
 #endif
 
 };
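Note: the rtnetlink dispatch tables in the surrounding hunks switch from bare slot numbers ([4], [5], ...) to designated initializers keyed on the message type, which stays correct if the numbering ever changes. A sketch with hypothetical handlers, assuming the RTM_NR_MSGTYPES macro introduced earlier in this diff:

	#include <linux/rtnetlink.h>

	/* Hypothetical handlers, defined only to make the table self-contained. */
	static int example_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
	{
		return 0;
	}

	static int example_dumpaddr(struct sk_buff *skb, struct netlink_callback *cb)
	{
		return 0;
	}

	static struct rtnetlink_link example_table[RTM_NR_MSGTYPES] = {
		[RTM_NEWADDR - RTM_BASE] = { .doit   = example_newaddr, },
		[RTM_GETADDR - RTM_BASE] = { .dumpit = example_dumpaddr, },
	};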
@@ -119,8 +119,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
 static void dnrmg_receive_user_sk(struct sock *sk, int len)
 {
 	struct sk_buff *skb;
+	unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
 
-	while((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+	for (; qlen && (skb = skb_dequeue(&sk->sk_receive_queue)); qlen--) {
 		dnrmg_receive_user_skb(skb);
 		kfree_skb(skb);
 	}
@@ -1107,17 +1107,18 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa)
 	}
 }
 
-static struct rtnetlink_link inet_rtnetlink_table[RTM_MAX - RTM_BASE + 1] = {
-	 [4] = { .doit   = inet_rtm_newaddr,  },
-	 [5] = { .doit   = inet_rtm_deladdr,  },
-	 [6] = { .dumpit = inet_dump_ifaddr,  },
-	 [8] = { .doit   = inet_rtm_newroute, },
-	 [9] = { .doit   = inet_rtm_delroute, },
-	[10] = { .doit   = inet_rtm_getroute, .dumpit = inet_dump_fib, },
+static struct rtnetlink_link inet_rtnetlink_table[RTM_NR_MSGTYPES] = {
+	[RTM_NEWADDR  - RTM_BASE] = { .doit	= inet_rtm_newaddr,  },
+	[RTM_DELADDR  - RTM_BASE] = { .doit	= inet_rtm_deladdr,  },
+	[RTM_GETADDR  - RTM_BASE] = { .dumpit	= inet_dump_ifaddr,  },
+	[RTM_NEWROUTE - RTM_BASE] = { .doit	= inet_rtm_newroute, },
+	[RTM_DELROUTE - RTM_BASE] = { .doit	= inet_rtm_delroute, },
+	[RTM_GETROUTE - RTM_BASE] = { .doit	= inet_rtm_getroute,
+				      .dumpit	= inet_dump_fib,     },
 #ifdef CONFIG_IP_MULTIPLE_TABLES
-	[16] = { .doit   = inet_rtm_newrule,  },
-	[17] = { .doit   = inet_rtm_delrule,  },
-	[18] = { .dumpit = inet_dump_rules,   },
+	[RTM_NEWRULE  - RTM_BASE] = { .doit	= inet_rtm_newrule,  },
+	[RTM_DELRULE  - RTM_BASE] = { .doit	= inet_rtm_delrule,  },
+	[RTM_GETRULE  - RTM_BASE] = { .dumpit	= inet_dump_rules,   },
 #endif
 };
 
@@ -111,6 +111,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb)
 #ifdef CONFIG_NETFILTER_DEBUG
 	nf_debug_ip_loopback_xmit(newskb);
 #endif
+	nf_reset(newskb);
 	netif_rx(newskb);
 	return 0;
 }
@@ -819,6 +819,7 @@ static int tcp_error(struct sk_buff *skb,
 	 */
 	/* FIXME: Source route IP option packets --RR */
 	if (hooknum == NF_IP_PRE_ROUTING
+	    && skb->ip_summed != CHECKSUM_UNNECESSARY
 	    && csum_tcpudp_magic(iph->saddr, iph->daddr, tcplen, IPPROTO_TCP,
 			         skb->ip_summed == CHECKSUM_HW ? skb->csum
 			         : skb_checksum(skb, iph->ihl*4, tcplen, 0))) {
@@ -546,20 +546,18 @@ ipq_rcv_skb(struct sk_buff *skb)
 static void
 ipq_rcv_sk(struct sock *sk, int len)
 {
-	do {
-		struct sk_buff *skb;
+	struct sk_buff *skb;
+	unsigned int qlen;
 
-		if (down_trylock(&ipqnl_sem))
-			return;
+	down(&ipqnl_sem);
 
-		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-			ipq_rcv_skb(skb);
-			kfree_skb(skb);
-		}
+	for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
+		skb = skb_dequeue(&sk->sk_receive_queue);
+		ipq_rcv_skb(skb);
+		kfree_skb(skb);
+	}
 
-		up(&ipqnl_sem);
-
-	} while (ipqnl && ipqnl->sk_receive_queue.qlen);
+	up(&ipqnl_sem);
 }
 
 static int
@@ -103,13 +103,15 @@ static struct nf_hook_ops ipt_ops[] = {
 		.hook = ipt_hook,
 		.pf = PF_INET,
 		.hooknum = NF_IP_PRE_ROUTING,
-		.priority = NF_IP_PRI_RAW
+		.priority = NF_IP_PRI_RAW,
+		.owner = THIS_MODULE,
 	},
 	{
 		.hook = ipt_hook,
 		.pf = PF_INET,
 		.hooknum = NF_IP_LOCAL_OUT,
-		.priority = NF_IP_PRI_RAW
+		.priority = NF_IP_PRI_RAW,
+		.owner = THIS_MODULE,
 	},
 };
 
@@ -777,8 +777,9 @@ static inline void tcpdiag_rcv_skb(struct sk_buff *skb)
 static void tcpdiag_rcv(struct sock *sk, int len)
 {
 	struct sk_buff *skb;
+	unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
 
-	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+	while (qlen-- && (skb = skb_dequeue(&sk->sk_receive_queue))) {
 		tcpdiag_rcv_skb(skb);
 		kfree_skb(skb);
 	}
@@ -222,10 +222,13 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 		int rover;
 
 		spin_lock(&tcp_portalloc_lock);
-		rover = tcp_port_rover;
+		if (tcp_port_rover < low)
+			rover = low;
+		else
+			rover = tcp_port_rover;
 		do {
 			rover++;
-			if (rover < low || rover > high)
+			if (rover > high)
 				rover = low;
 			head = &tcp_bhash[tcp_bhashfn(rover)];
 			spin_lock(&head->lock);
@@ -8,7 +8,10 @@
  *
  */
 
+#include <asm/bug.h>
+#include <linux/compiler.h>
 #include <linux/config.h>
+#include <linux/inetdevice.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
 

@@ -152,6 +155,8 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
 		x->u.rt.rt_dst = rt0->rt_dst;
 		x->u.rt.rt_gateway = rt->rt_gateway;
 		x->u.rt.rt_spec_dst = rt0->rt_spec_dst;
+		x->u.rt.idev = rt0->idev;
+		in_dev_hold(rt0->idev);
 		header_len -= x->u.dst.xfrm->props.header_len;
 		trailer_len -= x->u.dst.xfrm->props.trailer_len;
 	}

@@ -243,11 +248,48 @@ static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu)
 		path->ops->update_pmtu(path, mtu);
 }
 
+static void xfrm4_dst_destroy(struct dst_entry *dst)
+{
+	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+
+	if (likely(xdst->u.rt.idev))
+		in_dev_put(xdst->u.rt.idev);
+	xfrm_dst_destroy(xdst);
+}
+
+static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+			     int unregister)
+{
+	struct xfrm_dst *xdst;
+
+	if (!unregister)
+		return;
+
+	xdst = (struct xfrm_dst *)dst;
+	if (xdst->u.rt.idev->dev == dev) {
+		struct in_device *loopback_idev = in_dev_get(&loopback_dev);
+		BUG_ON(!loopback_idev);
+
+		do {
+			in_dev_put(xdst->u.rt.idev);
+			xdst->u.rt.idev = loopback_idev;
+			in_dev_hold(loopback_idev);
+			xdst = (struct xfrm_dst *)xdst->u.dst.child;
+		} while (xdst->u.dst.xfrm);
+
+		__in_dev_put(loopback_idev);
+	}
+
+	xfrm_dst_ifdown(dst, dev);
+}
+
 static struct dst_ops xfrm4_dst_ops = {
 	.family =		AF_INET,
 	.protocol =		__constant_htons(ETH_P_IP),
 	.gc =			xfrm4_garbage_collect,
 	.update_pmtu =		xfrm4_update_pmtu,
+	.destroy =		xfrm4_dst_destroy,
+	.ifdown =		xfrm4_dst_ifdown,
 	.gc_thresh =		1024,
 	.entry_size =		sizeof(struct xfrm_dst),
 };
@@ -3076,7 +3076,7 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
 	netlink_broadcast(rtnl, skb, 0, RTMGRP_IPV6_PREFIX, GFP_ATOMIC);
 }
 
-static struct rtnetlink_link inet6_rtnetlink_table[RTM_MAX - RTM_BASE + 1] = {
+static struct rtnetlink_link inet6_rtnetlink_table[RTM_NR_MSGTYPES] = {
 	[RTM_GETLINK - RTM_BASE] = { .dumpit = inet6_dump_ifinfo, },
 	[RTM_NEWADDR - RTM_BASE] = { .doit   = inet6_rtm_newaddr, },
 	[RTM_DELADDR - RTM_BASE] = { .doit   = inet6_rtm_deladdr, },
@@ -549,20 +549,18 @@ ipq_rcv_skb(struct sk_buff *skb)
 static void
 ipq_rcv_sk(struct sock *sk, int len)
 {
-	do {
-		struct sk_buff *skb;
+	struct sk_buff *skb;
+	unsigned int qlen;
 
-		if (down_trylock(&ipqnl_sem))
-			return;
+	down(&ipqnl_sem);
 
-		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-			ipq_rcv_skb(skb);
-			kfree_skb(skb);
-		}
+	for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
+		skb = skb_dequeue(&sk->sk_receive_queue);
+		ipq_rcv_skb(skb);
+		kfree_skb(skb);
+	}
 
-		up(&ipqnl_sem);
-
-	} while (ipqnl && ipqnl->sk_receive_queue.qlen);
+	up(&ipqnl_sem);
 }
 
 static int
@@ -455,11 +455,11 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
 				     struct raw6_sock *rp)
 {
-	struct inet_sock *inet = inet_sk(sk);
 	struct sk_buff *skb;
 	int err = 0;
 	int offset;
 	int len;
+	int total_len;
 	u32 tmp_csum;
 	u16 csum;
 

@@ -470,7 +470,8 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
 		goto out;
 
 	offset = rp->offset;
-	if (offset >= inet->cork.length - 1) {
+	total_len = inet_sk(sk)->cork.length - (skb->nh.raw - skb->data);
+	if (offset >= total_len - 1) {
 		err = -EINVAL;
 		ip6_flush_pending_frames(sk);
 		goto out;

@@ -514,7 +515,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
 
 	tmp_csum = csum_ipv6_magic(&fl->fl6_src,
 				   &fl->fl6_dst,
-				   inet->cork.length, fl->proto, tmp_csum);
+				   total_len, fl->proto, tmp_csum);
 
 	if (tmp_csum == 0)
 		tmp_csum = -1;
@@ -139,9 +139,12 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 		int rover;
 
 		spin_lock(&tcp_portalloc_lock);
-		rover = tcp_port_rover;
+		if (tcp_port_rover < low)
+			rover = low;
+		else
+			rover = tcp_port_rover;
 		do {	rover++;
-			if ((rover < low) || (rover > high))
+			if (rover > high)
 				rover = low;
 			head = &tcp_bhash[tcp_bhashfn(rover)];
 			spin_lock(&head->lock);
@@ -11,7 +11,11 @@
  *
  */
 
+#include <asm/bug.h>
+#include <linux/compiler.h>
 #include <linux/config.h>
+#include <linux/netdevice.h>
+#include <net/addrconf.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
 #include <net/ipv6.h>

@@ -166,6 +170,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
 		memcpy(&x->u.rt6.rt6i_gateway, &rt0->rt6i_gateway, sizeof(x->u.rt6.rt6i_gateway));
 		x->u.rt6.rt6i_dst = rt0->rt6i_dst;
 		x->u.rt6.rt6i_src = rt0->rt6i_src;
+		x->u.rt6.rt6i_idev = rt0->rt6i_idev;
+		in6_dev_hold(rt0->rt6i_idev);
 		header_len -= x->u.dst.xfrm->props.header_len;
 		trailer_len -= x->u.dst.xfrm->props.trailer_len;
 	}

@@ -251,11 +257,48 @@ static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu)
 		path->ops->update_pmtu(path, mtu);
 }
 
+static void xfrm6_dst_destroy(struct dst_entry *dst)
+{
+	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+
+	if (likely(xdst->u.rt6.rt6i_idev))
+		in6_dev_put(xdst->u.rt6.rt6i_idev);
+	xfrm_dst_destroy(xdst);
+}
+
+static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+			     int unregister)
+{
+	struct xfrm_dst *xdst;
+
+	if (!unregister)
+		return;
+
+	xdst = (struct xfrm_dst *)dst;
+	if (xdst->u.rt6.rt6i_idev->dev == dev) {
+		struct inet6_dev *loopback_idev = in6_dev_get(&loopback_dev);
+		BUG_ON(!loopback_idev);
+
+		do {
+			in6_dev_put(xdst->u.rt6.rt6i_idev);
+			xdst->u.rt6.rt6i_idev = loopback_idev;
+			in6_dev_hold(loopback_idev);
+			xdst = (struct xfrm_dst *)xdst->u.dst.child;
+		} while (xdst->u.dst.xfrm);
+
+		__in6_dev_put(loopback_idev);
+	}
+
+	xfrm_dst_ifdown(dst, dev);
+}
+
 static struct dst_ops xfrm6_dst_ops = {
 	.family =		AF_INET6,
 	.protocol =		__constant_htons(ETH_P_IPV6),
 	.gc =			xfrm6_garbage_collect,
 	.update_pmtu =		xfrm6_update_pmtu,
+	.destroy =		xfrm6_dst_destroy,
+	.ifdown =		xfrm6_dst_ifdown,
 	.gc_thresh =		1024,
 	.entry_size =		sizeof(struct xfrm_dst),
 };
@@ -373,7 +373,6 @@ static int netlink_release(struct socket *sock)
 		nlk->cb->done(nlk->cb);
 		netlink_destroy_callback(nlk->cb);
 		nlk->cb = NULL;
-		__sock_put(sk);
 	}
 	spin_unlock(&nlk->cb_lock);
 

@@ -1099,7 +1098,6 @@ static int netlink_dump(struct sock *sk)
 	spin_unlock(&nlk->cb_lock);
 
 	netlink_destroy_callback(cb);
-	__sock_put(sk);
 	return 0;
 }
 

@@ -1138,7 +1136,6 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 		return -EBUSY;
 	}
 	nlk->cb = cb;
-	sock_hold(sk);
 	spin_unlock(&nlk->cb_lock);
 
 	netlink_dump(sk);
@@ -185,7 +185,7 @@ config NET_SCH_GRED
 	depends on NET_SCHED
 	help
 	  Say Y here if you want to use the Generic Random Early Detection
-	  (RED) packet scheduling algorithm for some of your network devices
+	  (GRED) packet scheduling algorithm for some of your network devices
 	  (see the top of <file:net/sched/sch_red.c> for details and
 	  references about the algorithm).
 
@@ -171,10 +171,10 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
 				skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
 				skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
 			}
-			if (ret != TC_ACT_PIPE)
-				goto exec_done;
 			if (ret == TC_ACT_REPEAT)
 				goto repeat;	/* we need a ttl - JHS */
+			if (ret != TC_ACT_PIPE)
+				goto exec_done;
 		}
 		act = a->next;
 	}
@@ -1289,6 +1289,7 @@ static int __init pktsched_init(void)
 
 subsys_initcall(pktsched_init);
 
+EXPORT_SYMBOL(qdisc_lookup);
 EXPORT_SYMBOL(qdisc_get_rtab);
 EXPORT_SYMBOL(qdisc_put_rtab);
 EXPORT_SYMBOL(register_qdisc);
@@ -179,6 +179,7 @@ int qdisc_restart(struct net_device *dev)
 		netif_schedule(dev);
 		return 1;
 	}
+	BUG_ON((int) q->q.qlen < 0);
 	return q->q.qlen;
 }
 

@@ -539,6 +540,10 @@ void dev_activate(struct net_device *dev)
 		write_unlock_bh(&qdisc_tree_lock);
 	}
 
+	if (!netif_carrier_ok(dev))
+		/* Delay activation until next carrier-on event */
+		return;
+
 	spin_lock_bh(&dev->queue_lock);
 	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
 	if (dev->qdisc != &noqueue_qdisc) {
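Note: together with the link_watch.c hunk earlier in this diff, dev_activate() now backs off while the carrier is down and relies on the carrier-on event to (re)activate the queueing discipline. A compressed sketch of the gating with a hypothetical wrapper:

	#include <linux/netdevice.h>
	#include <net/pkt_sched.h>

	/* Sketch only: activation is skipped while the carrier is down; the
	 * link-watch worker re-invokes dev_activate() once carrier-on fires. */
	static void example_try_activate(struct net_device *dev)
	{
		if (!netif_carrier_ok(dev))
			return;		/* wait for the next carrier-on event */
		dev_activate(dev);
	}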
@@ -717,6 +717,10 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		if (q->direct_queue.qlen < q->direct_qlen) {
 			__skb_queue_tail(&q->direct_queue, skb);
 			q->direct_pkts++;
+		} else {
+			kfree_skb(skb);
+			sch->qstats.drops++;
+			return NET_XMIT_DROP;
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
@@ -138,38 +138,77 @@ static long tabledist(unsigned long mu, long sigma,
 }
 
 /* Put skb in the private delayed queue. */
-static int delay_skb(struct Qdisc *sch, struct sk_buff *skb)
+static int netem_delay(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
-	struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
 	psched_tdiff_t td;
 	psched_time_t now;
 
 	PSCHED_GET_TIME(now);
 	td = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist);
-	PSCHED_TADD2(now, td, cb->time_to_send);
 
 	/* Always queue at tail to keep packets in order */
 	if (likely(q->delayed.qlen < q->limit)) {
+		struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
+
+		PSCHED_TADD2(now, td, cb->time_to_send);
+
+		pr_debug("netem_delay: skb=%p now=%llu tosend=%llu\n", skb,
+			 now, cb->time_to_send);
+
 		__skb_queue_tail(&q->delayed, skb);
 		if (!timer_pending(&q->timer)) {
 			q->timer.expires = jiffies + PSCHED_US2JIFFIE(td);
 			add_timer(&q->timer);
 		}
 		return NET_XMIT_SUCCESS;
 	}
 
+	pr_debug("netem_delay: queue over limit %d\n", q->limit);
 	sch->qstats.overlimits++;
 	kfree_skb(skb);
 	return NET_XMIT_DROP;
 }
 
+/*
+ * Move a packet that is ready to send from the delay holding
+ * list to the underlying qdisc.
+ */
+static int netem_run(struct Qdisc *sch)
+{
+	struct netem_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *skb;
+	psched_time_t now;
+
+	PSCHED_GET_TIME(now);
+
+	skb = skb_peek(&q->delayed);
+	if (skb) {
+		const struct netem_skb_cb *cb
+			= (const struct netem_skb_cb *)skb->cb;
+		long delay
+			= PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
+		pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
+
+		/* if more time remaining? */
+		if (delay > 0) {
+			mod_timer(&q->timer, jiffies + delay);
+			return 1;
+		}
+
+		__skb_unlink(skb, &q->delayed);
+
+		if (q->qdisc->enqueue(skb, q->qdisc)) {
+			sch->q.qlen--;
+			sch->qstats.drops++;
+		}
+	}
+
+	return 0;
+}
+
 static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
-	struct sk_buff *skb2;
 	int ret;
 
-	pr_debug("netem_enqueue skb=%p @%lu\n", skb, jiffies);
+	pr_debug("netem_enqueue skb=%p\n", skb);
 
 	/* Random packet drop 0 => none, ~0 => all */
 	if (q->loss && q->loss >= get_crandom(&q->loss_cor)) {

@@ -180,11 +219,21 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	/* Random duplication */
-	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)
-	    && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
-		pr_debug("netem_enqueue: dup %p\n", skb2);
+	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) {
+		struct sk_buff *skb2;
+
+		skb2 = skb_clone(skb, GFP_ATOMIC);
+		if (skb2 && netem_delay(sch, skb2) == NET_XMIT_SUCCESS) {
+			struct Qdisc *qp;
+
+			/* Since one packet can generate two packets in the
+			 * queue, the parent's qlen accounting gets confused,
+			 * so fix it.
+			 */
+			qp = qdisc_lookup(sch->dev, TC_H_MAJ(sch->parent));
+			if (qp)
+				qp->q.qlen++;
 
-		if (delay_skb(sch, skb2)) {
 			sch->q.qlen++;
 			sch->bstats.bytes += skb2->len;
 			sch->bstats.packets++;

@@ -202,7 +251,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		ret = q->qdisc->enqueue(skb, q->qdisc);
 	} else {
 		q->counter = 0;
-		ret = delay_skb(sch, skb);
+		ret = netem_delay(sch, skb);
+		netem_run(sch);
 	}
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {

@@ -212,6 +262,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	} else
 		sch->qstats.drops++;
 
+	pr_debug("netem: enqueue ret %d\n", ret);
 	return ret;
 }
 

@@ -241,56 +292,35 @@ static unsigned int netem_drop(struct Qdisc* sch)
 	return len;
 }
 
-/* Dequeue packet.
- *  Move all packets that are ready to send from the delay holding
- *  list to the underlying qdisc, then just call dequeue
- */
 static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
+	int pending;
+
+	pending = netem_run(sch);
 
 	skb = q->qdisc->dequeue(q->qdisc);
-	if (skb)
+	if (skb) {
+		pr_debug("netem_dequeue: return skb=%p\n", skb);
 		sch->q.qlen--;
+		sch->flags &= ~TCQ_F_THROTTLED;
+	}
+	else if (pending) {
+		pr_debug("netem_dequeue: throttling\n");
+		sch->flags |= TCQ_F_THROTTLED;
+	}
+
 	return skb;
 }
 
 static void netem_watchdog(unsigned long arg)
 {
 	struct Qdisc *sch = (struct Qdisc *)arg;
-	struct netem_sched_data *q = qdisc_priv(sch);
-	struct net_device *dev = sch->dev;
-	struct sk_buff *skb;
-	psched_time_t now;
-
-	pr_debug("netem_watchdog: fired @%lu\n", jiffies);
-
-	spin_lock_bh(&dev->queue_lock);
-	PSCHED_GET_TIME(now);
-
-	while ((skb = skb_peek(&q->delayed)) != NULL) {
-		const struct netem_skb_cb *cb
-			= (const struct netem_skb_cb *)skb->cb;
-		long delay
-			= PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
-		pr_debug("netem_watchdog: skb %p@%lu %ld\n",
-			 skb, jiffies, delay);
-
-		/* if more time remaining? */
-		if (delay > 0) {
-			mod_timer(&q->timer, jiffies + delay);
-			break;
-		}
-		__skb_unlink(skb, &q->delayed);
-
-		if (q->qdisc->enqueue(skb, q->qdisc)) {
-			sch->q.qlen--;
-			sch->qstats.drops++;
-		}
-	}
-	qdisc_run(dev);
-	spin_unlock_bh(&dev->queue_lock);
+
+	pr_debug("netem_watchdog qlen=%d\n", sch->q.qlen);
+	sch->flags &= ~TCQ_F_THROTTLED;
+	netif_schedule(sch->dev);
 }
 
 static void netem_reset(struct Qdisc *sch)

@@ -301,6 +331,7 @@ static void netem_reset(struct Qdisc *sch)
 	skb_queue_purge(&q->delayed);
 
 	sch->q.qlen = 0;
+	sch->flags &= ~TCQ_F_THROTTLED;
 	del_timer_sync(&q->timer);
 }
 
@@ -1028,30 +1028,15 @@ static int stale_bundle(struct dst_entry *dst)
 	return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC);
 }
 
-static void xfrm_dst_destroy(struct dst_entry *dst)
+void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
 {
-	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
-
-	dst_release(xdst->route);
-
-	if (!dst->xfrm)
-		return;
-	xfrm_state_put(dst->xfrm);
-	dst->xfrm = NULL;
-}
-
-static void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
-			    int unregister)
-{
-	if (!unregister)
-		return;
-
 	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
 		dst->dev = &loopback_dev;
 		dev_hold(&loopback_dev);
 		dev_put(dev);
 	}
 }
+EXPORT_SYMBOL(xfrm_dst_ifdown);
 
 static void xfrm_link_failure(struct sk_buff *skb)
 {

@@ -1262,10 +1247,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
 		dst_ops->kmem_cachep = xfrm_dst_cache;
 		if (likely(dst_ops->check == NULL))
 			dst_ops->check = xfrm_dst_check;
-		if (likely(dst_ops->destroy == NULL))
-			dst_ops->destroy = xfrm_dst_destroy;
-		if (likely(dst_ops->ifdown == NULL))
-			dst_ops->ifdown = xfrm_dst_ifdown;
 		if (likely(dst_ops->negative_advice == NULL))
 			dst_ops->negative_advice = xfrm_negative_advice;
 		if (likely(dst_ops->link_failure == NULL))

@@ -1297,8 +1278,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
 			xfrm_policy_afinfo[afinfo->family] = NULL;
 			dst_ops->kmem_cachep = NULL;
 			dst_ops->check = NULL;
-			dst_ops->destroy = NULL;
-			dst_ops->ifdown = NULL;
 			dst_ops->negative_advice = NULL;
 			dst_ops->link_failure = NULL;
 			dst_ops->get_mss = NULL;
@@ -855,47 +855,44 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **x
 	return 0;
 }
 
-static const int xfrm_msg_min[(XFRM_MSG_MAX + 1 - XFRM_MSG_BASE)] = {
-	NLMSG_LENGTH(sizeof(struct xfrm_usersa_info)),	/* NEW SA */
-	NLMSG_LENGTH(sizeof(struct xfrm_usersa_id)),	/* DEL SA */
-	NLMSG_LENGTH(sizeof(struct xfrm_usersa_id)),	/* GET SA */
-	NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info)),/* NEW POLICY */
-	NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id)),  /* DEL POLICY */
-	NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id)),  /* GET POLICY */
-	NLMSG_LENGTH(sizeof(struct xfrm_userspi_info)),	/* ALLOC SPI */
-	NLMSG_LENGTH(sizeof(struct xfrm_user_acquire)),	/* ACQUIRE */
-	NLMSG_LENGTH(sizeof(struct xfrm_user_expire)),	/* EXPIRE */
-	NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info)),/* UPD POLICY */
-	NLMSG_LENGTH(sizeof(struct xfrm_usersa_info)),	/* UPD SA */
-	NLMSG_LENGTH(sizeof(struct xfrm_user_polexpire)), /* POLEXPIRE */
-	NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush)),	/* FLUSH SA */
-	NLMSG_LENGTH(0),				/* FLUSH POLICY */
+#define XMSGSIZE(type) NLMSG_LENGTH(sizeof(struct type))
+
+static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
+	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
+	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
+	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
+	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
+	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
+	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
+	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
+	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
+	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
+	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
+	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
+	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
+	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
+	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = NLMSG_LENGTH(0),
 };
 
+#undef XMSGSIZE
+
 static struct xfrm_link {
 	int (*doit)(struct sk_buff *, struct nlmsghdr *, void **);
 	int (*dump)(struct sk_buff *, struct netlink_callback *);
-} xfrm_dispatch[] = {
-	{ .doit = xfrm_add_sa, },
-	{ .doit = xfrm_del_sa, },
-	{
-		.doit = xfrm_get_sa,
-		.dump = xfrm_dump_sa,
-	},
-	{ .doit = xfrm_add_policy },
-	{ .doit = xfrm_get_policy },
-	{
-		.doit = xfrm_get_policy,
-		.dump = xfrm_dump_policy,
-	},
-	{ .doit = xfrm_alloc_userspi },
-	{},
-	{},
-	{ .doit = xfrm_add_policy },
-	{ .doit = xfrm_add_sa, },
-	{},
-	{ .doit = xfrm_flush_sa },
-	{ .doit = xfrm_flush_policy },
+} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
+	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
+	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = { .doit = xfrm_del_sa        },
+	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
+						   .dump = xfrm_dump_sa       },
+	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
+	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy    },
+	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
+						   .dump = xfrm_dump_policy   },
+	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
+	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
+	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
+	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa      },
+	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy  },
 };
 
 static int xfrm_done(struct netlink_callback *cb)

@@ -931,7 +928,9 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *err
 		return -1;
 	}
 
-	if ((type == 2 || type == 5) && (nlh->nlmsg_flags & NLM_F_DUMP)) {
+	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
+	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
+	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
 		u32 rlen;
 
 		if (link->dump == NULL)

@@ -1009,18 +1008,26 @@ static int xfrm_user_rcv_skb(struct sk_buff *skb)
 
 static void xfrm_netlink_rcv(struct sock *sk, int len)
 {
+	unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
+
 	do {
 		struct sk_buff *skb;
 
 		down(&xfrm_cfg_sem);
 
-		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+		if (qlen > skb_queue_len(&sk->sk_receive_queue))
+			qlen = skb_queue_len(&sk->sk_receive_queue);
+
+		for (; qlen; qlen--) {
+			skb = skb_dequeue(&sk->sk_receive_queue);
 			if (xfrm_user_rcv_skb(skb)) {
 				if (skb->len)
 					skb_queue_head(&sk->sk_receive_queue,
 						       skb);
-				else
+				else {
 					kfree_skb(skb);
+					qlen--;
+				}
 				break;
 			}
 			kfree_skb(skb);

@@ -1028,7 +1035,7 @@ static void xfrm_netlink_rcv(struct sock *sk, int len)
 
 		up(&xfrm_cfg_sem);
 
-	} while (xfrm_nl && xfrm_nl->sk_receive_queue.qlen);
+	} while (qlen);
 }
 
 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, int hard)