mirror of https://gitee.com/openkylin/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) IPVS oops'ers:
    a) Should not reset skb->nf_bridge in forwarding hook (Lin Ming)
    b) A 3.4 commit can cause ip_vs_control_cleanup to be invoked after
       the ipvs_core_ops are unregistered during rmmod (Julian Anastasov)

 2) ixgbevf bringup failure can crash in TX descriptor cleanup
    (Alexander Duyck)

 3) AX25 switch missing break statement hoses ROSE sockets (Alan Cox)

 4) CAIF accesses freed per-net memory (Sjur Brandeland)

 5) Network cgroup code has out-of-bounds accesses (Eric Dumazet), and
    accesses freed memory (Gao Feng)

 6) Fix a crash in SCTP reported by Dave Jones, caused by freeing an
    association that is still on a list (Neil Horman)

 7) __netdev_alloc_skb() regresses on GFP_DMA-using drivers because that
    GFP flag is not being retained for the allocation (Eric Dumazet)

 8) Missing NULL check in sch_sfb netlink message parsing (Alan Cox)

 9) bnx2 crashes because TX index iteration is not bounded correctly
    (Michael Chan)

10) IPoIB generates warnings in TCP queue collapsing (via
    skb_try_coalesce) because it does not set skb->truesize correctly
    (Eric Dumazet)

11) vlan_info objects leak for the implicit vlan with ID 0 (Amir Hanania)

12) A fix for TX time stamp handling in gianfar does not transfer socket
    ownership from one packet to another correctly, resulting in a
    socket write space imbalance (Eric Dumazet)

13) Julia Lawall found several cases where we do a list iteration, and
    then at loop termination unconditionally assume we ended up with a
    real list object, rather than the list head itself (CNIC, RXRPC,
    mISDN); a sketch of this pitfall follows the shortlog below.

14) The bonding driver handles procfs moving incorrectly when a device
    it manages is moved from one namespace to another (Eric Biederman)

15) Missing memory barriers in stmmac descriptor accesses result in
    various crashes (Deepak Sikri)

16) Fix handling of broadcast packets in batman-adv (Simon Wunderlich)

17) Properly check the sanity of sendmsg() lengths in ieee802154's
    dgram_sendmsg(). Dave Jones and others have hit and reported this
    bug (Sasha Levin)

18) Some drivers (b44 and b43legacy) on 64-bit machines stopped working
    because of how netdev_alloc_skb() was adjusted. Such drivers should
    now use alloc_skb() for obtaining bounce buffers, as sketched below
    (Eric Dumazet)

19) atl1c mis-managed its link state in that it stops the queue by hand
    on link down. The generic networking takes care of that, and this
    double stop locks the queue down, so simply removing the driver's
    queue stop call fixes the problem (Cloud Ren)

20) Fix out-of-memory due to mis-accounting in the netem packet
    scheduler (Eric Dumazet)

21) If DCB and SR-IOV are configured at the same time in IXGBE, the
    chip will hang because this is not supported (Alexander Duyck)

22) A commit to stop drivers using netdev->base_addr broke the CNIC
    driver (Michael Chan)

23) Timeout regression in ipset caused by an attempt to fix an overflow
    bug (Jozsef Kadlecsik)
24) mac80211 minstrel code allocates memory using an incorrect size
    (Thomas Huehn)

25) llcp_sock_getname() needs to check for a NULL device, otherwise we
    OOPS (Sasha Levin)

26) mwifiex leaks memory (Bing Zhao)

27) Propagate iwlwifi fix to iwlegacy: even when we're not associated,
    we need to monitor for stuck queues in the watchdog handler
    (Stanislaw Gruszka)

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (44 commits)
  ipvs: fix oops in ip_vs_dst_event on rmmod
  ipvs: fix oops on NAT reply in br_nf context
  ixgbevf: Fix panic when loading driver
  ax25: Fix missing break
  MAINTAINERS: reflect actual changes in IEEE 802.15.4 maintainership
  caif: Fix access to freed pernet memory
  net: cgroup: fix access the unallocated memory in netprio cgroup
  ixgbevf: Prevent RX/TX statistics getting reset to zero
  sctp: Fix list corruption resulting from freeing an association on a list
  net: respect GFP_DMA in __netdev_alloc_skb()
  e1000e: fix test for PHY being accessible on 82577/8/9 and I217
  e1000e: Correct link check logic for 82571 serdes
  sch_sfb: Fix missing NULL check
  bnx2: Fix bug in bnx2_free_tx_skbs().
  IPoIB: fix skb truesize underestimation
  net: Fix memory leak - vlan_info struct
  gianfar: fix potential sk_wmem_alloc imbalance
  drivers/net/ethernet/broadcom/cnic.c: remove invalid reference to list iterator variable
  net/rxrpc/ar-peer.c: remove invalid reference to list iterator variable
  drivers/isdn/mISDN/stack.c: remove invalid reference to list iterator variable
  ...
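Item 13 above describes a recurring C pitfall worth spelling out. The sketch below is minimal and hypothetical (the struct and function names are invented, not taken from the patched CNIC, RXRPC, or mISDN code): when list_for_each_entry() runs to completion without a break, its cursor no longer points at a real element, so dereferencing it after the loop reads garbage.

#include <linux/list.h>

struct peer {
	struct list_head link;
	int id;
};

/* Hypothetical lookup: return the peer with a matching id, or NULL. */
static struct peer *find_peer(struct list_head *peers, int id)
{
	struct peer *p;

	list_for_each_entry(p, peers, link) {
		if (p->id == id)
			return p;	/* "p" is a real element here */
	}

	/*
	 * Buggy pattern removed by the fixes: using "p" here. After a
	 * completed iteration, "p" is computed from the list head, which
	 * is not embedded in a struct peer, so "p->id" is garbage.
	 */
	return NULL;
}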
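Items 7 and 18 are two sides of the same allocator change. A minimal sketch, assuming a hypothetical driver whose DMA engine can only reach ZONE_DMA: the page-fragment fast path behind netdev_alloc_skb() does not honor GFP_DMA, so bounce buffers are allocated with alloc_skb() directly, exactly as the b44 and b43legacy hunks below do.

#include <linux/skbuff.h>
#include <linux/gfp.h>

/*
 * Hypothetical helper: allocate a bounce buffer that is guaranteed to
 * come from ZONE_DMA. GFP_ATOMIC because this may run in softirq
 * context.
 */
static struct sk_buff *dma_bounce_skb(unsigned int len)
{
	return alloc_skb(len, GFP_ATOMIC | GFP_DMA);
}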
commit a018540141
@@ -3433,13 +3433,14 @@ S: Supported
 F: drivers/idle/i7300_idle.c
 
 IEEE 802.15.4 SUBSYSTEM
+M: Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
-M: Sergey Lapin <slapin@ossfans.org>
 L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
 W: http://apps.sourceforge.net/trac/linux-zigbee
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
 S: Maintained
 F: net/ieee802154/
+F: net/mac802154/
 F: drivers/ieee802154/
 
 IIO SUBSYSTEM AND DRIVERS
@@ -123,7 +123,7 @@ static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
 
 		skb_frag_size_set(frag, size);
 		skb->data_len += size;
-		skb->truesize += size;
+		skb->truesize += PAGE_SIZE;
 	} else
 		skb_put(skb, length);
 
@@ -156,14 +156,18 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct sk_buff *skb;
 	int buf_size;
+	int tailroom;
 	u64 *mapping;
 
-	if (ipoib_ud_need_sg(priv->max_ib_mtu))
+	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
 		buf_size = IPOIB_UD_HEAD_SIZE;
-	else
+		tailroom = 128; /* reserve some tailroom for IP/TCP headers */
+	} else {
 		buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+		tailroom = 0;
+	}
 
-	skb = dev_alloc_skb(buf_size + 4);
+	skb = dev_alloc_skb(buf_size + tailroom + 4);
 	if (unlikely(!skb))
 		return NULL;
 
@@ -135,8 +135,8 @@ send_layer2(struct mISDNstack *st, struct sk_buff *skb)
 			skb = NULL;
 		else if (*debug & DEBUG_SEND_ERR)
 			printk(KERN_DEBUG
-			       "%s ch%d mgr prim(%x) addr(%x) err %d\n",
-			       __func__, ch->nr, hh->prim, ch->addr, ret);
+			       "%s mgr prim(%x) err %d\n",
+			       __func__, hh->prim, ret);
 	}
 out:
 	mutex_unlock(&st->lmutex);
@@ -6,7 +6,7 @@
 #include "bonding.h"
 #include "bond_alb.h"
 
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
@@ -3227,6 +3227,12 @@ static int bond_master_netdev_event(unsigned long event,
 	switch (event) {
 	case NETDEV_CHANGENAME:
 		return bond_event_changename(event_bond);
+	case NETDEV_UNREGISTER:
+		bond_remove_proc_entry(event_bond);
+		break;
+	case NETDEV_REGISTER:
+		bond_create_proc_entry(event_bond);
+		break;
 	default:
 		break;
 	}
@@ -4411,8 +4417,6 @@ static void bond_uninit(struct net_device *bond_dev)
 
 	bond_work_cancel_all(bond);
 
-	bond_remove_proc_entry(bond);
-
 	bond_debug_unregister(bond);
 
 	__hw_addr_flush(&bond->mc_list);
@@ -4814,7 +4818,6 @@ static int bond_init(struct net_device *bond_dev)
 
 	bond_set_lockdep_class(bond_dev);
 
-	bond_create_proc_entry(bond);
 	list_add_tail(&bond->bond_list, &bn->dev_list);
 
 	bond_prepare_sysfs_group(bond);
@@ -261,7 +261,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
 	if ((phy_data & BMSR_LSTATUS) == 0) {
 		/* link down */
 		netif_carrier_off(netdev);
-		netif_stop_queue(netdev);
 		hw->hibernate = true;
 		if (atl1c_reset_mac(hw) != 0)
 			if (netif_msg_hw(adapter))
@@ -656,7 +656,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 		dma_unmap_single(bp->sdev->dma_dev, mapping,
 				 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
-		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
 		if (skb == NULL)
 			return -ENOMEM;
 		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
@@ -967,7 +967,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		dma_unmap_single(bp->sdev->dma_dev, mapping, len,
 				 DMA_TO_DEVICE);
 
-		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+		bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 			goto err_out;
 
@@ -5372,7 +5372,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 		int k, last;
 
 		if (skb == NULL) {
-			j++;
+			j = NEXT_TX_BD(j);
 			continue;
 		}
 
@@ -5384,8 +5384,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 		tx_buf->skb = NULL;
 
 		last = tx_buf->nr_frags;
-		j++;
-		for (k = 0; k < last; k++, j++) {
+		j = NEXT_TX_BD(j);
+		for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
 			tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
 			dma_unmap_page(&bp->pdev->dev,
 				       dma_unmap_addr(tx_buf, mapping),
@@ -534,7 +534,8 @@ int cnic_unregister_driver(int ulp_type)
 	}
 
 	if (atomic_read(&ulp_ops->ref_count) != 0)
-		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
+		pr_warn("%s: Failed waiting for ref count to go to zero\n",
+			__func__);
 	return 0;
 
 out_unlock:
@@ -1053,12 +1054,13 @@ static int cnic_init_uio(struct cnic_dev *dev)
 
 	uinfo = &udev->cnic_uinfo;
 
-	uinfo->mem[0].addr = dev->netdev->base_addr;
+	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
 	uinfo->mem[0].internal_addr = dev->regview;
-	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
 	uinfo->mem[0].memtype = UIO_MEM_PHYS;
 
 	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
+						     TX_MAX_TSS_RINGS + 1);
 		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
 					PAGE_MASK;
 		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
@@ -1068,6 +1070,8 @@ static int cnic_init_uio(struct cnic_dev *dev)
 
 		uinfo->name = "bnx2_cnic";
 	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
+
 		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
 					PAGE_MASK;
 		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
@@ -2063,10 +2063,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			return NETDEV_TX_OK;
 		}
 
-		/* Steal sock reference for processing TX time stamps */
-		swap(skb_new->sk, skb->sk);
-		swap(skb_new->destructor, skb->destructor);
-		kfree_skb(skb);
+		if (skb->sk)
+			skb_set_owner_w(skb_new, skb->sk);
+		consume_skb(skb);
 		skb = skb_new;
 	}
 
@@ -1572,6 +1572,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
 	ctrl = er32(CTRL);
 	status = er32(STATUS);
 	rxcw = er32(RXCW);
+	/* SYNCH bit and IV bit are sticky */
+	udelay(10);
+	rxcw = er32(RXCW);
 
 	if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
 
@@ -325,24 +325,46 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
 **/
 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 {
-	u16 phy_reg;
-	u32 phy_id;
+	u16 phy_reg = 0;
+	u32 phy_id = 0;
+	s32 ret_val;
+	u16 retry_count;
 
-	e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
-	phy_id = (u32)(phy_reg << 16);
-	e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
-	phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+	for (retry_count = 0; retry_count < 2; retry_count++) {
+		ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
+		if (ret_val || (phy_reg == 0xFFFF))
+			continue;
+		phy_id = (u32)(phy_reg << 16);
+
+		ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
+		if (ret_val || (phy_reg == 0xFFFF)) {
+			phy_id = 0;
+			continue;
+		}
+		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+		break;
+	}
 
 	if (hw->phy.id) {
 		if (hw->phy.id == phy_id)
 			return true;
-	} else {
-		if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
-			hw->phy.id = phy_id;
+	} else if (phy_id) {
+		hw->phy.id = phy_id;
+		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
 		return true;
 	}
 
-	return false;
+	/*
+	 * In case the PHY needs to be in mdio slow mode,
+	 * set slow mode and try to get the PHY id again.
+	 */
+	hw->phy.ops.release(hw);
+	ret_val = e1000_set_mdio_slow_mode_hv(hw);
+	if (!ret_val)
+		ret_val = e1000e_get_phy_id(hw);
+	hw->phy.ops.acquire(hw);
+
+	return !ret_val;
 }
 
 /**
@@ -6647,6 +6647,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 		return -EINVAL;
 	}
 
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		e_err(drv, "Enable failed, SR-IOV enabled\n");
+		return -EINVAL;
+	}
+
 	/* Hardware supports up to 8 traffic classes */
 	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
 	    (hw->mac.type == ixgbe_mac_82598EB &&
@@ -201,6 +201,9 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
 	unsigned int i, eop, count = 0;
 	unsigned int total_bytes = 0, total_packets = 0;
 
+	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+		return true;
+
 	i = tx_ring->next_to_clean;
 	eop = tx_ring->tx_buffer_info[i].next_to_watch;
 	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -969,8 +972,6 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
 	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 	for (i = 0; i < q_vector->txr_count; i++) {
 		tx_ring = &(adapter->tx_ring[r_idx]);
-		tx_ring->total_bytes = 0;
-		tx_ring->total_packets = 0;
 		ixgbevf_clean_tx_irq(adapter, tx_ring);
 		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
 				      r_idx + 1);
@@ -994,16 +995,6 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbevf_ring *rx_ring;
 	int r_idx;
-	int i;
-
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	for (i = 0; i < q_vector->rxr_count; i++) {
-		rx_ring = &(adapter->rx_ring[r_idx]);
-		rx_ring->total_bytes = 0;
-		rx_ring->total_packets = 0;
-		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-				      r_idx + 1);
-	}
 
 	if (!q_vector->rxr_count)
 		return IRQ_HANDLED;
@@ -51,7 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
 		priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
 						csum);
-
+		wmb();
 		entry = (++priv->cur_tx) % txsize;
 		desc = priv->dma_tx + entry;
 
@@ -59,6 +59,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 						len, DMA_TO_DEVICE);
 		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+		wmb();
 		priv->hw->desc->set_tx_owner(desc);
 		priv->tx_skbuff[entry] = NULL;
 	} else {
@@ -1212,6 +1212,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 			priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
 			wmb();
 			priv->hw->desc->set_tx_owner(desc);
+			wmb();
 		}
 
 	/* Interrupt on completition only for the latest segment */
@@ -1227,6 +1228,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		/* To avoid raise condition */
 		priv->hw->desc->set_tx_owner(first);
+		wmb();
 
 	priv->cur_tx++;
 
@@ -1290,6 +1292,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 		}
 		wmb();
 		priv->hw->desc->set_rx_owner(p + entry);
+		wmb();
 	}
 }
 
@@ -46,7 +46,13 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
 	struct mdio_mux_parent_bus *pb = cb->parent;
 	int r;
 
-	mutex_lock(&pb->mii_bus->mdio_lock);
+	/* In theory multiple mdio_mux could be stacked, thus creating
+	 * more than a single level of nesting. But in practice,
+	 * SINGLE_DEPTH_NESTING will cover the vast majority of use
+	 * cases. We use it, instead of trying to handle the general
+	 * case.
+	 */
+	mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
 	r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
 	if (r)
 		goto out;
@@ -71,7 +77,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
 
 	int r;
 
-	mutex_lock(&pb->mii_bus->mdio_lock);
+	mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
 	r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
 	if (r)
 		goto out;
@@ -346,6 +346,15 @@ static const struct driver_info qmi_wwan_force_int1 = {
 	.data          = BIT(1), /* interface whitelist bitmap */
 };
 
+static const struct driver_info qmi_wwan_force_int2 = {
+	.description   = "Qualcomm WWAN/QMI device",
+	.flags         = FLAG_WWAN,
+	.bind          = qmi_wwan_bind_shared,
+	.unbind        = qmi_wwan_unbind_shared,
+	.manage_power  = qmi_wwan_manage_power,
+	.data          = BIT(2), /* interface whitelist bitmap */
+};
+
 static const struct driver_info qmi_wwan_force_int3 = {
 	.description   = "Qualcomm WWAN/QMI device",
 	.flags         = FLAG_WWAN,
@@ -498,6 +507,15 @@ static const struct usb_device_id products[] = {
 		.bInterfaceProtocol = 0xff,
 		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
 	},
+	{	/* ZTE MF60 */
+		.match_flags        = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+		.idVendor           = 0x19d2,
+		.idProduct          = 0x1402,
+		.bInterfaceClass    = 0xff,
+		.bInterfaceSubClass = 0xff,
+		.bInterfaceProtocol = 0xff,
+		.driver_info        = (unsigned long)&qmi_wwan_force_int2,
+	},
 	{	/* Sierra Wireless MC77xx in QMI mode */
 		.match_flags        = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
 		.idVendor           = 0x1199,
@@ -1072,7 +1072,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 	/* create a bounce buffer in zone_dma on mapping failure. */
 	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
-		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+		bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb) {
 			ring->current_slot = old_top_slot;
 			ring->used_slots = old_used_slots;
@@ -3405,7 +3405,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
 		return 0;
 	}
 
-	if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+	if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
 		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
 			key_flags);
 		spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -3420,7 +3420,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
 	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
 	il->stations[sta_id].sta.key.key_flags =
 	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
-	il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+	il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
 	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
 	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
 
@@ -4767,7 +4767,6 @@ il_bg_watchdog(unsigned long data)
 		return;
 
 	/* monitor and check for other stuck queues */
-	if (il_is_any_associated(il)) {
 	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
 		/* skip as we already checked the command queue */
 		if (cnt == il->cmd_queue)
@@ -4775,7 +4774,6 @@ il_bg_watchdog(unsigned long data)
 		if (il_check_stuck_queue(il, cnt))
 			return;
 	}
-	}
 
 	mod_timer(&il->watchdog,
 		  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
@@ -958,6 +958,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
 	case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
 		/* firmware doesn't support this type of hidden SSID */
 	default:
+		kfree(bss_cfg);
 		return -EINVAL;
 	}
 
@@ -436,8 +436,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
 	case QID_RX:
 		if (!rt2x00queue_full(queue))
 			rt2x00queue_for_each_entry(queue,
-						   Q_INDEX_DONE,
 						   Q_INDEX,
+						   Q_INDEX_DONE,
 						   NULL,
 						   rt2x00usb_kick_rx_entry);
 		break;
@@ -1425,7 +1425,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
 	if (!ct || !nf_ct_is_untracked(ct)) {
-		nf_reset(skb);
+		nf_conntrack_put(skb->nfct);
 		skb->nfct = &nf_ct_untracked_get()->ct_general;
 		skb->nfctinfo = IP_CT_NEW;
 		nf_conntrack_get(skb->nfct);
@@ -78,7 +78,7 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
 	struct net *net = nf_ct_net(ct);
 	struct nf_conntrack_ecache *e;
 
-	if (net->ct.nf_conntrack_event_cb == NULL)
+	if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
 		return;
 
 	e = nf_ct_ecache_find(ct);
@@ -403,6 +403,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		break;
 
 	case NETDEV_DOWN:
+		if (dev->features & NETIF_F_HW_VLAN_FILTER)
+			vlan_vid_del(dev, 0);
+
 		/* Put all VLANs for this dev in the down state too. */
 		for (i = 0; i < VLAN_N_VID; i++) {
 			vlandev = vlan_group_get_device(grp, i);
@@ -842,6 +842,7 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
 		case AX25_P_NETROM:
 			if (ax25_protocol_is_registered(AX25_P_NETROM))
 				return -ESOCKTNOSUPPORT;
+			break;
 #endif
 #ifdef CONFIG_ROSE_MODULE
 		case AX25_P_ROSE:
@@ -1351,6 +1351,7 @@ void bla_free(struct bat_priv *bat_priv)
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
+ * @is_bcast: the packet came in a broadcast packet type.
  *
  * bla_rx avoidance checks if:
  *  * we have to race for a claim
@@ -1361,7 +1362,8 @@ void bla_free(struct bat_priv *bat_priv)
 * process the skb.
 *
 */
-int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
+	   bool is_bcast)
 {
 	struct ethhdr *ethhdr;
 	struct claim search_claim, *claim = NULL;
@@ -1380,7 +1382,7 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
 
 	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
 		/* don't allow broadcasts while requests are in flight */
-		if (is_multicast_ether_addr(ethhdr->h_dest))
+		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
 			goto handled;
 
 	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
@@ -1406,8 +1408,13 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
 	}
 
 	/* if it is a broadcast ... */
-	if (is_multicast_ether_addr(ethhdr->h_dest)) {
-		/* ... drop it. the responsible gateway is in charge. */
+	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
+		/* ... drop it. the responsible gateway is in charge.
+		 *
+		 * We need to check is_bcast because with the gateway
+		 * feature, broadcasts (like DHCP requests) may be sent
+		 * using a unicast packet type.
+		 */
 		goto handled;
 	} else {
 		/* seems the client considers us as its best gateway.
@@ -23,7 +23,8 @@
 #define _NET_BATMAN_ADV_BLA_H_
 
 #ifdef CONFIG_BATMAN_ADV_BLA
-int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
+	   bool is_bcast);
 int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
 int bla_is_backbone_gw(struct sk_buff *skb,
 		       struct orig_node *orig_node, int hdr_size);
@@ -41,7 +42,7 @@ void bla_free(struct bat_priv *bat_priv);
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 
 static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
-			 short vid)
+			 short vid, bool is_bcast)
 {
 	return 0;
 }
@@ -256,7 +256,11 @@ void interface_rx(struct net_device *soft_iface,
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
 	struct ethhdr *ethhdr;
 	struct vlan_ethhdr *vhdr;
+	struct batman_header *batadv_header = (struct batman_header *)skb->data;
 	short vid __maybe_unused = -1;
+	bool is_bcast;
+
+	is_bcast = (batadv_header->packet_type == BAT_BCAST);
 
 	/* check if enough space is available for pulling, and pull */
 	if (!pskb_may_pull(skb, hdr_size))
@@ -302,7 +306,7 @@ void interface_rx(struct net_device *soft_iface,
 	/* Let the bridge loop avoidance check the packet. If will
 	 * not handle it, we can safely push it up.
 	 */
-	if (bla_rx(bat_priv, skb, vid))
+	if (bla_rx(bat_priv, skb, vid, is_bcast))
 		goto out;
 
 	netif_rx(skb);
@@ -561,9 +561,9 @@ static int __init caif_device_init(void)
 
 static void __exit caif_device_exit(void)
 {
-	unregister_pernet_subsys(&caif_net_ops);
 	unregister_netdevice_notifier(&caif_device_notifier);
 	dev_remove_pack(&caif_packet_type);
+	unregister_pernet_subsys(&caif_net_ops);
 }
 
 module_init(caif_device_init);
@@ -2444,8 +2444,12 @@ static void skb_update_prio(struct sk_buff *skb)
 {
 	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
 
-	if ((!skb->priority) && (skb->sk) && map)
-		skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+	if (!skb->priority && skb->sk && map) {
+		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
+
+		if (prioidx < map->priomap_len)
+			skb->priority = map->priomap[prioidx];
+	}
 }
 #else
 #define skb_update_prio(skb)
@@ -49,8 +49,9 @@ static int get_prioidx(u32 *prio)
 		return -ENOSPC;
 	}
 	set_bit(prioidx, prioidx_map);
+	if (atomic_read(&max_prioidx) < prioidx)
+		atomic_set(&max_prioidx, prioidx);
 	spin_unlock_irqrestore(&prioidx_map_lock, flags);
-	atomic_set(&max_prioidx, prioidx);
 	*prio = prioidx;
 	return 0;
 }
@@ -64,7 +65,7 @@ static void put_prioidx(u32 idx)
 	spin_unlock_irqrestore(&prioidx_map_lock, flags);
 }
 
-static void extend_netdev_table(struct net_device *dev, u32 new_len)
+static int extend_netdev_table(struct net_device *dev, u32 new_len)
 {
 	size_t new_size = sizeof(struct netprio_map) +
 			   ((sizeof(u32) * new_len));
@@ -76,7 +77,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
 
 	if (!new_priomap) {
 		pr_warn("Unable to alloc new priomap!\n");
-		return;
+		return -ENOMEM;
 	}
 
 	for (i = 0;
@@ -89,46 +90,79 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
 	rcu_assign_pointer(dev->priomap, new_priomap);
 	if (old_priomap)
 		kfree_rcu(old_priomap, rcu);
+	return 0;
+}
+
+static int write_update_netdev_table(struct net_device *dev)
+{
+	int ret = 0;
+	u32 max_len;
+	struct netprio_map *map;
+
+	rtnl_lock();
+	max_len = atomic_read(&max_prioidx) + 1;
+	map = rtnl_dereference(dev->priomap);
+	if (!map || map->priomap_len < max_len)
+		ret = extend_netdev_table(dev, max_len);
+	rtnl_unlock();
+
+	return ret;
 }
 
-static void update_netdev_tables(void)
+static int update_netdev_tables(void)
 {
+	int ret = 0;
 	struct net_device *dev;
-	u32 max_len = atomic_read(&max_prioidx) + 1;
+	u32 max_len;
 	struct netprio_map *map;
 
 	rtnl_lock();
+	max_len = atomic_read(&max_prioidx) + 1;
 	for_each_netdev(&init_net, dev) {
 		map = rtnl_dereference(dev->priomap);
-		if ((!map) ||
-		    (map->priomap_len < max_len))
-			extend_netdev_table(dev, max_len);
+		/*
+		 * don't allocate priomap if we didn't
+		 * change net_prio.ifpriomap (map == NULL),
		 * this will speed up skb_update_prio.
+		 */
+		if (map && map->priomap_len < max_len) {
+			ret = extend_netdev_table(dev, max_len);
+			if (ret < 0)
+				break;
+		}
 	}
 	rtnl_unlock();
+	return ret;
 }
 
 static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
 {
 	struct cgroup_netprio_state *cs;
-	int ret;
+	int ret = -EINVAL;
 
 	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
 	if (!cs)
 		return ERR_PTR(-ENOMEM);
 
-	if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) {
-		kfree(cs);
-		return ERR_PTR(-EINVAL);
-	}
+	if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx)
+		goto out;
 
 	ret = get_prioidx(&cs->prioidx);
-	if (ret != 0) {
+	if (ret < 0) {
 		pr_warn("No space in priority index array\n");
-		kfree(cs);
-		return ERR_PTR(ret);
+		goto out;
 	}
 
+	ret = update_netdev_tables();
+	if (ret < 0) {
+		put_prioidx(cs->prioidx);
+		goto out;
+	}
+
 	return &cs->css;
+out:
+	kfree(cs);
+	return ERR_PTR(ret);
 }
 
 static void cgrp_destroy(struct cgroup *cgrp)
@@ -141,7 +175,7 @@ static void cgrp_destroy(struct cgroup *cgrp)
 	rtnl_lock();
 	for_each_netdev(&init_net, dev) {
 		map = rtnl_dereference(dev->priomap);
-		if (map)
+		if (map && cs->prioidx < map->priomap_len)
 			map->priomap[cs->prioidx] = 0;
 	}
 	rtnl_unlock();
@@ -165,7 +199,7 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft,
 	rcu_read_lock();
 	for_each_netdev_rcu(&init_net, dev) {
 		map = rcu_dereference(dev->priomap);
-		priority = map ? map->priomap[prioidx] : 0;
+		priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
 		cb->fill(cb, dev->name, priority);
 	}
 	rcu_read_unlock();
@@ -220,13 +254,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
 	if (!dev)
 		goto out_free_devname;
 
-	update_netdev_tables();
-	ret = 0;
+	ret = write_update_netdev_table(dev);
+	if (ret < 0)
+		goto out_put_dev;
+
 	rcu_read_lock();
 	map = rcu_dereference(dev->priomap);
 	if (map)
 		map->priomap[prioidx] = priority;
 	rcu_read_unlock();
+
+out_put_dev:
 	dev_put(dev);
 
 out_free_devname:
@@ -353,7 +353,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
 			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-	if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
+	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
 		void *data = netdev_alloc_frag(fragsz);
 
 		if (likely(data)) {
@@ -230,6 +230,12 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
 	mtu = dev->mtu;
 	pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
+	if (size > mtu) {
+		pr_debug("size = %Zu, mtu = %u\n", size, mtu);
+		err = -EINVAL;
+		goto out_dev;
+	}
+
 	hlen = LL_RESERVED_SPACE(dev);
 	tlen = dev->needed_tailroom;
 	skb = sock_alloc_send_skb(sk, hlen + tlen + size,
@@ -258,12 +264,6 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
 	if (err < 0)
 		goto out_skb;
 
-	if (size > mtu) {
-		pr_debug("size = %Zu, mtu = %u\n", size, mtu);
-		err = -EINVAL;
-		goto out_skb;
-	}
-
 	skb->dev = dev;
 	skb->sk = sk;
 	skb->protocol = htons(ETH_P_IEEE802154);
@@ -2174,15 +2174,13 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
 		       sdata->name, mgmt->sa, status_code);
 		ieee80211_destroy_assoc_data(sdata, false);
 	} else {
-		printk(KERN_DEBUG "%s: associated\n", sdata->name);
-
 		if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
 			/* oops -- internal error -- send timeout for now */
-			ieee80211_destroy_assoc_data(sdata, true);
-			sta_info_destroy_addr(sdata, mgmt->bssid);
+			ieee80211_destroy_assoc_data(sdata, false);
 			cfg80211_put_bss(*bss);
 			return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
 		}
+		printk(KERN_DEBUG "%s: associated\n", sdata->name);
 
 		/*
 		 * destroy assoc_data afterwards, as otherwise an idle
@@ -809,7 +809,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
 			max_rates = sband->n_bitrates;
 	}
 
-	msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp);
+	msp = kzalloc(sizeof(*msp), gfp);
 	if (!msp)
 		return NULL;
 
@@ -1521,11 +1521,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
 {
 	struct net_device *dev = ptr;
 	struct net *net = dev_net(dev);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_service *svc;
 	struct ip_vs_dest *dest;
 	unsigned int idx;
 
-	if (event != NETDEV_UNREGISTER)
+	if (event != NETDEV_UNREGISTER || !ipvs)
 		return NOTIFY_DONE;
 	IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
 	EnterFunction(2);
@@ -1551,7 +1552,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
 		}
 	}
 
-	list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
+	list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
 		__ip_vs_dev_reset(dest, dev);
 	}
 	mutex_unlock(&__ip_vs_mutex);
@@ -16,6 +16,7 @@
 
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -310,7 +311,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
 			  info->del_set.flags, 0, UINT_MAX);
 
 	/* Normalize to fit into jiffies */
-	if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
+	if (add_opt.timeout != IPSET_NO_TIMEOUT &&
+	    add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
 		add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
@@ -292,7 +292,7 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
 
 	pr_debug("%p\n", sk);
 
-	if (llcp_sock == NULL)
+	if (llcp_sock == NULL || llcp_sock->dev == NULL)
 		return -EBADFD;
 
 	addr->sa_family = AF_NFC;
@@ -229,7 +229,7 @@ struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *local,
 	return peer;
 
 new_UDP_peer:
-	_net("Rx UDP DGRAM from NEW peer %d", peer->debug_id);
+	_net("Rx UDP DGRAM from NEW peer");
 	read_unlock_bh(&rxrpc_peer_lock);
 	_leave(" = -EBUSY [new]");
 	return ERR_PTR(-EBUSY);
@@ -331,17 +331,15 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
 	return PSCHED_NS2TICKS(ticks);
 }
 
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
 	struct sk_buff_head *list = &sch->q;
 	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-	struct sk_buff *skb;
+	struct sk_buff *skb = skb_peek_tail(list);
 
-	if (likely(skb_queue_len(list) < sch->limit)) {
-		skb = skb_peek_tail(list);
 	/* Optimize for add at tail */
 	if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
-		return qdisc_enqueue_tail(nskb, sch);
+		return __skb_queue_tail(list, nskb);
 
 	skb_queue_reverse_walk(list, skb) {
 		if (tnext >= netem_skb_cb(skb)->time_to_send)
@@ -349,11 +347,6 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 	}
 
 	__skb_queue_after(list, skb, nskb);
-	sch->qstats.backlog += qdisc_pkt_len(nskb);
-	return NET_XMIT_SUCCESS;
-	}
-
-	return qdisc_reshape_fail(nskb, sch);
 }
 
 /*
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* We don't fill cb now as skb_unshare() may invalidate it */
 	struct netem_skb_cb *cb;
 	struct sk_buff *skb2;
-	int ret;
 	int count = 1;
 
 	/* Random duplication */
@@ -419,6 +411,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}
 
+	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+		return qdisc_reshape_fail(skb, sch);
+
+	sch->qstats.backlog += qdisc_pkt_len(skb);
+
 	cb = netem_skb_cb(skb);
 	if (q->gap == 0 ||		/* not doing reordering */
 	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
@@ -450,7 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 		cb->time_to_send = now + delay;
 		++q->counter;
-		ret = tfifo_enqueue(skb, sch);
+		tfifo_enqueue(skb, sch);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
@@ -460,16 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		q->counter = 0;
 
 		__skb_queue_head(&sch->q, skb);
-		sch->qstats.backlog += qdisc_pkt_len(skb);
 		sch->qstats.requeues++;
-		ret = NET_XMIT_SUCCESS;
-	}
-
-	if (ret != NET_XMIT_SUCCESS) {
-		if (net_xmit_drop_count(ret)) {
-			sch->qstats.drops++;
-			return ret;
-		}
 	}
 
 	return NET_XMIT_SUCCESS;
@@ -570,6 +570,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
 
 	sch->qstats.backlog = q->qdisc->qstats.backlog;
 	opts = nla_nest_start(skb, TCA_OPTIONS);
+	if (opts == NULL)
+		goto nla_put_failure;
 	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
 	return nla_nest_end(skb, opts);
@@ -736,15 +736,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 
 	epb = &ep->base;
 
-	if (hlist_unhashed(&epb->node))
-		return;
-
 	epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
 
 	head = &sctp_ep_hashtable[epb->hashent];
 
 	sctp_write_lock(&head->lock);
-	__hlist_del(&epb->node);
+	hlist_del_init(&epb->node);
 	sctp_write_unlock(&head->lock);
 }
 
@@ -825,7 +822,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
 	head = &sctp_assoc_hashtable[epb->hashent];
 
 	sctp_write_lock(&head->lock);
-	__hlist_del(&epb->node);
+	hlist_del_init(&epb->node);
 	sctp_write_unlock(&head->lock);
 }
 
@@ -1231,8 +1231,14 @@ static int __sctp_connect(struct sock* sk,
 	SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
 			  " kaddrs: %p err: %d\n",
 			  asoc, kaddrs, err);
-	if (asoc)
+	if (asoc) {
+		/* sctp_primitive_ASSOCIATE may have added this association
+		 * To the hash table, try to unhash it, just in case, its a noop
+		 * if it wasn't hashed so we're safe
+		 */
+		sctp_unhash_established(asoc);
 		sctp_association_free(asoc);
+	}
 	return err;
 }
 
@@ -1942,8 +1948,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 		goto out_unlock;
 
 out_free:
-	if (new_asoc)
+	if (new_asoc) {
+		sctp_unhash_established(asoc);
 		sctp_association_free(asoc);
+	}
 out_unlock:
 	sctp_release_sock(sk);
 