Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Several netfilter fixes from Pablo and the crew:
      - Handle fragmented packets properly in netfilter conntrack, from
        Florian Westphal.
      - Fix SCTP ICMP packet handling, from Ying Xue.
      - Fix big-endian bug in nftables, from Liping Zhang.
      - Fix alignment of fake conntrack entry, from Steven Rostedt.

 2) Fix feature flags setting in fjes driver, from Taku Izumi.

 3) Openvswitch ipv6 tunnel source address not set properly, from Or
    Gerlitz.

 4) Fix jumbo MTU handling in amd-xgbe driver, from Thomas Lendacky.

 5) sk->sk_frag.page not released properly in some cases, from Eric
    Dumazet.

 6) Fix RTNL deadlocks in nl80211, from Johannes Berg.

 7) Fix erroneous RTNL lockdep splat in crypto, from Herbert Xu.

 8) Cure improper inflight handling during AF_UNIX GC, from Andrey
    Ulanov.

 9) sch_dsmark doesn't write to packet headers properly, from Eric
    Dumazet.

10) Fix SCM_TIMESTAMPING_OPT_STATS handling in TCP, from Soheil Hassas
    Yeganeh.

11) Add some IDs for Motorola qmi_wwan chips, from Tony Lindgren.

12) Fix nametbl deadlock in tipc, from Ying Xue.

13) GRO and LRO packets not counted correctly in mlx5 driver, from Gal
    Pressman.

14) Fix reset of internal PHYs in bcmgenet, from Doug Berger.

15) Fix hashmap allocation handling, from Alexei Starovoitov.

16) nl_fib_input() needs stronger netlink message length checking, from
    Eric Dumazet; a sketch of the kind of check involved follows this list.

17) Fix double-free of sk->sk_filter during sock clone, from Daniel
    Borkmann.

18) Fix RX checksum offloading in aquantia driver, from Pavel Belous.
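
As a side note on item 16, the sketch below shows the general shape of the
stronger validation described there: every length field of the netlink
request is checked against the skb before the fib_result_nl payload is
touched. This is an illustration only, not the literal patch; nlmsg_hdr(),
nlmsg_len(), nlmsg_total_size() and nlmsg_data() are the standard netlink
accessors, while the function name nl_fib_input_checked() is made up for
this example.

/* Illustrative sketch only -- not the exact upstream change. */
static void nl_fib_input_checked(struct sk_buff *skb)
{
	struct fib_result_nl *frn;
	struct nlmsghdr *nlh;

	nlh = nlmsg_hdr(skb);

	/* Reject requests whose declared or actual length cannot hold
	 * a full struct fib_result_nl payload.
	 */
	if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
	    skb->len < nlh->nlmsg_len ||
	    nlmsg_len(nlh) < sizeof(*frn))
		return;

	frn = nlmsg_data(nlh);
	/* ... perform the FIB lookup and unicast the reply as before ... */
}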

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (85 commits)
  net:ethernet:aquantia: Fix for RX checksum offload.
  amd-xgbe: Fix the ECC-related bit position definitions
  sfc: cleanup a condition in efx_udp_tunnel_del()
  Bluetooth: btqcomsmd: fix compile-test dependency
  inet: frag: release spinlock before calling icmp_send()
  tcp: initialize icsk_ack.lrcvtime at session start time
  genetlink: fix counting regression on ctrl_dumpfamily()
  socket, bpf: fix sk_filter use after free in sk_clone_lock
  ipv4: provide stronger user input validation in nl_fib_input()
  bpf: fix hashmap extra_elems logic
  enic: update enic maintainers
  net: bcmgenet: remove bcmgenet_internal_phy_setup()
  ipv6: make sure to initialize sockc.tsflags before first use
  fjes: Do not load fjes driver if extended socket device is not power on.
  fjes: Do not load fjes driver if system does not have extended socket device.
  net/mlx5e: Count LRO packets correctly
  net/mlx5e: Count GSO packets correctly
  net/mlx5: Increase number of max QPs in default profile
  net/mlx5e: Avoid supporting udp tunnel port ndo for VF reps
  net/mlx5e: Use the proper UAPI values when offloading TC vlan actions
  ...
Committed by Linus Torvalds on 2017-03-23 11:29:49 -07:00 (commit f341d9f08a).
100 changed files with 1085 additions and 611 deletions.


@@ -3216,7 +3216,6 @@ F: drivers/platform/chrome/
 CISCO VIC ETHERNET NIC DRIVER
 M: Christian Benvenuti <benve@cisco.com>
-M: Sujith Sankar <ssujith@cisco.com>
 M: Govindarajulu Varadarajan <_govind@gmx.com>
 M: Neel Patel <neepatel@cisco.com>
 S: Supported
@@ -7774,13 +7773,6 @@ F: include/net/mac80211.h
 F: net/mac80211/
 F: drivers/net/wireless/mac80211_hwsim.[ch]
-MACVLAN DRIVER
-M: Patrick McHardy <kaber@trash.net>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/macvlan.c
-F: include/linux/if_macvlan.h
 MAILBOX API
 M: Jassi Brar <jassisinghbrar@gmail.com>
 L: linux-kernel@vger.kernel.org
@@ -7853,6 +7845,8 @@ F: drivers/net/ethernet/marvell/mvneta.*
 MARVELL MWIFIEX WIRELESS DRIVER
 M: Amitkumar Karwar <akarwar@marvell.com>
 M: Nishant Sarmukadam <nishants@marvell.com>
+M: Ganapathi Bhat <gbhat@marvell.com>
+M: Xinming Hu <huxm@marvell.com>
 L: linux-wireless@vger.kernel.org
 S: Maintained
 F: drivers/net/wireless/marvell/mwifiex/
@@ -13383,14 +13377,6 @@ W: https://linuxtv.org
 S: Maintained
 F: drivers/media/platform/vivid/*
-VLAN (802.1Q)
-M: Patrick McHardy <kaber@trash.net>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/macvlan.c
-F: include/linux/if_*vlan.h
-F: net/8021q/
 VLYNQ BUS
 M: Florian Fainelli <f.fainelli@gmail.com>
 L: openwrt-devel@lists.openwrt.org (subscribers-only)


@@ -344,7 +344,8 @@ config BT_WILINK
 config BT_QCOMSMD
 	tristate "Qualcomm SMD based HCI support"
-	depends on (QCOM_SMD && QCOM_WCNSS_CTRL) || COMPILE_TEST
+	depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+	depends on QCOM_WCNSS_CTRL || (COMPILE_TEST && QCOM_WCNSS_CTRL=n)
 	select BT_QCA
 	help
 	  Qualcomm SMD based HCI driver.


@@ -984,29 +984,29 @@
 #define XP_ECC_CNT1_DESC_DED_WIDTH 8
 #define XP_ECC_CNT1_DESC_SEC_INDEX 0
 #define XP_ECC_CNT1_DESC_SEC_WIDTH 8
-#define XP_ECC_IER_DESC_DED_INDEX 0
+#define XP_ECC_IER_DESC_DED_INDEX 5
 #define XP_ECC_IER_DESC_DED_WIDTH 1
-#define XP_ECC_IER_DESC_SEC_INDEX 1
+#define XP_ECC_IER_DESC_SEC_INDEX 4
 #define XP_ECC_IER_DESC_SEC_WIDTH 1
-#define XP_ECC_IER_RX_DED_INDEX 2
+#define XP_ECC_IER_RX_DED_INDEX 3
 #define XP_ECC_IER_RX_DED_WIDTH 1
-#define XP_ECC_IER_RX_SEC_INDEX 3
+#define XP_ECC_IER_RX_SEC_INDEX 2
 #define XP_ECC_IER_RX_SEC_WIDTH 1
-#define XP_ECC_IER_TX_DED_INDEX 4
+#define XP_ECC_IER_TX_DED_INDEX 1
 #define XP_ECC_IER_TX_DED_WIDTH 1
-#define XP_ECC_IER_TX_SEC_INDEX 5
+#define XP_ECC_IER_TX_SEC_INDEX 0
 #define XP_ECC_IER_TX_SEC_WIDTH 1
-#define XP_ECC_ISR_DESC_DED_INDEX 0
+#define XP_ECC_ISR_DESC_DED_INDEX 5
 #define XP_ECC_ISR_DESC_DED_WIDTH 1
-#define XP_ECC_ISR_DESC_SEC_INDEX 1
+#define XP_ECC_ISR_DESC_SEC_INDEX 4
 #define XP_ECC_ISR_DESC_SEC_WIDTH 1
-#define XP_ECC_ISR_RX_DED_INDEX 2
+#define XP_ECC_ISR_RX_DED_INDEX 3
 #define XP_ECC_ISR_RX_DED_WIDTH 1
-#define XP_ECC_ISR_RX_SEC_INDEX 3
+#define XP_ECC_ISR_RX_SEC_INDEX 2
 #define XP_ECC_ISR_RX_SEC_WIDTH 1
-#define XP_ECC_ISR_TX_DED_INDEX 4
+#define XP_ECC_ISR_TX_DED_INDEX 1
 #define XP_ECC_ISR_TX_DED_WIDTH 1
-#define XP_ECC_ISR_TX_SEC_INDEX 5
+#define XP_ECC_ISR_TX_SEC_INDEX 0
 #define XP_ECC_ISR_TX_SEC_WIDTH 1
 #define XP_I2C_MUTEX_BUSY_INDEX 31
 #define XP_I2C_MUTEX_BUSY_WIDTH 1
@@ -1148,8 +1148,8 @@
 #define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_LAST_INDEX 2
+#define RX_PACKET_ATTRIBUTES_LAST_WIDTH 1
 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
 #define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
@@ -1158,6 +1158,8 @@
 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
 #define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
 #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7
+#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1
 #define RX_NORMAL_DESC0_OVT_INDEX 0
 #define RX_NORMAL_DESC0_OVT_WIDTH 16


@@ -1896,10 +1896,15 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 	/* Get the header length */
 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
+		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+			       FIRST, 1);
 		rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
 						      RX_NORMAL_DESC2, HL);
 		if (rdata->rx.hdr_len)
 			pdata->ext_stats.rx_split_header_packets++;
+	} else {
+		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+			       FIRST, 0);
 	}
 
 	/* Get the RSS hash */
@@ -1922,19 +1927,16 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 		}
 	}
 
-	/* Get the packet length */
-	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
-
-	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
-		/* Not all the data has been transferred for this packet */
-		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
-			       INCOMPLETE, 1);
+	/* Not all the data has been transferred for this packet */
+	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
 		return 0;
-	}
 
 	/* This is the last of the data for this packet */
 	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
-		       INCOMPLETE, 0);
+		       LAST, 1);
+
+	/* Get the packet length */
+	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
 
 	/* Set checksum done indicator as appropriate */
 	if (netdev->features & NETIF_F_RXCSUM)


@ -1971,13 +1971,12 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
{ {
struct sk_buff *skb; struct sk_buff *skb;
u8 *packet; u8 *packet;
unsigned int copy_len;
skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len); skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
if (!skb) if (!skb)
return NULL; return NULL;
/* Start with the header buffer which may contain just the header /* Pull in the header buffer which may contain just the header
* or the header plus data * or the header plus data
*/ */
dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base, dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
@ -1986,30 +1985,49 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
packet = page_address(rdata->rx.hdr.pa.pages) + packet = page_address(rdata->rx.hdr.pa.pages) +
rdata->rx.hdr.pa.pages_offset; rdata->rx.hdr.pa.pages_offset;
copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len; skb_copy_to_linear_data(skb, packet, len);
copy_len = min(rdata->rx.hdr.dma_len, copy_len); skb_put(skb, len);
skb_copy_to_linear_data(skb, packet, copy_len);
skb_put(skb, copy_len);
len -= copy_len;
if (len) {
/* Add the remaining data as a frag */
dma_sync_single_range_for_cpu(pdata->dev,
rdata->rx.buf.dma_base,
rdata->rx.buf.dma_off,
rdata->rx.buf.dma_len,
DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx.buf.pa.pages,
rdata->rx.buf.pa.pages_offset,
len, rdata->rx.buf.dma_len);
rdata->rx.buf.pa.pages = NULL;
}
return skb; return skb;
} }
static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
struct xgbe_packet_data *packet)
{
/* Always zero if not the first descriptor */
if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
return 0;
/* First descriptor with split header, return header length */
if (rdata->rx.hdr_len)
return rdata->rx.hdr_len;
/* First descriptor but not the last descriptor and no split header,
* so the full buffer was used
*/
if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
return rdata->rx.hdr.dma_len;
/* First descriptor and last descriptor and no split header, so
* calculate how much of the buffer was used
*/
return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
}
static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
struct xgbe_packet_data *packet,
unsigned int len)
{
/* Always the full buffer if not the last descriptor */
if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
return rdata->rx.buf.dma_len;
/* Last descriptor so calculate how much of the buffer was used
* for the last bit of data
*/
return rdata->rx.len - len;
}
static int xgbe_tx_poll(struct xgbe_channel *channel) static int xgbe_tx_poll(struct xgbe_channel *channel)
{ {
struct xgbe_prv_data *pdata = channel->pdata; struct xgbe_prv_data *pdata = channel->pdata;
@ -2092,8 +2110,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
struct napi_struct *napi; struct napi_struct *napi;
struct sk_buff *skb; struct sk_buff *skb;
struct skb_shared_hwtstamps *hwtstamps; struct skb_shared_hwtstamps *hwtstamps;
unsigned int incomplete, error, context_next, context; unsigned int last, error, context_next, context;
unsigned int len, rdesc_len, max_len; unsigned int len, buf1_len, buf2_len, max_len;
unsigned int received = 0; unsigned int received = 0;
int packet_count = 0; int packet_count = 0;
@ -2103,7 +2121,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!ring) if (!ring)
return 0; return 0;
incomplete = 0; last = 0;
context_next = 0; context_next = 0;
napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
@ -2137,9 +2155,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
received++; received++;
ring->cur++; ring->cur++;
incomplete = XGMAC_GET_BITS(packet->attributes, last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
RX_PACKET_ATTRIBUTES, LAST);
INCOMPLETE);
context_next = XGMAC_GET_BITS(packet->attributes, context_next = XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES, RX_PACKET_ATTRIBUTES,
CONTEXT_NEXT); CONTEXT_NEXT);
@ -2148,7 +2165,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
CONTEXT); CONTEXT);
/* Earlier error, just drain the remaining data */ /* Earlier error, just drain the remaining data */
if ((incomplete || context_next) && error) if ((!last || context_next) && error)
goto read_again; goto read_again;
if (error || packet->errors) { if (error || packet->errors) {
@ -2160,16 +2177,22 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
} }
if (!context) { if (!context) {
/* Length is cumulative, get this descriptor's length */ /* Get the data length in the descriptor buffers */
rdesc_len = rdata->rx.len - len; buf1_len = xgbe_rx_buf1_len(rdata, packet);
len += rdesc_len; len += buf1_len;
buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
len += buf2_len;
if (rdesc_len && !skb) { if (!skb) {
skb = xgbe_create_skb(pdata, napi, rdata, skb = xgbe_create_skb(pdata, napi, rdata,
rdesc_len); buf1_len);
if (!skb) if (!skb) {
error = 1; error = 1;
} else if (rdesc_len) { goto skip_data;
}
}
if (buf2_len) {
dma_sync_single_range_for_cpu(pdata->dev, dma_sync_single_range_for_cpu(pdata->dev,
rdata->rx.buf.dma_base, rdata->rx.buf.dma_base,
rdata->rx.buf.dma_off, rdata->rx.buf.dma_off,
@ -2179,13 +2202,14 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx.buf.pa.pages, rdata->rx.buf.pa.pages,
rdata->rx.buf.pa.pages_offset, rdata->rx.buf.pa.pages_offset,
rdesc_len, buf2_len,
rdata->rx.buf.dma_len); rdata->rx.buf.dma_len);
rdata->rx.buf.pa.pages = NULL; rdata->rx.buf.pa.pages = NULL;
} }
} }
if (incomplete || context_next) skip_data:
if (!last || context_next)
goto read_again; goto read_again;
if (!skb) if (!skb)
@ -2243,7 +2267,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
} }
/* Check if we need to save state before leaving */ /* Check if we need to save state before leaving */
if (received && (incomplete || context_next)) { if (received && (!last || context_next)) {
rdata = XGBE_GET_DESC_DATA(ring, ring->cur); rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdata->state_saved = 1; rdata->state_saved = 1;
rdata->state.skb = skb; rdata->state.skb = skb;


@@ -98,6 +98,7 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
 	if (err < 0)
 		goto err_exit;
+	ndev->mtu = new_mtu;
 
 	if (netif_running(ndev)) {
 		aq_ndev_close(ndev);


@@ -137,6 +137,7 @@ static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
 	.tx_rings = HW_ATL_A0_TX_RINGS,
 	.rx_rings = HW_ATL_A0_RX_RINGS,
 	.hw_features = NETIF_F_HW_CSUM |
+			NETIF_F_RXCSUM |
 			NETIF_F_RXHASH |
 			NETIF_F_SG |
 			NETIF_F_TSO,


@@ -188,6 +188,7 @@ static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
 	.tx_rings = HW_ATL_B0_TX_RINGS,
 	.rx_rings = HW_ATL_B0_RX_RINGS,
 	.hw_features = NETIF_F_HW_CSUM |
+			NETIF_F_RXCSUM |
 			NETIF_F_RXHASH |
 			NETIF_F_SG |
 			NETIF_F_TSO |


@@ -3481,7 +3481,8 @@ static int bcmgenet_suspend(struct device *d)
 	bcmgenet_netif_stop(dev);
 
-	phy_suspend(priv->phydev);
+	if (!device_may_wakeup(d))
+		phy_suspend(priv->phydev);
 
 	netif_device_detach(dev);
@@ -3578,7 +3579,8 @@ static int bcmgenet_resume(struct device *d)
 	netif_device_attach(dev);
 
-	phy_resume(priv->phydev);
+	if (!device_may_wakeup(d))
+		phy_resume(priv->phydev);
 
 	if (priv->eee.eee_enabled)
 		bcmgenet_eee_enable_set(dev, true);


@@ -220,20 +220,6 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
 	udelay(60);
 }
 
-static void bcmgenet_internal_phy_setup(struct net_device *dev)
-{
-	struct bcmgenet_priv *priv = netdev_priv(dev);
-	u32 reg;
-
-	/* Power up PHY */
-	bcmgenet_phy_power_set(dev, true);
-	/* enable APD */
-	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
-	reg |= EXT_PWR_DN_EN_LD;
-	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-	bcmgenet_mii_reset(dev);
-}
-
 static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
 {
 	u32 reg;
@@ -281,7 +267,6 @@ int bcmgenet_mii_config(struct net_device *dev)
 	if (priv->internal_phy) {
 		phy_name = "internal PHY";
-		bcmgenet_internal_phy_setup(dev);
 	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
 		phy_name = "MoCA";
 		bcmgenet_moca_phy_setup(priv);


@@ -325,7 +325,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
 		return PTR_ERR(kern_buf);
 
 	rc = sscanf(kern_buf, "%x:%x", &addr, &len);
-	if (rc < 2) {
+	if (rc < 2 || len > UINT_MAX >> 2) {
 		netdev_warn(bnad->netdev, "failed to read user buffer\n");
 		kfree(kern_buf);
 		return -EINVAL;


@@ -1257,6 +1257,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
 				release_sub_crq_queue(adapter,
 						      adapter->tx_scrq[i]);
 			}
+		kfree(adapter->tx_scrq);
 		adapter->tx_scrq = NULL;
 	}
@@ -1269,6 +1270,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
 				release_sub_crq_queue(adapter,
 						      adapter->rx_scrq[i]);
 			}
+		kfree(adapter->rx_scrq);
 		adapter->rx_scrq = NULL;
 	}
 }


@@ -2305,6 +2305,17 @@ static int sync_toggles(struct mlx4_dev *dev)
 		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
 		if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
 			/* PCI might be offline */
+
+			/* If device removal has been requested,
+			 * do not continue retrying.
+			 */
+			if (dev->persist->interface_state &
+			    MLX4_INTERFACE_STATE_NOWAIT) {
+				mlx4_warn(dev,
+					  "communication channel is offline\n");
+				return -EIO;
+			}
+
 			msleep(100);
 			wr_toggle = swab32(readl(&priv->mfunc.comm->
 					   slave_write));


@@ -1940,6 +1940,14 @@ static int mlx4_comm_check_offline(struct mlx4_dev *dev)
 					       (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
 		if (!offline_bit)
 			return 0;
+
+		/* If device removal has been requested,
+		 * do not continue retrying.
+		 */
+		if (dev->persist->interface_state &
+		    MLX4_INTERFACE_STATE_NOWAIT)
+			break;
+
 		/* There are cases as part of AER/Reset flow that PF needs
 		 * around 100 msec to load. We therefore sleep for 100 msec
 		 * to allow other tasks to make use of that CPU during this
@@ -3955,6 +3963,9 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 	struct devlink *devlink = priv_to_devlink(priv);
 	int active_vfs = 0;
 
+	if (mlx4_is_slave(dev))
+		persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
+
 	mutex_lock(&persist->interface_state_mutex);
 	persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
 	mutex_unlock(&persist->interface_state_mutex);


@@ -361,6 +361,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
 	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
 	case MLX5_CMD_OP_QUERY_Q_COUNTER:
+	case MLX5_CMD_OP_SET_RATE_LIMIT:
+	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
 	case MLX5_CMD_OP_ALLOC_PD:
 	case MLX5_CMD_OP_ALLOC_UAR:
 	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
@@ -497,6 +499,8 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+	MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
 	MLX5_COMMAND_STR_CASE(ALLOC_PD);
 	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
 	MLX5_COMMAND_STR_CASE(ALLOC_UAR);


@@ -928,10 +928,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
 int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
 void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
 u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
-void mlx5e_add_vxlan_port(struct net_device *netdev,
-			  struct udp_tunnel_info *ti);
-void mlx5e_del_vxlan_port(struct net_device *netdev,
-			  struct udp_tunnel_info *ti);
 int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
 			    void *sp);


@@ -3100,8 +3100,8 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
 					    vf_stats);
 }
 
-void mlx5e_add_vxlan_port(struct net_device *netdev,
-			  struct udp_tunnel_info *ti)
+static void mlx5e_add_vxlan_port(struct net_device *netdev,
+				 struct udp_tunnel_info *ti)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3114,8 +3114,8 @@ void mlx5e_add_vxlan_port(struct net_device *netdev,
 	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
 }
 
-void mlx5e_del_vxlan_port(struct net_device *netdev,
-			  struct udp_tunnel_info *ti)
+static void mlx5e_del_vxlan_port(struct net_device *netdev,
+				 struct udp_tunnel_info *ti)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);


@@ -393,8 +393,6 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
 	.ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
 	.ndo_setup_tc = mlx5e_rep_ndo_setup_tc,
 	.ndo_get_stats64 = mlx5e_rep_get_stats,
-	.ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
-	.ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
 	.ndo_has_offload_stats = mlx5e_has_offload_stats,
 	.ndo_get_offload_stats = mlx5e_get_offload_stats,
 };


@@ -601,6 +601,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 	if (lro_num_seg > 1) {
 		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
 		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
+		/* Subtract one since we already counted this as one
+		 * "regular" packet in mlx5e_complete_rx_cqe()
+		 */
+		rq->stats.packets += lro_num_seg - 1;
 		rq->stats.lro_packets++;
 		rq->stats.lro_bytes += cqe_bcnt;
 	}


@ -133,6 +133,23 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
return rule; return rule;
} }
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_fc *counter = NULL;
if (!IS_ERR(flow->rule)) {
counter = mlx5_flow_rule_counter(flow->rule);
mlx5_del_flow_rules(flow->rule);
mlx5_fc_destroy(priv->mdev, counter);
}
if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fs.tc.t = NULL;
}
}
static struct mlx5_flow_handle * static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec, struct mlx5_flow_spec *spec,
@ -149,7 +166,24 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
} }
static void mlx5e_detach_encap(struct mlx5e_priv *priv, static void mlx5e_detach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow) { struct mlx5e_tc_flow *flow);
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr);
mlx5_eswitch_del_vlan_action(esw, flow->attr);
if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
mlx5e_detach_encap(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct list_head *next = flow->encap.next; struct list_head *next = flow->encap.next;
list_del(&flow->encap); list_del(&flow->encap);
@ -173,25 +207,10 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow) struct mlx5e_tc_flow *flow)
{ {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
struct mlx5_fc *counter = NULL; mlx5e_tc_del_fdb_flow(priv, flow);
else
if (!IS_ERR(flow->rule)) { mlx5e_tc_del_nic_flow(priv, flow);
counter = mlx5_flow_rule_counter(flow->rule);
mlx5_del_flow_rules(flow->rule);
mlx5_fc_destroy(priv->mdev, counter);
}
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
mlx5_eswitch_del_vlan_action(esw, flow->attr);
if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
mlx5e_detach_encap(priv, flow);
}
if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fs.tc.t = NULL;
}
} }
static void parse_vxlan_attr(struct mlx5_flow_spec *spec, static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
@ -248,12 +267,15 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
skb_flow_dissector_target(f->dissector, skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ENC_PORTS, FLOW_DISSECTOR_KEY_ENC_PORTS,
f->mask); f->mask);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
struct mlx5e_priv *up_priv = netdev_priv(up_dev);
/* Full udp dst port must be given */ /* Full udp dst port must be given */
if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
goto vxlan_match_offload_err; goto vxlan_match_offload_err;
if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) && if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
parse_vxlan_attr(spec, f); parse_vxlan_attr(spec, f);
else { else {
@ -976,6 +998,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr) struct mlx5_esw_flow_attr *attr)
{ {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
struct mlx5e_priv *up_priv = netdev_priv(up_dev);
unsigned short family = ip_tunnel_info_af(tun_info); unsigned short family = ip_tunnel_info_af(tun_info);
struct ip_tunnel_key *key = &tun_info->key; struct ip_tunnel_key *key = &tun_info->key;
struct mlx5_encap_entry *e; struct mlx5_encap_entry *e;
@ -996,7 +1020,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) && if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
tunnel_type = MLX5_HEADER_TYPE_VXLAN; tunnel_type = MLX5_HEADER_TYPE_VXLAN;
} else { } else {
@ -1112,14 +1136,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
} }
if (is_tcf_vlan(a)) { if (is_tcf_vlan(a)) {
if (tcf_vlan_action(a) == VLAN_F_POP) { if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
} else if (tcf_vlan_action(a) == VLAN_F_PUSH) { } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q)) if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
return -EOPNOTSUPP; return -EOPNOTSUPP;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
attr->vlan = tcf_vlan_push_vid(a); attr->vlan = tcf_vlan_push_vid(a);
} else { /* action is TCA_VLAN_ACT_MODIFY */
return -EOPNOTSUPP;
} }
continue; continue;
} }


@@ -274,15 +274,18 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 			sq->stats.tso_bytes += skb->len - ihs;
 		}
 
+		sq->stats.packets += skb_shinfo(skb)->gso_segs;
 		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
 	} else {
 		bf = sq->bf_budget &&
 		     !skb->xmit_more &&
 		     !skb_shinfo(skb)->nr_frags;
 		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
+		sq->stats.packets++;
 		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
 	}
 
+	sq->stats.bytes += num_bytes;
 	wi->num_bytes = num_bytes;
 
 	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
@@ -381,8 +384,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	if (bf)
 		sq->bf_budget--;
 
-	sq->stats.packets++;
-	sq->stats.bytes += num_bytes;
 	return NETDEV_TX_OK;
 
 dma_unmap_wqe_err:


@@ -209,6 +209,7 @@ struct mlx5_esw_offload {
 	struct mlx5_eswitch_rep *vport_reps;
 	DECLARE_HASHTABLE(encap_tbl, 8);
 	u8 inline_mode;
+	u64 num_flows;
 };
 
 struct mlx5_eswitch {
@@ -271,6 +272,11 @@ struct mlx5_flow_handle *
 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_spec *spec,
 				struct mlx5_esw_flow_attr *attr);
+void
+mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
+				struct mlx5_flow_handle *rule,
+				struct mlx5_esw_flow_attr *attr);
+
 struct mlx5_flow_handle *
 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);


@@ -93,10 +93,27 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				   spec, &flow_act, dest, i);
 	if (IS_ERR(rule))
 		mlx5_fc_destroy(esw->dev, counter);
+	else
+		esw->offloads.num_flows++;
 
 	return rule;
 }
 
+void
+mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
+				struct mlx5_flow_handle *rule,
+				struct mlx5_esw_flow_attr *attr)
+{
+	struct mlx5_fc *counter = NULL;
+
+	if (!IS_ERR(rule)) {
+		counter = mlx5_flow_rule_counter(rule);
+		mlx5_del_flow_rules(rule);
+		mlx5_fc_destroy(esw->dev, counter);
+		esw->offloads.num_flows--;
+	}
+}
+
 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
 {
 	struct mlx5_eswitch_rep *rep;
@@ -908,6 +925,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
 		return -EOPNOTSUPP;
 
+	if (esw->offloads.num_flows > 0) {
+		esw_warn(dev, "Can't set inline mode when flows are configured\n");
+		return -EOPNOTSUPP;
+	}
+
 	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
 	if (err)
 		goto out;


@@ -87,7 +87,7 @@ static struct mlx5_profile profile[] = {
 	[2] = {
 		.mask = MLX5_PROF_MASK_QP_SIZE |
 			MLX5_PROF_MASK_MR_CACHE,
-		.log_max_qp = 17,
+		.log_max_qp = 18,
 		.mr_cache[0] = {
 			.size = 500,
 			.limit = 250


@@ -2404,7 +2404,7 @@ static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
 	tnl.type = (u16)efx_tunnel_type;
 	tnl.port = ti->port;
 
-	if (efx->type->udp_tnl_add_port)
+	if (efx->type->udp_tnl_del_port)
 		(void)efx->type->udp_tnl_del_port(efx, tnl);
 }


@@ -74,15 +74,21 @@ config TI_CPSW
 	  will be called cpsw.
 
 config TI_CPTS
-	tristate "TI Common Platform Time Sync (CPTS) Support"
+	bool "TI Common Platform Time Sync (CPTS) Support"
 	depends on TI_CPSW || TI_KEYSTONE_NETCP
-	imply PTP_1588_CLOCK
+	depends on PTP_1588_CLOCK
 	---help---
 	  This driver supports the Common Platform Time Sync unit of
 	  the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem.
 	  The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the
 	  driver offers a PTP Hardware Clock.
 
+config TI_CPTS_MOD
+	tristate
+	depends on TI_CPTS
+	default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y
+	default m
+
 config TI_KEYSTONE_NETCP
 	tristate "TI Keystone NETCP Core Support"
 	select TI_CPSW_ALE


@@ -12,7 +12,7 @@ obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
 obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
 obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
 obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o
-obj-$(CONFIG_TI_CPTS) += cpts.o
+obj-$(CONFIG_TI_CPTS_MOD) += cpts.o
 obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
 ti_cpsw-y := cpsw.o


@ -45,6 +45,8 @@ MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION); MODULE_VERSION(DRV_VERSION);
#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
static int fjes_request_irq(struct fjes_adapter *); static int fjes_request_irq(struct fjes_adapter *);
static void fjes_free_irq(struct fjes_adapter *); static void fjes_free_irq(struct fjes_adapter *);
@ -78,7 +80,7 @@ static void fjes_rx_irq(struct fjes_adapter *, int);
static int fjes_poll(struct napi_struct *, int); static int fjes_poll(struct napi_struct *, int);
static const struct acpi_device_id fjes_acpi_ids[] = { static const struct acpi_device_id fjes_acpi_ids[] = {
{"PNP0C02", 0}, {ACPI_MOTHERBOARD_RESOURCE_HID, 0},
{"", 0}, {"", 0},
}; };
MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids); MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
@ -115,18 +117,17 @@ static struct resource fjes_resource[] = {
}, },
}; };
static int fjes_acpi_add(struct acpi_device *device) static bool is_extended_socket_device(struct acpi_device *device)
{ {
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1]; char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
struct platform_device *plat_dev;
union acpi_object *str; union acpi_object *str;
acpi_status status; acpi_status status;
int result; int result;
status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer); status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
if (ACPI_FAILURE(status)) if (ACPI_FAILURE(status))
return -ENODEV; return false;
str = buffer.pointer; str = buffer.pointer;
result = utf16s_to_utf8s((wchar_t *)str->string.pointer, result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
@ -136,10 +137,42 @@ static int fjes_acpi_add(struct acpi_device *device)
if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) { if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
kfree(buffer.pointer); kfree(buffer.pointer);
return -ENODEV; return false;
} }
kfree(buffer.pointer); kfree(buffer.pointer);
return true;
}
static int acpi_check_extended_socket_status(struct acpi_device *device)
{
unsigned long long sta;
acpi_status status;
status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status))
return -ENODEV;
if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
(sta & ACPI_STA_DEVICE_ENABLED) &&
(sta & ACPI_STA_DEVICE_UI) &&
(sta & ACPI_STA_DEVICE_FUNCTIONING)))
return -ENODEV;
return 0;
}
static int fjes_acpi_add(struct acpi_device *device)
{
struct platform_device *plat_dev;
acpi_status status;
if (!is_extended_socket_device(device))
return -ENODEV;
if (acpi_check_extended_socket_status(device))
return -ENODEV;
status = acpi_walk_resources(device->handle, METHOD_NAME__CRS, status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
fjes_get_acpi_resource, fjes_resource); fjes_get_acpi_resource, fjes_resource);
if (ACPI_FAILURE(status)) if (ACPI_FAILURE(status))
@ -1316,7 +1349,7 @@ static void fjes_netdev_setup(struct net_device *netdev)
netdev->min_mtu = fjes_support_mtu[0]; netdev->min_mtu = fjes_support_mtu[0];
netdev->max_mtu = fjes_support_mtu[3]; netdev->max_mtu = fjes_support_mtu[3];
netdev->flags |= IFF_BROADCAST; netdev->flags |= IFF_BROADCAST;
netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER; netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
} }
static void fjes_irq_watch_task(struct work_struct *work) static void fjes_irq_watch_task(struct work_struct *work)
@ -1473,11 +1506,44 @@ static void fjes_watch_unshare_task(struct work_struct *work)
} }
} }
static acpi_status
acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
void *context, void **return_value)
{
struct acpi_device *device;
bool *found = context;
int result;
result = acpi_bus_get_device(obj_handle, &device);
if (result)
return AE_OK;
if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
return AE_OK;
if (!is_extended_socket_device(device))
return AE_OK;
if (acpi_check_extended_socket_status(device))
return AE_OK;
*found = true;
return AE_CTRL_TERMINATE;
}
/* fjes_init_module - Driver Registration Routine */ /* fjes_init_module - Driver Registration Routine */
static int __init fjes_init_module(void) static int __init fjes_init_module(void)
{ {
bool found = false;
int result; int result;
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
acpi_find_extended_socket_device, NULL, &found,
NULL);
if (!found)
return -ENODEV;
pr_info("%s - version %s - %s\n", pr_info("%s - version %s - %s\n",
fjes_driver_string, fjes_driver_version, fjes_copyright); fjes_driver_string, fjes_driver_version, fjes_copyright);


@@ -1231,8 +1231,11 @@ void netvsc_channel_cb(void *context)
 		return;
 
 	net_device = net_device_to_netvsc_device(ndev);
-	if (unlikely(net_device->destroy) &&
-	    netvsc_channel_idle(net_device, q_idx))
+	if (unlikely(!net_device))
+		return;
+
+	if (unlikely(net_device->destroy &&
+		     netvsc_channel_idle(net_device, q_idx)))
 		return;
 
 	/* commit_rd_index() -> hv_signal_on_read() needs this. */


@@ -1931,6 +1931,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
 		return -EINVAL;
 
 	tun->set_features = features;
+	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
+	tun->dev->wanted_features |= features;
 	netdev_update_features(tun->dev);
 
 	return 0;


@@ -580,6 +580,10 @@ static const struct usb_device_id products[] = {
 		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
 		.driver_info = (unsigned long)&qmi_wwan_info,
 	},
+	{	/* Motorola Mapphone devices with MDM6600 */
+		USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff),
+		.driver_info = (unsigned long)&qmi_wwan_info,
+	},
 
 	/* 2. Combined interface devices matching on class+protocol */
 	{	/* Huawei E367 and possibly others in "Windows mode" */
@@ -925,6 +929,8 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x413c, 0x81a9, 8)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81b1, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81b3, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+	{QMI_FIXED_INTF(0x413c, 0x81b6, 8)},	/* Dell Wireless 5811e */
+	{QMI_FIXED_INTF(0x413c, 0x81b6, 10)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
 	{QMI_FIXED_INTF(0x22de, 0x9061, 3)},	/* WeTelecom WPD-600N */
 	{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},	/* SIMCom 7230E */


@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION "08"
 
 /* Information for net */
-#define NET_VERSION "8"
+#define NET_VERSION "9"
 
 #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -501,6 +501,8 @@ enum rtl_register_content {
 #define RTL8153_RMS RTL8153_MAX_PACKET
 #define RTL8152_TX_TIMEOUT (5 * HZ)
 #define RTL8152_NAPI_WEIGHT 64
+#define rx_reserved_size(x) ((x) + VLAN_ETH_HLEN + CRC_SIZE + \
+			     sizeof(struct rx_desc) + RX_ALIGN)
 
 /* rtl8152 flags */
 enum rtl8152_flags {
@@ -1362,6 +1364,7 @@ static int alloc_all_mem(struct r8152 *tp)
 	spin_lock_init(&tp->rx_lock);
 	spin_lock_init(&tp->tx_lock);
 	INIT_LIST_HEAD(&tp->tx_free);
+	INIT_LIST_HEAD(&tp->rx_done);
 	skb_queue_head_init(&tp->tx_queue);
 	skb_queue_head_init(&tp->rx_queue);
@@ -2252,8 +2255,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
 static void r8153_set_rx_early_size(struct r8152 *tp)
 {
-	u32 mtu = tp->netdev->mtu;
-	u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
+	u32 ocp_data = (agg_buf_sz - rx_reserved_size(tp->netdev->mtu)) / 4;
 
 	ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
 }
@@ -2898,7 +2900,8 @@ static void r8153_first_init(struct r8152 *tp)
 	rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX);
 
-	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS);
+	ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
 	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO);
 
 	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0);
@@ -2950,7 +2953,8 @@ static void r8153_enter_oob(struct r8152 *tp)
 		usleep_range(1000, 2000);
 	}
 
-	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS);
+	ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
 
 	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
 	ocp_data &= ~TEREDO_WAKE_MASK;
@@ -4200,8 +4204,14 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
 	dev->mtu = new_mtu;
 
-	if (netif_running(dev) && netif_carrier_ok(dev))
-		r8153_set_rx_early_size(tp);
+	if (netif_running(dev)) {
+		u32 rms = new_mtu + VLAN_ETH_HLEN + CRC_SIZE;
+
+		ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rms);
+
+		if (netif_carrier_ok(dev))
+			r8153_set_rx_early_size(tp);
+	}
 
 	mutex_unlock(&tp->control);


@@ -462,8 +462,10 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
 	}
 
 	if (rt6_local) {
-		if (rt6_local->rt6i_idev)
+		if (rt6_local->rt6i_idev) {
 			in6_dev_put(rt6_local->rt6i_idev);
+			rt6_local->rt6i_idev = NULL;
+		}
 
 		dst = &rt6_local->dst;
 		dev_put(dst->dev);


@@ -51,7 +51,7 @@ const struct ath10k_hw_regs qca6174_regs = {
 	.rtc_soc_base_address = 0x00000800,
 	.rtc_wmac_base_address = 0x00001000,
 	.soc_core_base_address = 0x0003a000,
-	.wlan_mac_base_address = 0x00020000,
+	.wlan_mac_base_address = 0x00010000,
 	.ce_wrapper_base_address = 0x00034000,
 	.ce0_base_address = 0x00034400,
 	.ce1_base_address = 0x00034800,


@@ -2319,7 +2319,7 @@ iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
-	/* Called when we need to transmit (a) frame(s) from agg queue */
+	/* Called when we need to transmit (a) frame(s) from agg or dqa queue */
 
 	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
 					  tids, more_data, true);
@@ -2338,7 +2338,8 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
 	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
 		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
 
-		if (tid_data->state != IWL_AGG_ON &&
+		if (!iwl_mvm_is_dqa_supported(mvm) &&
+		    tid_data->state != IWL_AGG_ON &&
 		    tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
 			continue;


@@ -3135,7 +3135,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
 				       struct ieee80211_sta *sta,
 				       enum ieee80211_frame_release_type reason,
 				       u16 cnt, u16 tids, bool more_data,
-				       bool agg)
+				       bool single_sta_queue)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_add_sta_cmd cmd = {
@@ -3155,14 +3155,14 @@
 	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
 		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
 
-	/* If we're releasing frames from aggregation queues then check if the
-	 * all queues combined that we're releasing frames from have
+	/* If we're releasing frames from aggregation or dqa queues then check
+	 * if all the queues that we're releasing frames from, combined, have:
 	 *  - more frames than the service period, in which case more_data
 	 *    needs to be set
 	 *  - fewer than 'cnt' frames, in which case we need to adjust the
 	 *    firmware command (but do that unconditionally)
 	 */
-	if (agg) {
+	if (single_sta_queue) {
 		int remaining = cnt;
 		int sleep_tx_count;
@@ -3172,7 +3172,8 @@
 			u16 n_queued;
 
 			tid_data = &mvmsta->tid_data[tid];
-			if (WARN(tid_data->state != IWL_AGG_ON &&
+			if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
+				 tid_data->state != IWL_AGG_ON &&
 				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
 				 "TID %d state is %d\n",
 				 tid, tid_data->state)) {


@@ -547,7 +547,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
 				       struct ieee80211_sta *sta,
 				       enum ieee80211_frame_release_type reason,
 				       u16 cnt, u16 tids, bool more_data,
-				       bool agg);
+				       bool single_sta_queue);
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
 		      bool drain);
 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,


@ -7,7 +7,7 @@
* *
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
@ -34,6 +34,7 @@
* *
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
@ -628,8 +629,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
* values. * values.
* Note that we don't need to make sure it isn't agg'd, since we're * Note that we don't need to make sure it isn't agg'd, since we're
* TXing non-sta * TXing non-sta
* For DQA mode - we shouldn't increase it though
*/ */
atomic_inc(&mvm->pending_frames[sta_id]); if (!iwl_mvm_is_dqa_supported(mvm))
atomic_inc(&mvm->pending_frames[sta_id]);
return 0; return 0;
} }
@ -1005,11 +1008,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_unlock(&mvmsta->lock); spin_unlock(&mvmsta->lock);
/* Increase pending frames count if this isn't AMPDU */ /* Increase pending frames count if this isn't AMPDU or DQA queue */
if ((iwl_mvm_is_dqa_supported(mvm) && if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)
mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON &&
mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) ||
(!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu))
atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
return 0; return 0;
@ -1079,12 +1079,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
lockdep_assert_held(&mvmsta->lock); lockdep_assert_held(&mvmsta->lock);
if ((tid_data->state == IWL_AGG_ON || if ((tid_data->state == IWL_AGG_ON ||
tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
iwl_mvm_is_dqa_supported(mvm)) &&
iwl_mvm_tid_queued(tid_data) == 0) { iwl_mvm_tid_queued(tid_data) == 0) {
/* /*
* Now that this aggregation queue is empty tell mac80211 so it * Now that this aggregation or DQA queue is empty tell
* knows we no longer have frames buffered for the station on * mac80211 so it knows we no longer have frames buffered for
* this TID (for the TIM bitmap calculation.) * the station on this TID (for the TIM bitmap calculation.)
*/ */
ieee80211_sta_set_buffered(sta, tid, false); ieee80211_sta_set_buffered(sta, tid, false);
} }
@ -1257,7 +1258,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
u8 skb_freed = 0; u8 skb_freed = 0;
u16 next_reclaimed, seq_ctl; u16 next_reclaimed, seq_ctl;
bool is_ndp = false; bool is_ndp = false;
bool txq_agg = false; /* Is this TXQ aggregated */
__skb_queue_head_init(&skbs); __skb_queue_head_init(&skbs);
@ -1283,6 +1283,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
info->flags |= IEEE80211_TX_STAT_ACK; info->flags |= IEEE80211_TX_STAT_ACK;
break; break;
case TX_STATUS_FAIL_DEST_PS: case TX_STATUS_FAIL_DEST_PS:
/* In DQA, the FW should have stopped the queue and not
* return this status
*/
WARN_ON(iwl_mvm_is_dqa_supported(mvm));
info->flags |= IEEE80211_TX_STAT_TX_FILTERED; info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
break; break;
default: default:
@ -1387,15 +1391,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
bool send_eosp_ndp = false; bool send_eosp_ndp = false;
spin_lock_bh(&mvmsta->lock); spin_lock_bh(&mvmsta->lock);
if (iwl_mvm_is_dqa_supported(mvm)) {
enum iwl_mvm_agg_state state;
state = mvmsta->tid_data[tid].state;
txq_agg = (state == IWL_AGG_ON ||
state == IWL_EMPTYING_HW_QUEUE_DELBA);
} else {
txq_agg = txq_id >= mvm->first_agg_queue;
}
if (!is_ndp) { if (!is_ndp) {
tid_data->next_reclaimed = next_reclaimed; tid_data->next_reclaimed = next_reclaimed;
@ -1452,11 +1447,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
* If the txq is not an AMPDU queue, there is no chance we freed * If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out... * several skbs. Check that out...
*/ */
if (txq_agg) if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue)
goto out; goto out;
/* We can't free more than one frame at once on a shared queue */ /* We can't free more than one frame at once on a shared queue */
WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1)); WARN_ON(skb_freed > 1);
/* If we have still frames for this STA nothing to do here */ /* If we have still frames for this STA nothing to do here */
if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))


@@ -57,8 +57,8 @@ MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0");
  * In case of any errors during inittialization, this function also ensures
  * proper cleanup before exiting.
  */
-static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
-			    void **padapter)
+static int mwifiex_register(void *card, struct device *dev,
+			    struct mwifiex_if_ops *if_ops, void **padapter)
 {
 	struct mwifiex_adapter *adapter;
 	int i;
@@ -68,6 +68,7 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
 		return -ENOMEM;
 	*padapter = adapter;
+	adapter->dev = dev;
 	adapter->card = card;
 	/* Save interface specific operations in adapter */
@@ -1568,12 +1569,11 @@ mwifiex_add_card(void *card, struct completion *fw_done,
 {
 	struct mwifiex_adapter *adapter;
-	if (mwifiex_register(card, if_ops, (void **)&adapter)) {
+	if (mwifiex_register(card, dev, if_ops, (void **)&adapter)) {
 		pr_err("%s: software init failed\n", __func__);
 		goto err_init_sw;
 	}
-	adapter->dev = dev;
 	mwifiex_probe_of(adapter);
 	adapter->iface_type = iface_type;
@@ -1718,6 +1718,9 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter)
 	wiphy_unregister(adapter->wiphy);
 	wiphy_free(adapter->wiphy);
+	if (adapter->irq_wakeup >= 0)
+		device_init_wakeup(adapter->dev, false);
 	/* Unregister device */
 	mwifiex_dbg(adapter, INFO,
 		    "info: unregister device\n");


@ -2739,6 +2739,21 @@ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter)
schedule_work(&card->work); schedule_work(&card->work);
} }
static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
if (reg->sleep_cookie)
mwifiex_pcie_delete_sleep_cookie_buf(adapter);
mwifiex_pcie_delete_cmdrsp_buf(adapter);
mwifiex_pcie_delete_evtbd_ring(adapter);
mwifiex_pcie_delete_rxbd_ring(adapter);
mwifiex_pcie_delete_txbd_ring(adapter);
card->cmdrsp_buf = NULL;
}
/* /*
* This function initializes the PCI-E host memory space, WCB rings, etc. * This function initializes the PCI-E host memory space, WCB rings, etc.
* *
@ -2850,13 +2865,6 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter)
/* /*
* This function cleans up the allocated card buffers. * This function cleans up the allocated card buffers.
*
* The following are freed by this function -
* - TXBD ring buffers
* - RXBD ring buffers
* - Event BD ring buffers
* - Command response ring buffer
* - Sleep cookie buffer
*/ */
static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
{ {
@ -2875,6 +2883,8 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
"Failed to write driver not-ready signature\n"); "Failed to write driver not-ready signature\n");
} }
mwifiex_pcie_free_buffers(adapter);
if (pdev) { if (pdev) {
pci_iounmap(pdev, card->pci_mmap); pci_iounmap(pdev, card->pci_mmap);
pci_iounmap(pdev, card->pci_mmap1); pci_iounmap(pdev, card->pci_mmap1);
@ -3126,10 +3136,7 @@ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter)
pci_iounmap(pdev, card->pci_mmap1); pci_iounmap(pdev, card->pci_mmap1);
} }
/* This function cleans up the PCI-E host memory space. /* This function cleans up the PCI-E host memory space. */
* Some code is extracted from mwifiex_unregister_dev()
*
*/
static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
{ {
struct pcie_service_card *card = adapter->card; struct pcie_service_card *card = adapter->card;
@ -3140,14 +3147,7 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
adapter->seq_num = 0; adapter->seq_num = 0;
if (reg->sleep_cookie) mwifiex_pcie_free_buffers(adapter);
mwifiex_pcie_delete_sleep_cookie_buf(adapter);
mwifiex_pcie_delete_cmdrsp_buf(adapter);
mwifiex_pcie_delete_evtbd_ring(adapter);
mwifiex_pcie_delete_rxbd_ring(adapter);
mwifiex_pcie_delete_txbd_ring(adapter);
card->cmdrsp_buf = NULL;
} }
static struct mwifiex_if_ops pcie_ops = { static struct mwifiex_if_ops pcie_ops = {


@ -223,6 +223,46 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
return len; return len;
} }
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
struct vhost_vsock *vsock;
struct virtio_vsock_pkt *pkt, *n;
int cnt = 0;
LIST_HEAD(freeme);
/* Find the vhost_vsock according to guest context id */
vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
if (!vsock)
return -ENODEV;
spin_lock_bh(&vsock->send_pkt_list_lock);
list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
if (pkt->vsk != vsk)
continue;
list_move(&pkt->list, &freeme);
}
spin_unlock_bh(&vsock->send_pkt_list_lock);
list_for_each_entry_safe(pkt, n, &freeme, list) {
if (pkt->reply)
cnt++;
list_del(&pkt->list);
virtio_transport_free_pkt(pkt);
}
if (cnt) {
struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
int new_cnt;
new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
vhost_poll_queue(&tx_vq->poll);
}
return 0;
}
static struct virtio_vsock_pkt * static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq, vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
unsigned int out, unsigned int in) unsigned int out, unsigned int in)
@ -675,6 +715,7 @@ static struct virtio_transport vhost_transport = {
.release = virtio_transport_release, .release = virtio_transport_release,
.connect = virtio_transport_connect, .connect = virtio_transport_connect,
.shutdown = virtio_transport_shutdown, .shutdown = virtio_transport_shutdown,
.cancel_pkt = vhost_transport_cancel_pkt,
.dgram_enqueue = virtio_transport_dgram_enqueue, .dgram_enqueue = virtio_transport_dgram_enqueue,
.dgram_dequeue = virtio_transport_dgram_dequeue, .dgram_dequeue = virtio_transport_dgram_dequeue,


@@ -20,6 +20,8 @@ struct sock_exterr_skb {
 	struct sock_extended_err ee;
 	u16 addr_offset;
 	__be16 port;
+	u8 opt_stats:1,
+	   unused:7;
 };
#endif #endif


@@ -476,6 +476,7 @@
 enum {
 	MLX4_INTERFACE_STATE_UP		= 1 << 0,
 	MLX4_INTERFACE_STATE_DELETION	= 1 << 1,
+	MLX4_INTERFACE_STATE_NOWAIT	= 1 << 2,
 };
 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \


@@ -48,6 +48,8 @@ struct virtio_vsock_pkt {
 	struct virtio_vsock_hdr hdr;
 	struct work_struct work;
 	struct list_head list;
+	/* socket refcnt not held, only use for cancellation */
+	struct vsock_sock *vsk;
 	void *buf;
 	u32 len;
 	u32 off;
@@ -56,6 +58,7 @@
 struct virtio_vsock_pkt_info {
 	u32 remote_cid, remote_port;
+	struct vsock_sock *vsk;
 	struct msghdr *msg;
 	u32 pkt_len;
 	u16 type;


@@ -100,6 +100,9 @@ struct vsock_transport {
 	void (*destruct)(struct vsock_sock *);
 	void (*release)(struct vsock_sock *);
+	/* Cancel all pending packets sent on vsock. */
+	int (*cancel_pkt)(struct vsock_sock *vsk);
+
 	/* Connections. */
 	int (*connect)(struct vsock_sock *);


@@ -244,7 +244,7 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
 			       u32 seq);
 /* Fake conntrack entry for untracked connections */
-DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+DECLARE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
 static inline struct nf_conn *nf_ct_untracked_get(void)
 {
 	return raw_cpu_ptr(&nf_conntrack_untracked);


@@ -103,6 +103,35 @@ struct nft_regs {
 	};
 };
+/* Store/load an u16 or u8 integer to/from the u32 data register.
+ *
+ * Note, when using concatenations, register allocation happens at 32-bit
+ * level. So for store instruction, pad the rest part with zero to avoid
+ * garbage values.
+ */
+static inline void nft_reg_store16(u32 *dreg, u16 val)
+{
+	*dreg = 0;
+	*(u16 *)dreg = val;
+}
+
+static inline void nft_reg_store8(u32 *dreg, u8 val)
+{
+	*dreg = 0;
+	*(u8 *)dreg = val;
+}
+
+static inline u16 nft_reg_load16(u32 *sreg)
+{
+	return *(u16 *)sreg;
+}
+
+static inline u8 nft_reg_load8(u32 *sreg)
+{
+	return *(u8 *)sreg;
+}
+
 static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
 				 unsigned int len)
 {
@@ -203,7 +232,6 @@ struct nft_set_elem {
 struct nft_set;
 struct nft_set_iter {
 	u8		genmask;
-	bool		flush;
 	unsigned int	count;
 	unsigned int	skip;
 	int		err;
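The nft_reg_store16()/nft_reg_store8() helpers above zero the whole 32-bit register slot before writing the narrower value, so a later 32-bit read of a concatenated key never sees stale bytes. A standalone userspace-style sketch of the same zero-padding idea (illustrative names only, not part of the kernel tree):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Store a 16-bit value into a 32-bit "register" slot, clearing the rest
 * so any wider read of the slot sees zero padding instead of garbage.
 */
static void reg_store16(uint32_t *dreg, uint16_t val)
{
	*dreg = 0;                       /* clear all 32 bits first */
	memcpy(dreg, &val, sizeof(val)); /* then write the low 16 bits */
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;       /* pretend this is a reused register */

	reg_store16(&reg, 0x1234);
	printf("0x%08x\n", reg);         /* prints 0x00001234 on little-endian */
	return 0;
}

The kernel helper stores through a cast instead of memcpy(); memcpy() here just keeps the sketch free of strict-aliasing concerns.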


@@ -9,12 +9,13 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
 		     struct sk_buff *skb,
 		     const struct nf_hook_state *state)
 {
+	unsigned int flags = IP6_FH_F_AUTH;
 	int protohdr, thoff = 0;
 	unsigned short frag_off;
 	nft_set_pktinfo(pkt, skb, state);
-	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
 	if (protohdr < 0) {
 		nft_set_pktinfo_proto_unspec(pkt, skb);
 		return;
@@ -32,6 +33,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
 				const struct nf_hook_state *state)
 {
 #if IS_ENABLED(CONFIG_IPV6)
+	unsigned int flags = IP6_FH_F_AUTH;
 	struct ipv6hdr *ip6h, _ip6h;
 	unsigned int thoff = 0;
 	unsigned short frag_off;
@@ -50,7 +52,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
 	if (pkt_len + sizeof(*ip6h) > skb->len)
 		return -1;
-	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
 	if (protohdr < 0)
 		return -1;


@@ -83,6 +83,7 @@ struct sctp_bind_addr;
 struct sctp_ulpq;
 struct sctp_ep_common;
 struct crypto_shash;
+struct sctp_stream;
 #include <net/sctp/tsnmap.h>
@@ -753,6 +754,8 @@ struct sctp_transport {
 		/* Is the Path MTU update pending on this tranport */
 		pmtu_pending:1,
+		dst_pending_confirm:1,	/* need to confirm neighbour */
 		/* Has this transport moved the ctsn since we last sacked */
 		sack_generation:1;
 	u32 dst_cookie;
@@ -806,8 +809,6 @@ struct sctp_transport {
 	__u32 burst_limited;	/* Holds old cwnd when max.burst is applied */
-	__u32 dst_pending_confirm;	/* need to confirm neighbour */
 	/* Destination */
 	struct dst_entry *dst;
 	/* Source address. */


@ -30,18 +30,12 @@ struct bpf_htab {
struct pcpu_freelist freelist; struct pcpu_freelist freelist;
struct bpf_lru lru; struct bpf_lru lru;
}; };
void __percpu *extra_elems; struct htab_elem *__percpu *extra_elems;
atomic_t count; /* number of elements in this hashtable */ atomic_t count; /* number of elements in this hashtable */
u32 n_buckets; /* number of hash buckets */ u32 n_buckets; /* number of hash buckets */
u32 elem_size; /* size of each element in bytes */ u32 elem_size; /* size of each element in bytes */
}; };
enum extra_elem_state {
HTAB_NOT_AN_EXTRA_ELEM = 0,
HTAB_EXTRA_ELEM_FREE,
HTAB_EXTRA_ELEM_USED
};
/* each htab element is struct htab_elem + key + value */ /* each htab element is struct htab_elem + key + value */
struct htab_elem { struct htab_elem {
union { union {
@ -56,7 +50,6 @@ struct htab_elem {
}; };
union { union {
struct rcu_head rcu; struct rcu_head rcu;
enum extra_elem_state state;
struct bpf_lru_node lru_node; struct bpf_lru_node lru_node;
}; };
u32 hash; u32 hash;
@ -77,6 +70,11 @@ static bool htab_is_percpu(const struct bpf_htab *htab)
htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
} }
static bool htab_is_prealloc(const struct bpf_htab *htab)
{
return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size, static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
void __percpu *pptr) void __percpu *pptr)
{ {
@ -128,17 +126,20 @@ static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
static int prealloc_init(struct bpf_htab *htab) static int prealloc_init(struct bpf_htab *htab)
{ {
u32 num_entries = htab->map.max_entries;
int err = -ENOMEM, i; int err = -ENOMEM, i;
htab->elems = bpf_map_area_alloc(htab->elem_size * if (!htab_is_percpu(htab) && !htab_is_lru(htab))
htab->map.max_entries); num_entries += num_possible_cpus();
htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries);
if (!htab->elems) if (!htab->elems)
return -ENOMEM; return -ENOMEM;
if (!htab_is_percpu(htab)) if (!htab_is_percpu(htab))
goto skip_percpu_elems; goto skip_percpu_elems;
for (i = 0; i < htab->map.max_entries; i++) { for (i = 0; i < num_entries; i++) {
u32 size = round_up(htab->map.value_size, 8); u32 size = round_up(htab->map.value_size, 8);
void __percpu *pptr; void __percpu *pptr;
@ -166,11 +167,11 @@ static int prealloc_init(struct bpf_htab *htab)
if (htab_is_lru(htab)) if (htab_is_lru(htab))
bpf_lru_populate(&htab->lru, htab->elems, bpf_lru_populate(&htab->lru, htab->elems,
offsetof(struct htab_elem, lru_node), offsetof(struct htab_elem, lru_node),
htab->elem_size, htab->map.max_entries); htab->elem_size, num_entries);
else else
pcpu_freelist_populate(&htab->freelist, pcpu_freelist_populate(&htab->freelist,
htab->elems + offsetof(struct htab_elem, fnode), htab->elems + offsetof(struct htab_elem, fnode),
htab->elem_size, htab->map.max_entries); htab->elem_size, num_entries);
return 0; return 0;
@ -191,16 +192,22 @@ static void prealloc_destroy(struct bpf_htab *htab)
static int alloc_extra_elems(struct bpf_htab *htab) static int alloc_extra_elems(struct bpf_htab *htab)
{ {
void __percpu *pptr; struct htab_elem *__percpu *pptr, *l_new;
struct pcpu_freelist_node *l;
int cpu; int cpu;
pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN); pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
GFP_USER | __GFP_NOWARN);
if (!pptr) if (!pptr)
return -ENOMEM; return -ENOMEM;
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state = l = pcpu_freelist_pop(&htab->freelist);
HTAB_EXTRA_ELEM_FREE; /* pop will succeed, since prealloc_init()
* preallocated extra num_possible_cpus elements
*/
l_new = container_of(l, struct htab_elem, fnode);
*per_cpu_ptr(pptr, cpu) = l_new;
} }
htab->extra_elems = pptr; htab->extra_elems = pptr;
return 0; return 0;
@ -342,25 +349,25 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
raw_spin_lock_init(&htab->buckets[i].lock); raw_spin_lock_init(&htab->buckets[i].lock);
} }
if (!percpu && !lru) {
/* lru itself can remove the least used element, so
* there is no need for an extra elem during map_update.
*/
err = alloc_extra_elems(htab);
if (err)
goto free_buckets;
}
if (prealloc) { if (prealloc) {
err = prealloc_init(htab); err = prealloc_init(htab);
if (err) if (err)
goto free_extra_elems; goto free_buckets;
if (!percpu && !lru) {
/* lru itself can remove the least used element, so
* there is no need for an extra elem during map_update.
*/
err = alloc_extra_elems(htab);
if (err)
goto free_prealloc;
}
} }
return &htab->map; return &htab->map;
free_extra_elems: free_prealloc:
free_percpu(htab->extra_elems); prealloc_destroy(htab);
free_buckets: free_buckets:
bpf_map_area_free(htab->buckets); bpf_map_area_free(htab->buckets);
free_htab: free_htab:
@ -575,12 +582,7 @@ static void htab_elem_free_rcu(struct rcu_head *head)
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{ {
if (l->state == HTAB_EXTRA_ELEM_USED) { if (htab_is_prealloc(htab)) {
l->state = HTAB_EXTRA_ELEM_FREE;
return;
}
if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
pcpu_freelist_push(&htab->freelist, &l->fnode); pcpu_freelist_push(&htab->freelist, &l->fnode);
} else { } else {
atomic_dec(&htab->count); atomic_dec(&htab->count);
@ -610,47 +612,43 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
void *value, u32 key_size, u32 hash, void *value, u32 key_size, u32 hash,
bool percpu, bool onallcpus, bool percpu, bool onallcpus,
bool old_elem_exists) struct htab_elem *old_elem)
{ {
u32 size = htab->map.value_size; u32 size = htab->map.value_size;
bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC); bool prealloc = htab_is_prealloc(htab);
struct htab_elem *l_new; struct htab_elem *l_new, **pl_new;
void __percpu *pptr; void __percpu *pptr;
int err = 0;
if (prealloc) { if (prealloc) {
struct pcpu_freelist_node *l; if (old_elem) {
/* if we're updating the existing element,
l = pcpu_freelist_pop(&htab->freelist); * use per-cpu extra elems to avoid freelist_pop/push
if (!l) */
err = -E2BIG; pl_new = this_cpu_ptr(htab->extra_elems);
else l_new = *pl_new;
l_new = container_of(l, struct htab_elem, fnode); *pl_new = old_elem;
} else {
if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
atomic_dec(&htab->count);
err = -E2BIG;
} else { } else {
l_new = kmalloc(htab->elem_size, struct pcpu_freelist_node *l;
GFP_ATOMIC | __GFP_NOWARN);
if (!l_new) l = pcpu_freelist_pop(&htab->freelist);
return ERR_PTR(-ENOMEM); if (!l)
return ERR_PTR(-E2BIG);
l_new = container_of(l, struct htab_elem, fnode);
} }
}
if (err) {
if (!old_elem_exists)
return ERR_PTR(err);
/* if we're updating the existing element and the hash table
* is full, use per-cpu extra elems
*/
l_new = this_cpu_ptr(htab->extra_elems);
if (l_new->state != HTAB_EXTRA_ELEM_FREE)
return ERR_PTR(-E2BIG);
l_new->state = HTAB_EXTRA_ELEM_USED;
} else { } else {
l_new->state = HTAB_NOT_AN_EXTRA_ELEM; if (atomic_inc_return(&htab->count) > htab->map.max_entries)
if (!old_elem) {
/* when map is full and update() is replacing
* old element, it's ok to allocate, since
* old element will be freed immediately.
* Otherwise return an error
*/
atomic_dec(&htab->count);
return ERR_PTR(-E2BIG);
}
l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
if (!l_new)
return ERR_PTR(-ENOMEM);
} }
memcpy(l_new->key, key, key_size); memcpy(l_new->key, key, key_size);
@ -731,7 +729,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
goto err; goto err;
l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
!!l_old); l_old);
if (IS_ERR(l_new)) { if (IS_ERR(l_new)) {
/* all pre-allocated elements are in use or memory exhausted */ /* all pre-allocated elements are in use or memory exhausted */
ret = PTR_ERR(l_new); ret = PTR_ERR(l_new);
@ -744,7 +742,8 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
hlist_nulls_add_head_rcu(&l_new->hash_node, head); hlist_nulls_add_head_rcu(&l_new->hash_node, head);
if (l_old) { if (l_old) {
hlist_nulls_del_rcu(&l_old->hash_node); hlist_nulls_del_rcu(&l_old->hash_node);
free_htab_elem(htab, l_old); if (!htab_is_prealloc(htab))
free_htab_elem(htab, l_old);
} }
ret = 0; ret = 0;
err: err:
@ -856,7 +855,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
value, onallcpus); value, onallcpus);
} else { } else {
l_new = alloc_htab_elem(htab, key, value, key_size, l_new = alloc_htab_elem(htab, key, value, key_size,
hash, true, onallcpus, false); hash, true, onallcpus, NULL);
if (IS_ERR(l_new)) { if (IS_ERR(l_new)) {
ret = PTR_ERR(l_new); ret = PTR_ERR(l_new);
goto err; goto err;
@ -1024,8 +1023,7 @@ static void delete_all_elements(struct bpf_htab *htab)
hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
hlist_nulls_del_rcu(&l->hash_node); hlist_nulls_del_rcu(&l->hash_node);
if (l->state != HTAB_EXTRA_ELEM_USED) htab_elem_free(htab, l);
htab_elem_free(htab, l);
} }
} }
} }
@ -1045,7 +1043,7 @@ static void htab_map_free(struct bpf_map *map)
* not have executed. Wait for them. * not have executed. Wait for them.
*/ */
rcu_barrier(); rcu_barrier();
if (htab->map.map_flags & BPF_F_NO_PREALLOC) if (!htab_is_prealloc(htab))
delete_all_elements(htab); delete_all_elements(htab);
else else
prealloc_destroy(htab); prealloc_destroy(htab);
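The hashtab change above drops the per-element HTAB_EXTRA_ELEM_* state and instead keeps one spare element pointer per CPU: when an existing key is updated, the new element comes from the CPU's spare slot and the element being replaced becomes the next spare, so the shared freelist is not touched on that path. A minimal sketch of that swap, with illustrative names rather than the kernel API:

struct elem { int key; int value; };

/* Reuse a per-CPU spare slot when replacing an existing entry:
 * hand out the current spare and park the displaced element in its place.
 */
static struct elem *take_spare_and_park(struct elem **spare_slot,
					struct elem *old_elem)
{
	struct elem *new_elem = *spare_slot;	/* element we will insert */

	*spare_slot = old_elem;			/* displaced element becomes the spare */
	return new_elem;
}

In the patch this corresponds to the this_cpu_ptr(htab->extra_elems) branch of alloc_htab_elem(), and the old element is therefore no longer pushed back through free_htab_elem() on the prealloc path.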


@@ -2477,6 +2477,16 @@ static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
 	batadv_iv_ogm_schedule(hard_iface);
 }
+/**
+ * batadv_iv_init_sel_class - initialize GW selection class
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
+{
+	/* set default TQ difference threshold to 20 */
+	atomic_set(&bat_priv->gw.sel_class, 20);
+}
+
 static struct batadv_gw_node *
 batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
 {
@@ -2823,6 +2833,7 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
 		.del_if = batadv_iv_ogm_orig_del_if,
 	},
 	.gw = {
+		.init_sel_class = batadv_iv_init_sel_class,
 		.get_best_gw_node = batadv_iv_gw_get_best_gw_node,
 		.is_eligible = batadv_iv_gw_is_eligible,
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS


@@ -668,6 +668,16 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
 	return ret;
 }
+/**
+ * batadv_v_init_sel_class - initialize GW selection class
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
+{
+	/* set default throughput difference threshold to 5Mbps */
+	atomic_set(&bat_priv->gw.sel_class, 50);
+}
+
 static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv,
 					char *buff, size_t count)
 {
@@ -1052,6 +1062,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
 		.dump = batadv_v_orig_dump,
 	},
 	.gw = {
+		.init_sel_class = batadv_v_init_sel_class,
 		.store_sel_class = batadv_v_store_sel_class,
 		.show_sel_class = batadv_v_show_sel_class,
 		.get_best_gw_node = batadv_v_gw_get_best_gw_node,
@@ -1092,9 +1103,6 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv)
 	if (ret < 0)
 		return ret;
-	/* set default throughput difference threshold to 5Mbps */
-	atomic_set(&bat_priv->gw.sel_class, 50);
-
 	return 0;
 }


@@ -404,7 +404,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
  * batadv_frag_create - create a fragment from skb
  * @skb: skb to create fragment from
  * @frag_head: header to use in new fragment
- * @mtu: size of new fragment
+ * @fragment_size: size of new fragment
  *
  * Split the passed skb into two fragments: A new one with size matching the
  * passed mtu and the old one with the rest. The new skb contains data from the
@@ -414,11 +414,11 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
  */
 static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
 					  struct batadv_frag_packet *frag_head,
-					  unsigned int mtu)
+					  unsigned int fragment_size)
 {
 	struct sk_buff *skb_fragment;
 	unsigned int header_size = sizeof(*frag_head);
-	unsigned int fragment_size = mtu - header_size;
+	unsigned int mtu = fragment_size + header_size;
 	skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
 	if (!skb_fragment)
@@ -456,7 +456,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 	struct sk_buff *skb_fragment;
 	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
 	unsigned int header_size = sizeof(frag_header);
-	unsigned int max_fragment_size, max_packet_size;
+	unsigned int max_fragment_size, num_fragments;
 	int ret;
 	/* To avoid merge and refragmentation at next-hops we never send
@@ -464,10 +464,15 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 	 */
 	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
 	max_fragment_size = mtu - header_size;
-	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
+
+	if (skb->len == 0 || max_fragment_size == 0)
+		return -EINVAL;
+
+	num_fragments = (skb->len - 1) / max_fragment_size + 1;
+	max_fragment_size = (skb->len - 1) / num_fragments + 1;
 	/* Don't even try to fragment, if we need more than 16 fragments */
-	if (skb->len > max_packet_size) {
+	if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
 		ret = -EAGAIN;
 		goto free_skb;
 	}
@@ -507,7 +512,8 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 		goto put_primary_if;
 	}
-	skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
+	skb_fragment = batadv_frag_create(skb, &frag_header,
+					  max_fragment_size);
 	if (!skb_fragment) {
 		ret = -ENOMEM;
 		goto put_primary_if;
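The new sizing logic above first computes how many fragments are needed at the largest allowed payload, then spreads the bytes evenly across that many fragments. A quick worked example under assumed numbers, using the same integer arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 3000;		/* assumed packet size */
	unsigned int max_payload = 1400;	/* assumed per-fragment payload limit */

	/* same arithmetic as batadv_frag_send_packet() */
	unsigned int num_fragments = (skb_len - 1) / max_payload + 1;
	unsigned int frag_size = (skb_len - 1) / num_fragments + 1;

	printf("%u fragments of at most %u bytes\n", num_fragments, frag_size);
	/* prints: 3 fragments of at most 1000 bytes */
	return 0;
}

So a 3000-byte packet goes out as three fragments of roughly 1000 bytes each, instead of 1400 + 1400 + 200 as before.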


@@ -253,6 +253,11 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
  */
 void batadv_gw_init(struct batadv_priv *bat_priv)
 {
+	if (bat_priv->algo_ops->gw.init_sel_class)
+		bat_priv->algo_ops->gw.init_sel_class(bat_priv);
+	else
+		atomic_set(&bat_priv->gw.sel_class, 1);
+
 	batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
 				     NULL, BATADV_TVLV_GW, 1,
 				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);


@@ -819,7 +819,6 @@ static int batadv_softif_init_late(struct net_device *dev)
 	atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
 #endif
 	atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
-	atomic_set(&bat_priv->gw.sel_class, 20);
 	atomic_set(&bat_priv->gw.bandwidth_down, 100);
 	atomic_set(&bat_priv->gw.bandwidth_up, 20);
 	atomic_set(&bat_priv->orig_interval, 1000);


@@ -1489,6 +1489,7 @@ struct batadv_algo_orig_ops {
 /**
  * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
+ * @init_sel_class: initialize GW selection class (optional)
  * @store_sel_class: parse and stores a new GW selection class (optional)
  * @show_sel_class: prints the current GW selection class (optional)
  * @get_best_gw_node: select the best GW from the list of available nodes
@@ -1499,6 +1500,7 @@ struct batadv_algo_orig_ops {
  * @dump: dump gateways to a netlink socket (optional)
  */
 struct batadv_algo_gw_ops {
+	void (*init_sel_class)(struct batadv_priv *bat_priv);
 	ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
 				   size_t count);
 	ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);


@@ -106,7 +106,7 @@ static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
 	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
 	struct net_bridge_fdb_entry *fdb;
-	WARN_ON_ONCE(!br_hash_lock_held(br));
+	lockdep_assert_held_once(&br->hash_lock);
 	rcu_read_lock();
 	fdb = fdb_find_rcu(head, addr, vid);


@@ -706,18 +706,20 @@ static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
 static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct nf_bridge_info *nf_bridge;
-	unsigned int mtu_reserved;
+	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+	unsigned int mtu, mtu_reserved;
 	mtu_reserved = nf_bridge_mtu_reduction(skb);
+	mtu = skb->dev->mtu;
-	if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) {
+	if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
+		mtu = nf_bridge->frag_max_size;
+
+	if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
 		nf_bridge_info_free(skb);
 		return br_dev_queue_push_xmit(net, sk, skb);
 	}
-	nf_bridge = nf_bridge_info_get(skb);
 	/* This is wrong! We should preserve the original fragment
 	 * boundaries by preserving frag_list rather than refragmenting.
 	 */


@@ -531,15 +531,6 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
 			      const unsigned char *addr, u16 vid);
-static inline bool br_hash_lock_held(struct net_bridge *br)
-{
-#ifdef CONFIG_LOCKDEP
-	return lockdep_is_held(&br->hash_lock);
-#else
-	return true;
-#endif
-}
-
 /* br_forward.c */
 enum br_pkt_type {
 	BR_PKT_UNICAST,


@ -71,27 +71,17 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n)
return 0; return 0;
} }
static void update_classid(struct cgroup_subsys_state *css, void *v)
{
struct css_task_iter it;
struct task_struct *p;
css_task_iter_start(css, &it);
while ((p = css_task_iter_next(&it))) {
task_lock(p);
iterate_fd(p->files, 0, update_classid_sock, v);
task_unlock(p);
}
css_task_iter_end(&it);
}
static void cgrp_attach(struct cgroup_taskset *tset) static void cgrp_attach(struct cgroup_taskset *tset)
{ {
struct cgroup_subsys_state *css; struct cgroup_subsys_state *css;
struct task_struct *p;
cgroup_taskset_first(tset, &css); cgroup_taskset_for_each(p, css, tset) {
update_classid(css, task_lock(p);
(void *)(unsigned long)css_cls_state(css)->classid); iterate_fd(p->files, 0, update_classid_sock,
(void *)(unsigned long)css_cls_state(css)->classid);
task_unlock(p);
}
} }
static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
@ -103,12 +93,22 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
u64 value) u64 value)
{ {
struct cgroup_cls_state *cs = css_cls_state(css); struct cgroup_cls_state *cs = css_cls_state(css);
struct css_task_iter it;
struct task_struct *p;
cgroup_sk_alloc_disable(); cgroup_sk_alloc_disable();
cs->classid = (u32)value; cs->classid = (u32)value;
update_classid(css, (void *)(unsigned long)cs->classid); css_task_iter_start(css, &it);
while ((p = css_task_iter_next(&it))) {
task_lock(p);
iterate_fd(p->files, 0, update_classid_sock,
(void *)(unsigned long)cs->classid);
task_unlock(p);
}
css_task_iter_end(&it);
return 0; return 0;
} }


@ -3694,6 +3694,15 @@ static void sock_rmem_free(struct sk_buff *skb)
atomic_sub(skb->truesize, &sk->sk_rmem_alloc); atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
} }
static void skb_set_err_queue(struct sk_buff *skb)
{
/* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
* So, it is safe to (mis)use it to mark skbs on the error queue.
*/
skb->pkt_type = PACKET_OUTGOING;
BUILD_BUG_ON(PACKET_OUTGOING == 0);
}
/* /*
* Note: We dont mem charge error packets (no sk_forward_alloc changes) * Note: We dont mem charge error packets (no sk_forward_alloc changes)
*/ */
@ -3707,6 +3716,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
skb->sk = sk; skb->sk = sk;
skb->destructor = sock_rmem_free; skb->destructor = sock_rmem_free;
atomic_add(skb->truesize, &sk->sk_rmem_alloc); atomic_add(skb->truesize, &sk->sk_rmem_alloc);
skb_set_err_queue(skb);
/* before exiting rcu section, make sure dst is refcounted */ /* before exiting rcu section, make sure dst is refcounted */
skb_dst_force(skb); skb_dst_force(skb);
@ -3783,16 +3793,20 @@ EXPORT_SYMBOL(skb_clone_sk);
static void __skb_complete_tx_timestamp(struct sk_buff *skb, static void __skb_complete_tx_timestamp(struct sk_buff *skb,
struct sock *sk, struct sock *sk,
int tstype) int tstype,
bool opt_stats)
{ {
struct sock_exterr_skb *serr; struct sock_exterr_skb *serr;
int err; int err;
BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
serr = SKB_EXT_ERR(skb); serr = SKB_EXT_ERR(skb);
memset(serr, 0, sizeof(*serr)); memset(serr, 0, sizeof(*serr));
serr->ee.ee_errno = ENOMSG; serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
serr->ee.ee_info = tstype; serr->ee.ee_info = tstype;
serr->opt_stats = opt_stats;
if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
serr->ee.ee_data = skb_shinfo(skb)->tskey; serr->ee.ee_data = skb_shinfo(skb)->tskey;
if (sk->sk_protocol == IPPROTO_TCP && if (sk->sk_protocol == IPPROTO_TCP &&
@ -3833,7 +3847,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
*/ */
if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
*skb_hwtstamps(skb) = *hwtstamps; *skb_hwtstamps(skb) = *hwtstamps;
__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
sock_put(sk); sock_put(sk);
} }
} }
@ -3844,7 +3858,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
struct sock *sk, int tstype) struct sock *sk, int tstype)
{ {
struct sk_buff *skb; struct sk_buff *skb;
bool tsonly; bool tsonly, opt_stats = false;
if (!sk) if (!sk)
return; return;
@ -3857,9 +3871,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
#ifdef CONFIG_INET #ifdef CONFIG_INET
if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
sk->sk_protocol == IPPROTO_TCP && sk->sk_protocol == IPPROTO_TCP &&
sk->sk_type == SOCK_STREAM) sk->sk_type == SOCK_STREAM) {
skb = tcp_get_timestamping_opt_stats(sk); skb = tcp_get_timestamping_opt_stats(sk);
else opt_stats = true;
} else
#endif #endif
skb = alloc_skb(0, GFP_ATOMIC); skb = alloc_skb(0, GFP_ATOMIC);
} else { } else {
@ -3878,7 +3893,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
else else
skb->tstamp = ktime_get_real(); skb->tstamp = ktime_get_real();
__skb_complete_tx_timestamp(skb, sk, tstype); __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
} }
EXPORT_SYMBOL_GPL(__skb_tstamp_tx); EXPORT_SYMBOL_GPL(__skb_tstamp_tx);


@@ -1442,6 +1442,11 @@ static void __sk_destruct(struct rcu_head *head)
 		pr_debug("%s: optmem leakage (%d bytes) detected\n",
 			 __func__, atomic_read(&sk->sk_omem_alloc));
+	if (sk->sk_frag.page) {
+		put_page(sk->sk_frag.page);
+		sk->sk_frag.page = NULL;
+	}
+
 	if (sk->sk_peer_cred)
 		put_cred(sk->sk_peer_cred);
 	put_pid(sk->sk_peer_pid);
@@ -1539,6 +1544,12 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		is_charged = sk_filter_charge(newsk, filter);
 	if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
+		/* We need to make sure that we don't uncharge the new
+		 * socket if we couldn't charge it in the first place
+		 * as otherwise we uncharge the parent's filter.
+		 */
+		if (!is_charged)
+			RCU_INIT_POINTER(newsk->sk_filter, NULL);
 		sk_free_unlock_clone(newsk);
 		newsk = NULL;
 		goto out;
@@ -2787,11 +2798,6 @@ void sk_common_release(struct sock *sk)
 	sk_refcnt_debug_release(sk);
-	if (sk->sk_frag.page) {
-		put_page(sk->sk_frag.page);
-		sk->sk_frag.page = NULL;
-	}
-
 	sock_put(sk);
 }
 EXPORT_SYMBOL(sk_common_release);


@@ -1083,7 +1083,8 @@ static void nl_fib_input(struct sk_buff *skb)
 	net = sock_net(skb->sk);
 	nlh = nlmsg_hdr(skb);
-	if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
+	if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
+	    skb->len < nlh->nlmsg_len ||
 	    nlmsg_len(nlh) < sizeof(*frn))
 		return;
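The nl_fib_input() fix above validates the buffer against the full netlink header plus the expected payload before trusting the length field the sender put inside the message. A hedged sketch of that ordering in plain C (the struct and sizes are stand-ins, not the kernel's netlink API):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct nlmsghdr_lite {			/* stand-in for struct nlmsghdr */
	uint32_t nlmsg_len;		/* length claimed by the sender */
	/* type/flags/seq/pid omitted in this sketch */
};

/* Check, in order: is the whole expected message present in the buffer,
 * is the sender's claimed length within the buffer, and does the claimed
 * payload cover the structure we are about to read?
 */
static bool frame_is_sane(const void *buf, size_t buf_len,
			  size_t hdr_len, size_t payload_len)
{
	const struct nlmsghdr_lite *nlh = buf;

	if (buf_len < hdr_len + payload_len)
		return false;
	if (buf_len < nlh->nlmsg_len)
		return false;
	if (nlh->nlmsg_len < hdr_len + payload_len)
		return false;
	return true;
}

The three conditions mirror the three comparisons in the fixed code; only when all of them hold is it safe to dereference the payload.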


@ -198,6 +198,7 @@ static void ip_expire(unsigned long arg)
qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
net = container_of(qp->q.net, struct net, ipv4.frags); net = container_of(qp->q.net, struct net, ipv4.frags);
rcu_read_lock();
spin_lock(&qp->q.lock); spin_lock(&qp->q.lock);
if (qp->q.flags & INET_FRAG_COMPLETE) if (qp->q.flags & INET_FRAG_COMPLETE)
@ -207,7 +208,7 @@ static void ip_expire(unsigned long arg)
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
if (!inet_frag_evicting(&qp->q)) { if (!inet_frag_evicting(&qp->q)) {
struct sk_buff *head = qp->q.fragments; struct sk_buff *clone, *head = qp->q.fragments;
const struct iphdr *iph; const struct iphdr *iph;
int err; int err;
@ -216,32 +217,40 @@ static void ip_expire(unsigned long arg)
if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments) if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
goto out; goto out;
rcu_read_lock();
head->dev = dev_get_by_index_rcu(net, qp->iif); head->dev = dev_get_by_index_rcu(net, qp->iif);
if (!head->dev) if (!head->dev)
goto out_rcu_unlock; goto out;
/* skb has no dst, perform route lookup again */ /* skb has no dst, perform route lookup again */
iph = ip_hdr(head); iph = ip_hdr(head);
err = ip_route_input_noref(head, iph->daddr, iph->saddr, err = ip_route_input_noref(head, iph->daddr, iph->saddr,
iph->tos, head->dev); iph->tos, head->dev);
if (err) if (err)
goto out_rcu_unlock; goto out;
/* Only an end host needs to send an ICMP /* Only an end host needs to send an ICMP
* "Fragment Reassembly Timeout" message, per RFC792. * "Fragment Reassembly Timeout" message, per RFC792.
*/ */
if (frag_expire_skip_icmp(qp->user) && if (frag_expire_skip_icmp(qp->user) &&
(skb_rtable(head)->rt_type != RTN_LOCAL)) (skb_rtable(head)->rt_type != RTN_LOCAL))
goto out_rcu_unlock; goto out;
clone = skb_clone(head, GFP_ATOMIC);
/* Send an ICMP "Fragment Reassembly Timeout" message. */ /* Send an ICMP "Fragment Reassembly Timeout" message. */
icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); if (clone) {
out_rcu_unlock: spin_unlock(&qp->q.lock);
rcu_read_unlock(); icmp_send(clone, ICMP_TIME_EXCEEDED,
ICMP_EXC_FRAGTIME, 0);
consume_skb(clone);
goto out_rcu_unlock;
}
} }
out: out:
spin_unlock(&qp->q.lock); spin_unlock(&qp->q.lock);
out_rcu_unlock:
rcu_read_unlock();
ipq_put(qp); ipq_put(qp);
} }
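The ip_expire() change above avoids calling icmp_send() while the fragment queue spinlock is held: the head skb is cloned under the lock, the lock is dropped, and the ICMP "Fragment Reassembly Timeout" is generated from the clone. A small runnable sketch of the same clone-then-unlock-then-notify pattern, with illustrative names and a userspace mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct item { char msg[32]; };

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item head = { "fragment queue timed out" };

int main(void)
{
	struct item *copy;

	pthread_mutex_lock(&queue_lock);
	copy = malloc(sizeof(*copy));		/* like skb_clone() under qp->q.lock */
	if (copy)
		memcpy(copy, &head, sizeof(*copy));
	pthread_mutex_unlock(&queue_lock);	/* drop the lock first ... */

	if (copy) {
		printf("notify: %s\n", copy->msg);	/* ... then the slow call, like icmp_send() */
		free(copy);
	}
	return 0;
}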


@@ -165,6 +165,10 @@ static unsigned int ipv4_conntrack_local(void *priv,
 	if (skb->len < sizeof(struct iphdr) ||
 	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
+
+	if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
+		return NF_ACCEPT;
+
 	return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
 }


@@ -255,11 +255,6 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
 	/* maniptype == SRC for postrouting. */
 	enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);
-	/* We never see fragments: conntrack defrags on pre-routing
-	 * and local-out, and nf_nat_out protects post-routing.
-	 */
-	NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
-
 	ct = nf_ct_get(skb, &ctinfo);
 	/* Can't track? It's not due to stress, or conntrack would
 	 * have dropped it. Hence it's the user's responsibilty to


@@ -26,10 +26,10 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr,
 	memset(&range, 0, sizeof(range));
 	range.flags = priv->flags;
 	if (priv->sreg_proto_min) {
-		range.min_proto.all =
-			*(__be16 *)&regs->data[priv->sreg_proto_min];
-		range.max_proto.all =
-			*(__be16 *)&regs->data[priv->sreg_proto_max];
+		range.min_proto.all = (__force __be16)nft_reg_load16(
+			&regs->data[priv->sreg_proto_min]);
+		range.max_proto.all = (__force __be16)nft_reg_load16(
+			&regs->data[priv->sreg_proto_max]);
 	}
 	regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt),
 						    &range, nft_out(pkt));


@@ -26,10 +26,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
 	memset(&mr, 0, sizeof(mr));
 	if (priv->sreg_proto_min) {
-		mr.range[0].min.all =
-			*(__be16 *)&regs->data[priv->sreg_proto_min];
-		mr.range[0].max.all =
-			*(__be16 *)&regs->data[priv->sreg_proto_max];
+		mr.range[0].min.all = (__force __be16)nft_reg_load16(
+			&regs->data[priv->sreg_proto_min]);
+		mr.range[0].max.all = (__force __be16)nft_reg_load16(
+			&regs->data[priv->sreg_proto_max]);
 		mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
 	}


@@ -2770,7 +2770,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 {
 	const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	u32 now = tcp_time_stamp, intv;
+	u32 now, intv;
 	u64 rate64;
 	bool slow;
 	u32 rate;
@@ -2839,6 +2839,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	info->tcpi_retrans = tp->retrans_out;
 	info->tcpi_fackets = tp->fackets_out;
+	now = tcp_time_stamp;
 	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
 	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
 	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);


@@ -5541,6 +5541,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	tcp_set_state(sk, TCP_ESTABLISHED);
+	icsk->icsk_ack.lrcvtime = tcp_time_stamp;
 	if (skb) {
 		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
@@ -5759,7 +5760,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		 * to stand against the temptation 8)     --ANK
 		 */
 		inet_csk_schedule_ack(sk);
-		icsk->icsk_ack.lrcvtime = tcp_time_stamp;
 		tcp_enter_quickack_mode(sk);
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 					  TCP_DELACK_MAX, TCP_RTO_MAX);


@@ -460,6 +460,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
 		minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
 		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
+		newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
 		newtp->packets_out = 0;
 		newtp->retrans_out = 0;


@@ -27,10 +27,10 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr,
 	memset(&range, 0, sizeof(range));
 	range.flags = priv->flags;
 	if (priv->sreg_proto_min) {
-		range.min_proto.all =
-			*(__be16 *)&regs->data[priv->sreg_proto_min];
-		range.max_proto.all =
-			*(__be16 *)&regs->data[priv->sreg_proto_max];
+		range.min_proto.all = (__force __be16)nft_reg_load16(
+			&regs->data[priv->sreg_proto_min]);
+		range.max_proto.all = (__force __be16)nft_reg_load16(
+			&regs->data[priv->sreg_proto_max]);
 	}
 	regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range,
 						    nft_out(pkt));


@@ -26,10 +26,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
 	memset(&range, 0, sizeof(range));
 	if (priv->sreg_proto_min) {
-		range.min_proto.all =
-			*(__be16 *)&regs->data[priv->sreg_proto_min],
-		range.max_proto.all =
-			*(__be16 *)&regs->data[priv->sreg_proto_max],
+		range.min_proto.all = (__force __be16)nft_reg_load16(
+			&regs->data[priv->sreg_proto_min]);
+		range.max_proto.all = (__force __be16)nft_reg_load16(
+			&regs->data[priv->sreg_proto_max]);
 		range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
 	}


@@ -3423,6 +3423,8 @@ static int rt6_fill_node(struct net *net,
 	}
 	else if (rt->rt6i_flags & RTF_LOCAL)
 		rtm->rtm_type = RTN_LOCAL;
+	else if (rt->rt6i_flags & RTF_ANYCAST)
+		rtm->rtm_type = RTN_ANYCAST;
 	else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
 		rtm->rtm_type = RTN_LOCAL;
 	else


@@ -1035,6 +1035,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	ipc6.hlimit = -1;
 	ipc6.tclass = -1;
 	ipc6.dontfrag = -1;
+	sockc.tsflags = sk->sk_tsflags;
 	/* destination address check */
 	if (sin6) {
@@ -1159,7 +1160,6 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	fl6.flowi6_mark = sk->sk_mark;
 	fl6.flowi6_uid = sk->sk_uid;
-	sockc.tsflags = sk->sk_tsflags;
 	if (msg->msg_controllen) {
 		opt = &opt_space;


@ -1269,6 +1269,8 @@ static void mpls_ifdown(struct net_device *dev, int event)
{ {
struct mpls_route __rcu **platform_label; struct mpls_route __rcu **platform_label;
struct net *net = dev_net(dev); struct net *net = dev_net(dev);
unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN;
unsigned int alive;
unsigned index; unsigned index;
platform_label = rtnl_dereference(net->mpls.platform_label); platform_label = rtnl_dereference(net->mpls.platform_label);
@ -1278,9 +1280,11 @@ static void mpls_ifdown(struct net_device *dev, int event)
if (!rt) if (!rt)
continue; continue;
alive = 0;
change_nexthops(rt) { change_nexthops(rt) {
if (rtnl_dereference(nh->nh_dev) != dev) if (rtnl_dereference(nh->nh_dev) != dev)
continue; goto next;
switch (event) { switch (event) {
case NETDEV_DOWN: case NETDEV_DOWN:
case NETDEV_UNREGISTER: case NETDEV_UNREGISTER:
@ -1288,13 +1292,16 @@ static void mpls_ifdown(struct net_device *dev, int event)
/* fall through */ /* fall through */
case NETDEV_CHANGE: case NETDEV_CHANGE:
nh->nh_flags |= RTNH_F_LINKDOWN; nh->nh_flags |= RTNH_F_LINKDOWN;
if (event != NETDEV_UNREGISTER)
ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
break; break;
} }
if (event == NETDEV_UNREGISTER) if (event == NETDEV_UNREGISTER)
RCU_INIT_POINTER(nh->nh_dev, NULL); RCU_INIT_POINTER(nh->nh_dev, NULL);
next:
if (!(nh->nh_flags & nh_flags))
alive++;
} endfor_nexthops(rt); } endfor_nexthops(rt);
WRITE_ONCE(rt->rt_nhn_alive, alive);
} }
} }
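The mpls_ifdown() change above stops decrementing rt_nhn_alive in place and instead recounts, on every pass over the route, how many nexthops are left without the DEAD/LINKDOWN flags, publishing the total once with WRITE_ONCE(). A small illustrative recount loop (names and flag values are placeholders):

#include <stdio.h>

#define NH_F_DEAD     0x1
#define NH_F_LINKDOWN 0x2

int main(void)
{
	unsigned int flags[4] = { 0, NH_F_LINKDOWN, NH_F_DEAD | NH_F_LINKDOWN, 0 };
	unsigned int bad = NH_F_DEAD | NH_F_LINKDOWN;
	unsigned int alive = 0;

	/* recount from scratch instead of trusting an incrementally
	 * maintained counter that can drift when events overlap
	 */
	for (unsigned int i = 0; i < 4; i++)
		if (!(flags[i] & bad))
			alive++;

	printf("alive nexthops: %u\n", alive);	/* prints 2 */
	return 0;
}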


@@ -181,7 +181,11 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
 unsigned int nf_conntrack_max __read_mostly;
 seqcount_t nf_conntrack_generation __read_mostly;
-DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+/* nf_conn must be 8 bytes aligned, as the 3 LSB bits are used
+ * for the nfctinfo. We cheat by (ab)using the PER CPU cache line
+ * alignment to enforce this.
+ */
+DEFINE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 static unsigned int nf_conntrack_hash_rnd __read_mostly;
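The comment added above relies on struct nf_conn being at least 8-byte aligned, so the 3 low bits of a pointer to it are always zero and can carry the connection-tracking state. A userspace-style sketch of packing a small value into those spare bits (illustrative only, not the kernel's API):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define INFO_MASK 0x7UL	/* 3 LSBs are free when objects are 8-byte aligned */

struct conn { long _force_alignment; int data; };

static uintptr_t pack(struct conn *c, unsigned int info)
{
	assert(((uintptr_t)c & INFO_MASK) == 0);	/* alignment guarantees zero LSBs */
	return (uintptr_t)c | (info & INFO_MASK);
}

static struct conn *unpack(uintptr_t word, unsigned int *info)
{
	*info = word & INFO_MASK;
	return (struct conn *)(word & ~INFO_MASK);
}

int main(void)
{
	struct conn *c = malloc(sizeof(*c));	/* malloc returns suitably aligned memory */
	unsigned int info;
	uintptr_t word = pack(c, 5);
	struct conn *back = unpack(word, &info);

	printf("%s, info=%u\n", back == c ? "pointer preserved" : "mismatch", info);
	free(c);
	return 0;
}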


@@ -33,8 +33,16 @@ sctp_manip_pkt(struct sk_buff *skb,
 	       enum nf_nat_manip_type maniptype)
 {
 	sctp_sctphdr_t *hdr;
+	int hdrsize = 8;
-	if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+	/* This could be an inner header returned in imcp packet; in such
+	 * cases we cannot update the checksum field since it is outside
+	 * of the 8 bytes of transport layer headers we are guaranteed.
+	 */
+	if (skb->len >= hdroff + sizeof(*hdr))
+		hdrsize = sizeof(*hdr);
+
+	if (!skb_make_writable(skb, hdroff + hdrsize))
 		return false;
 	hdr = (struct sctphdr *)(skb->data + hdroff);
@@ -47,6 +55,9 @@ sctp_manip_pkt(struct sk_buff *skb,
 		hdr->dest = tuple->dst.u.sctp.port;
 	}
+	if (hdrsize < sizeof(*hdr))
+		return true;
+
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
 		hdr->checksum = sctp_compute_cksum(skb, hdroff);
 		skb->ip_summed = CHECKSUM_NONE;
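The SCTP NAT change above only touches the checksum when the full SCTP header is actually present; for truncated headers quoted inside ICMP errors it rewrites just the ports, since only the first 8 bytes of the transport header are guaranteed. A tiny sketch of that size decision, with stand-in types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

struct sctp_hdr { unsigned short src, dst; unsigned int vtag, checksum; }; /* 12 bytes */

/* Decide how much of the header we may safely rewrite. */
static size_t writable_hdr_bytes(size_t pkt_len, size_t hdr_off)
{
	size_t hdrsize = 8;	/* ports only: always guaranteed */

	if (pkt_len >= hdr_off + sizeof(struct sctp_hdr))
		hdrsize = sizeof(struct sctp_hdr);	/* full header, checksum included */
	return hdrsize;
}

int main(void)
{
	/* e.g. an SCTP header quoted inside an ICMP error, truncated after 8 bytes */
	printf("truncated: %zu bytes writable\n", writable_hdr_bytes(36, 28));
	printf("full:      %zu bytes writable\n", writable_hdr_bytes(60, 28));
	return 0;
}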


@ -3145,7 +3145,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
iter.count = 0; iter.count = 0;
iter.err = 0; iter.err = 0;
iter.fn = nf_tables_bind_check_setelem; iter.fn = nf_tables_bind_check_setelem;
iter.flush = false;
set->ops->walk(ctx, set, &iter); set->ops->walk(ctx, set, &iter);
if (iter.err < 0) if (iter.err < 0)
@ -3399,7 +3398,6 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
args.iter.count = 0; args.iter.count = 0;
args.iter.err = 0; args.iter.err = 0;
args.iter.fn = nf_tables_dump_setelem; args.iter.fn = nf_tables_dump_setelem;
args.iter.flush = false;
set->ops->walk(&ctx, set, &args.iter); set->ops->walk(&ctx, set, &args.iter);
nla_nest_end(skb, nest); nla_nest_end(skb, nest);
@ -3963,7 +3961,6 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
struct nft_set_iter iter = { struct nft_set_iter iter = {
.genmask = genmask, .genmask = genmask,
.fn = nft_flush_set, .fn = nft_flush_set,
.flush = true,
}; };
set->ops->walk(&ctx, set, &iter); set->ops->walk(&ctx, set, &iter);
@ -5114,7 +5111,6 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
iter.count = 0; iter.count = 0;
iter.err = 0; iter.err = 0;
iter.fn = nf_tables_loop_check_setelem; iter.fn = nf_tables_loop_check_setelem;
iter.flush = false;
set->ops->walk(ctx, set, &iter); set->ops->walk(ctx, set, &iter);
if (iter.err < 0) if (iter.err < 0)


@ -83,7 +83,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
switch (priv->key) { switch (priv->key) {
case NFT_CT_DIRECTION: case NFT_CT_DIRECTION:
*dest = CTINFO2DIR(ctinfo); nft_reg_store8(dest, CTINFO2DIR(ctinfo));
return; return;
case NFT_CT_STATUS: case NFT_CT_STATUS:
*dest = ct->status; *dest = ct->status;
@ -151,20 +151,22 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
return; return;
} }
case NFT_CT_L3PROTOCOL: case NFT_CT_L3PROTOCOL:
*dest = nf_ct_l3num(ct); nft_reg_store8(dest, nf_ct_l3num(ct));
return; return;
case NFT_CT_PROTOCOL: case NFT_CT_PROTOCOL:
*dest = nf_ct_protonum(ct); nft_reg_store8(dest, nf_ct_protonum(ct));
return; return;
#ifdef CONFIG_NF_CONNTRACK_ZONES #ifdef CONFIG_NF_CONNTRACK_ZONES
case NFT_CT_ZONE: { case NFT_CT_ZONE: {
const struct nf_conntrack_zone *zone = nf_ct_zone(ct); const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
u16 zoneid;
if (priv->dir < IP_CT_DIR_MAX) if (priv->dir < IP_CT_DIR_MAX)
*dest = nf_ct_zone_id(zone, priv->dir); zoneid = nf_ct_zone_id(zone, priv->dir);
else else
*dest = zone->id; zoneid = zone->id;
nft_reg_store16(dest, zoneid);
return; return;
} }
#endif #endif
@ -183,10 +185,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16); nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
return; return;
case NFT_CT_PROTO_SRC: case NFT_CT_PROTO_SRC:
*dest = (__force __u16)tuple->src.u.all; nft_reg_store16(dest, (__force u16)tuple->src.u.all);
return; return;
case NFT_CT_PROTO_DST: case NFT_CT_PROTO_DST:
*dest = (__force __u16)tuple->dst.u.all; nft_reg_store16(dest, (__force u16)tuple->dst.u.all);
return; return;
default: default:
break; break;
@ -205,7 +207,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
const struct nft_ct *priv = nft_expr_priv(expr); const struct nft_ct *priv = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb; struct sk_buff *skb = pkt->skb;
enum ip_conntrack_info ctinfo; enum ip_conntrack_info ctinfo;
u16 value = regs->data[priv->sreg]; u16 value = nft_reg_load16(&regs->data[priv->sreg]);
struct nf_conn *ct; struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo); ct = nf_ct_get(skb, &ctinfo);
@ -542,7 +544,8 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
case IP_CT_DIR_REPLY: case IP_CT_DIR_REPLY:
break; break;
default: default:
return -EINVAL; err = -EINVAL;
goto err1;
} }
} }
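The nft_reg_store8/16 and nft_reg_load16 conversions fix a big-endian bug: registers are u32-sized, and writing a narrow selector with "*dest = value" lands the bytes at the wrong end on big-endian hosts. A sketch of the assumed helper semantics (the real definitions are in the kernel's nf_tables headers):

/*
 * Assumed semantics of the register helpers. The point is that the value
 * must occupy the first byte(s) of the 32-bit register on both little-
 * and big-endian hosts, so the register is cleared and the narrow value
 * is written through a narrow pointer rather than widened to u32.
 */
#include <stdint.h>
#include <string.h>

static inline void demo_reg_store8(uint32_t *reg, uint8_t val)
{
        *reg = 0;                       /* clear the unused upper bytes */
        memcpy(reg, &val, sizeof(val));
}

static inline void demo_reg_store16(uint32_t *reg, uint16_t val)
{
        *reg = 0;
        memcpy(reg, &val, sizeof(val));
}

static inline uint16_t demo_reg_load16(const uint32_t *reg)
{
        uint16_t val;

        memcpy(&val, reg, sizeof(val));
        return val;
}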

View File

@ -45,16 +45,15 @@ void nft_meta_get_eval(const struct nft_expr *expr,
*dest = skb->len; *dest = skb->len;
break; break;
case NFT_META_PROTOCOL: case NFT_META_PROTOCOL:
*dest = 0; nft_reg_store16(dest, (__force u16)skb->protocol);
*(__be16 *)dest = skb->protocol;
break; break;
case NFT_META_NFPROTO: case NFT_META_NFPROTO:
*dest = nft_pf(pkt); nft_reg_store8(dest, nft_pf(pkt));
break; break;
case NFT_META_L4PROTO: case NFT_META_L4PROTO:
if (!pkt->tprot_set) if (!pkt->tprot_set)
goto err; goto err;
*dest = pkt->tprot; nft_reg_store8(dest, pkt->tprot);
break; break;
case NFT_META_PRIORITY: case NFT_META_PRIORITY:
*dest = skb->priority; *dest = skb->priority;
@ -85,14 +84,12 @@ void nft_meta_get_eval(const struct nft_expr *expr,
case NFT_META_IIFTYPE: case NFT_META_IIFTYPE:
if (in == NULL) if (in == NULL)
goto err; goto err;
*dest = 0; nft_reg_store16(dest, in->type);
*(u16 *)dest = in->type;
break; break;
case NFT_META_OIFTYPE: case NFT_META_OIFTYPE:
if (out == NULL) if (out == NULL)
goto err; goto err;
*dest = 0; nft_reg_store16(dest, out->type);
*(u16 *)dest = out->type;
break; break;
case NFT_META_SKUID: case NFT_META_SKUID:
sk = skb_to_full_sk(skb); sk = skb_to_full_sk(skb);
@ -142,19 +139,19 @@ void nft_meta_get_eval(const struct nft_expr *expr,
#endif #endif
case NFT_META_PKTTYPE: case NFT_META_PKTTYPE:
if (skb->pkt_type != PACKET_LOOPBACK) { if (skb->pkt_type != PACKET_LOOPBACK) {
*dest = skb->pkt_type; nft_reg_store8(dest, skb->pkt_type);
break; break;
} }
switch (nft_pf(pkt)) { switch (nft_pf(pkt)) {
case NFPROTO_IPV4: case NFPROTO_IPV4:
if (ipv4_is_multicast(ip_hdr(skb)->daddr)) if (ipv4_is_multicast(ip_hdr(skb)->daddr))
*dest = PACKET_MULTICAST; nft_reg_store8(dest, PACKET_MULTICAST);
else else
*dest = PACKET_BROADCAST; nft_reg_store8(dest, PACKET_BROADCAST);
break; break;
case NFPROTO_IPV6: case NFPROTO_IPV6:
*dest = PACKET_MULTICAST; nft_reg_store8(dest, PACKET_MULTICAST);
break; break;
case NFPROTO_NETDEV: case NFPROTO_NETDEV:
switch (skb->protocol) { switch (skb->protocol) {
@ -168,14 +165,14 @@ void nft_meta_get_eval(const struct nft_expr *expr,
goto err; goto err;
if (ipv4_is_multicast(iph->daddr)) if (ipv4_is_multicast(iph->daddr))
*dest = PACKET_MULTICAST; nft_reg_store8(dest, PACKET_MULTICAST);
else else
*dest = PACKET_BROADCAST; nft_reg_store8(dest, PACKET_BROADCAST);
break; break;
} }
case htons(ETH_P_IPV6): case htons(ETH_P_IPV6):
*dest = PACKET_MULTICAST; nft_reg_store8(dest, PACKET_MULTICAST);
break; break;
default: default:
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
@ -230,7 +227,9 @@ void nft_meta_set_eval(const struct nft_expr *expr,
{ {
const struct nft_meta *meta = nft_expr_priv(expr); const struct nft_meta *meta = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb; struct sk_buff *skb = pkt->skb;
u32 value = regs->data[meta->sreg]; u32 *sreg = &regs->data[meta->sreg];
u32 value = *sreg;
u8 pkt_type;
switch (meta->key) { switch (meta->key) {
case NFT_META_MARK: case NFT_META_MARK:
@ -240,9 +239,12 @@ void nft_meta_set_eval(const struct nft_expr *expr,
skb->priority = value; skb->priority = value;
break; break;
case NFT_META_PKTTYPE: case NFT_META_PKTTYPE:
if (skb->pkt_type != value && pkt_type = nft_reg_load8(sreg);
skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type))
skb->pkt_type = value; if (skb->pkt_type != pkt_type &&
skb_pkt_type_ok(pkt_type) &&
skb_pkt_type_ok(skb->pkt_type))
skb->pkt_type = pkt_type;
break; break;
case NFT_META_NFTRACE: case NFT_META_NFTRACE:
skb->nf_trace = !!value; skb->nf_trace = !!value;

View File

@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
} }
if (priv->sreg_proto_min) { if (priv->sreg_proto_min) {
range.min_proto.all = range.min_proto.all = (__force __be16)nft_reg_load16(
*(__be16 *)&regs->data[priv->sreg_proto_min]; &regs->data[priv->sreg_proto_min]);
range.max_proto.all = range.max_proto.all = (__force __be16)nft_reg_load16(
*(__be16 *)&regs->data[priv->sreg_proto_max]; &regs->data[priv->sreg_proto_max]);
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
} }

View File

@ -15,6 +15,11 @@
#include <linux/netfilter/nf_tables.h> #include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_tables.h>
struct nft_bitmap_elem {
struct list_head head;
struct nft_set_ext ext;
};
/* This bitmap uses two bits to represent one element. These two bits determine /* This bitmap uses two bits to represent one element. These two bits determine
* the element state in the current and the future generation. * the element state in the current and the future generation.
* *
@ -41,13 +46,22 @@
* restore its previous state. * restore its previous state.
*/ */
struct nft_bitmap { struct nft_bitmap {
u16 bitmap_size; struct list_head list;
u8 bitmap[]; u16 bitmap_size;
u8 bitmap[];
}; };
static inline void nft_bitmap_location(u32 key, u32 *idx, u32 *off) static inline void nft_bitmap_location(const struct nft_set *set,
const void *key,
u32 *idx, u32 *off)
{ {
u32 k = (key << 1); u32 k;
if (set->klen == 2)
k = *(u16 *)key;
else
k = *(u8 *)key;
k <<= 1;
*idx = k / BITS_PER_BYTE; *idx = k / BITS_PER_BYTE;
*off = k % BITS_PER_BYTE; *off = k % BITS_PER_BYTE;
@ -69,26 +83,48 @@ static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
u8 genmask = nft_genmask_cur(net); u8 genmask = nft_genmask_cur(net);
u32 idx, off; u32 idx, off;
nft_bitmap_location(*key, &idx, &off); nft_bitmap_location(set, key, &idx, &off);
return nft_bitmap_active(priv->bitmap, idx, off, genmask); return nft_bitmap_active(priv->bitmap, idx, off, genmask);
} }
static struct nft_bitmap_elem *
nft_bitmap_elem_find(const struct nft_set *set, struct nft_bitmap_elem *this,
u8 genmask)
{
const struct nft_bitmap *priv = nft_set_priv(set);
struct nft_bitmap_elem *be;
list_for_each_entry_rcu(be, &priv->list, head) {
if (memcmp(nft_set_ext_key(&be->ext),
nft_set_ext_key(&this->ext), set->klen) ||
!nft_set_elem_active(&be->ext, genmask))
continue;
return be;
}
return NULL;
}
static int nft_bitmap_insert(const struct net *net, const struct nft_set *set, static int nft_bitmap_insert(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem, const struct nft_set_elem *elem,
struct nft_set_ext **_ext) struct nft_set_ext **ext)
{ {
struct nft_bitmap *priv = nft_set_priv(set); struct nft_bitmap *priv = nft_set_priv(set);
struct nft_set_ext *ext = elem->priv; struct nft_bitmap_elem *new = elem->priv, *be;
u8 genmask = nft_genmask_next(net); u8 genmask = nft_genmask_next(net);
u32 idx, off; u32 idx, off;
nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); be = nft_bitmap_elem_find(set, new, genmask);
if (nft_bitmap_active(priv->bitmap, idx, off, genmask)) if (be) {
*ext = &be->ext;
return -EEXIST; return -EEXIST;
}
nft_bitmap_location(set, nft_set_ext_key(&new->ext), &idx, &off);
/* Enter 01 state. */ /* Enter 01 state. */
priv->bitmap[idx] |= (genmask << off); priv->bitmap[idx] |= (genmask << off);
list_add_tail_rcu(&new->head, &priv->list);
return 0; return 0;
} }
@ -98,13 +134,14 @@ static void nft_bitmap_remove(const struct net *net,
const struct nft_set_elem *elem) const struct nft_set_elem *elem)
{ {
struct nft_bitmap *priv = nft_set_priv(set); struct nft_bitmap *priv = nft_set_priv(set);
struct nft_set_ext *ext = elem->priv; struct nft_bitmap_elem *be = elem->priv;
u8 genmask = nft_genmask_next(net); u8 genmask = nft_genmask_next(net);
u32 idx, off; u32 idx, off;
nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
/* Enter 00 state. */ /* Enter 00 state. */
priv->bitmap[idx] &= ~(genmask << off); priv->bitmap[idx] &= ~(genmask << off);
list_del_rcu(&be->head);
} }
static void nft_bitmap_activate(const struct net *net, static void nft_bitmap_activate(const struct net *net,
@ -112,74 +149,52 @@ static void nft_bitmap_activate(const struct net *net,
const struct nft_set_elem *elem) const struct nft_set_elem *elem)
{ {
struct nft_bitmap *priv = nft_set_priv(set); struct nft_bitmap *priv = nft_set_priv(set);
struct nft_set_ext *ext = elem->priv; struct nft_bitmap_elem *be = elem->priv;
u8 genmask = nft_genmask_next(net); u8 genmask = nft_genmask_next(net);
u32 idx, off; u32 idx, off;
nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
/* Enter 11 state. */ /* Enter 11 state. */
priv->bitmap[idx] |= (genmask << off); priv->bitmap[idx] |= (genmask << off);
nft_set_elem_change_active(net, set, &be->ext);
} }
static bool nft_bitmap_flush(const struct net *net, static bool nft_bitmap_flush(const struct net *net,
const struct nft_set *set, void *ext) const struct nft_set *set, void *_be)
{ {
struct nft_bitmap *priv = nft_set_priv(set); struct nft_bitmap *priv = nft_set_priv(set);
u8 genmask = nft_genmask_next(net); u8 genmask = nft_genmask_next(net);
struct nft_bitmap_elem *be = _be;
u32 idx, off; u32 idx, off;
nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
/* Enter 10 state, similar to deactivation. */ /* Enter 10 state, similar to deactivation. */
priv->bitmap[idx] &= ~(genmask << off); priv->bitmap[idx] &= ~(genmask << off);
nft_set_elem_change_active(net, set, &be->ext);
return true; return true;
} }
static struct nft_set_ext *nft_bitmap_ext_alloc(const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_set_ext_tmpl tmpl;
struct nft_set_ext *ext;
nft_set_ext_prepare(&tmpl);
nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
ext = kzalloc(tmpl.len, GFP_KERNEL);
if (!ext)
return NULL;
nft_set_ext_init(ext, &tmpl);
memcpy(nft_set_ext_key(ext), elem->key.val.data, set->klen);
return ext;
}
static void *nft_bitmap_deactivate(const struct net *net, static void *nft_bitmap_deactivate(const struct net *net,
const struct nft_set *set, const struct nft_set *set,
const struct nft_set_elem *elem) const struct nft_set_elem *elem)
{ {
struct nft_bitmap *priv = nft_set_priv(set); struct nft_bitmap *priv = nft_set_priv(set);
struct nft_bitmap_elem *this = elem->priv, *be;
u8 genmask = nft_genmask_next(net); u8 genmask = nft_genmask_next(net);
struct nft_set_ext *ext; u32 idx, off;
u32 idx, off, key = 0;
memcpy(&key, elem->key.val.data, set->klen); nft_bitmap_location(set, elem->key.val.data, &idx, &off);
nft_bitmap_location(key, &idx, &off);
if (!nft_bitmap_active(priv->bitmap, idx, off, genmask)) be = nft_bitmap_elem_find(set, this, genmask);
return NULL; if (!be)
/* We have no real set extension since this is a bitmap, allocate this
* dummy object that is released from the commit/abort path.
*/
ext = nft_bitmap_ext_alloc(set, elem);
if (!ext)
return NULL; return NULL;
/* Enter 10 state. */ /* Enter 10 state. */
priv->bitmap[idx] &= ~(genmask << off); priv->bitmap[idx] &= ~(genmask << off);
nft_set_elem_change_active(net, set, &be->ext);
return ext; return be;
} }
static void nft_bitmap_walk(const struct nft_ctx *ctx, static void nft_bitmap_walk(const struct nft_ctx *ctx,
@ -187,47 +202,23 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx,
struct nft_set_iter *iter) struct nft_set_iter *iter)
{ {
const struct nft_bitmap *priv = nft_set_priv(set); const struct nft_bitmap *priv = nft_set_priv(set);
struct nft_set_ext_tmpl tmpl; struct nft_bitmap_elem *be;
struct nft_set_elem elem; struct nft_set_elem elem;
struct nft_set_ext *ext;
int idx, off;
u16 key;
nft_set_ext_prepare(&tmpl); list_for_each_entry_rcu(be, &priv->list, head) {
nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen); if (iter->count < iter->skip)
goto cont;
if (!nft_set_elem_active(&be->ext, iter->genmask))
goto cont;
for (idx = 0; idx < priv->bitmap_size; idx++) { elem.priv = be;
for (off = 0; off < BITS_PER_BYTE; off += 2) {
if (iter->count < iter->skip)
goto cont;
if (!nft_bitmap_active(priv->bitmap, idx, off, iter->err = iter->fn(ctx, set, iter, &elem);
iter->genmask))
goto cont;
ext = kzalloc(tmpl.len, GFP_KERNEL); if (iter->err < 0)
if (!ext) { return;
iter->err = -ENOMEM;
return;
}
nft_set_ext_init(ext, &tmpl);
key = ((idx * BITS_PER_BYTE) + off) >> 1;
memcpy(nft_set_ext_key(ext), &key, set->klen);
elem.priv = ext;
iter->err = iter->fn(ctx, set, iter, &elem);
/* On set flush, this dummy extension object is released
* from the commit/abort path.
*/
if (!iter->flush)
kfree(ext);
if (iter->err < 0)
return;
cont: cont:
iter->count++; iter->count++;
}
} }
} }
@ -258,6 +249,7 @@ static int nft_bitmap_init(const struct nft_set *set,
{ {
struct nft_bitmap *priv = nft_set_priv(set); struct nft_bitmap *priv = nft_set_priv(set);
INIT_LIST_HEAD(&priv->list);
priv->bitmap_size = nft_bitmap_size(set->klen); priv->bitmap_size = nft_bitmap_size(set->klen);
return 0; return 0;
@ -283,6 +275,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
static struct nft_set_ops nft_bitmap_ops __read_mostly = { static struct nft_set_ops nft_bitmap_ops __read_mostly = {
.privsize = nft_bitmap_privsize, .privsize = nft_bitmap_privsize,
.elemsize = offsetof(struct nft_bitmap_elem, ext),
.estimate = nft_bitmap_estimate, .estimate = nft_bitmap_estimate,
.init = nft_bitmap_init, .init = nft_bitmap_init,
.destroy = nft_bitmap_destroy, .destroy = nft_bitmap_destroy,
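For reference, the bitmap backend keeps two adjacent bits per element, one per generation, which is what nft_bitmap_location and the genmask shifts above encode. A minimal sketch of that mapping, mirroring the calls visible in this hunk (demo_* names are illustrative):

/*
 * Illustrative mirror of the calls visible above: two bits per element,
 * one for the current and one for the next generation, addressed by
 * byte index and bit offset within that byte.
 */
#include <stdbool.h>
#include <stdint.h>

static void demo_bitmap_location(uint32_t key, uint32_t *idx, uint32_t *off)
{
        uint32_t k = key << 1;          /* two bits per element */

        *idx = k / 8;                   /* BITS_PER_BYTE */
        *off = k % 8;
}

static bool demo_bitmap_active(const uint8_t *bitmap, uint32_t idx,
                               uint32_t off, uint8_t genmask)
{
        /* set:   bitmap[idx] |=  (genmask << off)
         * clear: bitmap[idx] &= ~(genmask << off)
         */
        return (bitmap[idx] & (genmask << off)) != 0;
}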

View File

@ -96,6 +96,44 @@ EXPORT_SYMBOL_GPL(nl_table);
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
"nlk_cb_mutex-ROUTE",
"nlk_cb_mutex-1",
"nlk_cb_mutex-USERSOCK",
"nlk_cb_mutex-FIREWALL",
"nlk_cb_mutex-SOCK_DIAG",
"nlk_cb_mutex-NFLOG",
"nlk_cb_mutex-XFRM",
"nlk_cb_mutex-SELINUX",
"nlk_cb_mutex-ISCSI",
"nlk_cb_mutex-AUDIT",
"nlk_cb_mutex-FIB_LOOKUP",
"nlk_cb_mutex-CONNECTOR",
"nlk_cb_mutex-NETFILTER",
"nlk_cb_mutex-IP6_FW",
"nlk_cb_mutex-DNRTMSG",
"nlk_cb_mutex-KOBJECT_UEVENT",
"nlk_cb_mutex-GENERIC",
"nlk_cb_mutex-17",
"nlk_cb_mutex-SCSITRANSPORT",
"nlk_cb_mutex-ECRYPTFS",
"nlk_cb_mutex-RDMA",
"nlk_cb_mutex-CRYPTO",
"nlk_cb_mutex-SMC",
"nlk_cb_mutex-23",
"nlk_cb_mutex-24",
"nlk_cb_mutex-25",
"nlk_cb_mutex-26",
"nlk_cb_mutex-27",
"nlk_cb_mutex-28",
"nlk_cb_mutex-29",
"nlk_cb_mutex-30",
"nlk_cb_mutex-31",
"nlk_cb_mutex-MAX_LINKS"
};
static int netlink_dump(struct sock *sk); static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb); static void netlink_skb_destructor(struct sk_buff *skb);
@ -585,6 +623,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
} else { } else {
nlk->cb_mutex = &nlk->cb_def_mutex; nlk->cb_mutex = &nlk->cb_def_mutex;
mutex_init(nlk->cb_mutex); mutex_init(nlk->cb_mutex);
lockdep_set_class_and_name(nlk->cb_mutex,
nlk_cb_mutex_keys + protocol,
nlk_cb_mutex_key_strings[protocol]);
} }
init_waitqueue_head(&nlk->wait); init_waitqueue_head(&nlk->wait);
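The per-protocol lock_class_key array exists because every netlink socket's cb_mutex is initialised from the same mutex_init() call site, so lockdep would otherwise lump them all into one class and report false deadlocks across unrelated protocols. A kernel-context sketch of the pattern (demo_* names are made up, and this fragment is not runnable on its own):

/*
 * Kernel-context fragment, not runnable standalone; demo_* names are
 * made up. One static key per protocol keeps lockdep from collapsing
 * every cb_mutex into a single class just because they share an init
 * call site.
 */
static struct lock_class_key demo_cb_mutex_keys[MAX_LINKS];

static void demo_init_cb_mutex(struct mutex *m, int protocol)
{
        mutex_init(m);
        lockdep_set_class_and_name(m, &demo_cb_mutex_keys[protocol],
                                   nlk_cb_mutex_key_strings[protocol]);
}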

View File

@ -783,8 +783,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid, if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh->nlmsg_seq, NLM_F_MULTI,
skb, CTRL_CMD_NEWFAMILY) < 0) skb, CTRL_CMD_NEWFAMILY) < 0) {
n--;
break; break;
}
} }
cb->args[0] = n; cb->args[0] = n;

View File

@ -604,7 +604,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
ipv4 = true; ipv4 = true;
break; break;
case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst, SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
nla_get_in6_addr(a), is_mask); nla_get_in6_addr(a), is_mask);
ipv6 = true; ipv6 = true;
break; break;
@ -665,6 +665,8 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
tun_flags |= TUNNEL_VXLAN_OPT; tun_flags |= TUNNEL_VXLAN_OPT;
opts_type = type; opts_type = type;
break; break;
case OVS_TUNNEL_KEY_ATTR_PAD:
break;
default: default:
OVS_NLERR(log, "Unknown IP tunnel attribute %d", OVS_NLERR(log, "Unknown IP tunnel attribute %d",
type); type);

View File

@ -275,6 +275,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
rxrpc_conn_retransmit_call(conn, skb); rxrpc_conn_retransmit_call(conn, skb);
return 0; return 0;
case RXRPC_PACKET_TYPE_BUSY:
/* Just ignore BUSY packets for now. */
return 0;
case RXRPC_PACKET_TYPE_ABORT: case RXRPC_PACKET_TYPE_ABORT:
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
&wtmp, sizeof(wtmp)) < 0) &wtmp, sizeof(wtmp)) < 0)

View File

@ -201,9 +201,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p); pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
if (p->set_tc_index) { if (p->set_tc_index) {
int wlen = skb_network_offset(skb);
switch (tc_skb_protocol(skb)) { switch (tc_skb_protocol(skb)) {
case htons(ETH_P_IP): case htons(ETH_P_IP):
if (skb_cow_head(skb, sizeof(struct iphdr))) wlen += sizeof(struct iphdr);
if (!pskb_may_pull(skb, wlen) ||
skb_try_make_writable(skb, wlen))
goto drop; goto drop;
skb->tc_index = ipv4_get_dsfield(ip_hdr(skb)) skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
@ -211,7 +215,9 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
break; break;
case htons(ETH_P_IPV6): case htons(ETH_P_IPV6):
if (skb_cow_head(skb, sizeof(struct ipv6hdr))) wlen += sizeof(struct ipv6hdr);
if (!pskb_may_pull(skb, wlen) ||
skb_try_make_writable(skb, wlen))
goto drop; goto drop;
skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb)) skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
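The dsmark change replaces skb_cow_head() with an explicit pull-and-make-writable step: skb_cow_head() only guarantees headroom, while rewriting the DS field needs the bytes up to the IP header to be linear and unshared. A kernel-context sketch of that guard (the helper name is illustrative):

/*
 * Kernel-context sketch; the helper name is made up. skb_cow_head()
 * only guarantees headroom, so before rewriting the DS field the bytes
 * up to and including the IP header must be pulled into the linear
 * area and made unshared.
 */
static int demo_make_iph_writable(struct sk_buff *skb)
{
        int wlen = skb_network_offset(skb) + sizeof(struct iphdr);

        if (!pskb_may_pull(skb, wlen) || skb_try_make_writable(skb, wlen))
                return -ENOMEM;         /* caller drops the packet */

        return 0;
}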

View File

@ -71,9 +71,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
{ {
struct net *net = sock_net(sk); struct net *net = sock_net(sk);
struct sctp_sock *sp; struct sctp_sock *sp;
int i;
sctp_paramhdr_t *p; sctp_paramhdr_t *p;
int err; int i;
/* Retrieve the SCTP per socket area. */ /* Retrieve the SCTP per socket area. */
sp = sctp_sk((struct sock *)sk); sp = sctp_sk((struct sock *)sk);
@ -264,8 +263,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
/* AUTH related initializations */ /* AUTH related initializations */
INIT_LIST_HEAD(&asoc->endpoint_shared_keys); INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp); if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
if (err)
goto fail_init; goto fail_init;
asoc->active_key_id = ep->active_key_id; asoc->active_key_id = ep->active_key_id;

View File

@ -546,7 +546,6 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
struct sctp_association *asoc = tp->asoc; struct sctp_association *asoc = tp->asoc;
struct sctp_chunk *chunk, *tmp; struct sctp_chunk *chunk, *tmp;
int pkt_count, gso = 0; int pkt_count, gso = 0;
int confirm;
struct dst_entry *dst; struct dst_entry *dst;
struct sk_buff *head; struct sk_buff *head;
struct sctphdr *sh; struct sctphdr *sh;
@ -625,13 +624,13 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
asoc->peer.last_sent_to = tp; asoc->peer.last_sent_to = tp;
} }
head->ignore_df = packet->ipfragok; head->ignore_df = packet->ipfragok;
confirm = tp->dst_pending_confirm; if (tp->dst_pending_confirm)
if (confirm)
skb_set_dst_pending_confirm(head, 1); skb_set_dst_pending_confirm(head, 1);
/* neighbour should be confirmed on successful transmission or /* neighbour should be confirmed on successful transmission or
* positive error * positive error
*/ */
if (tp->af_specific->sctp_xmit(head, tp) >= 0 && confirm) if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
tp->dst_pending_confirm)
tp->dst_pending_confirm = 0; tp->dst_pending_confirm = 0;
out: out:

View File

@ -382,17 +382,18 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
} }
static int sctp_prsctp_prune_unsent(struct sctp_association *asoc, static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
struct sctp_sndrcvinfo *sinfo, struct sctp_sndrcvinfo *sinfo, int msg_len)
struct list_head *queue, int msg_len)
{ {
struct sctp_outq *q = &asoc->outqueue;
struct sctp_chunk *chk, *temp; struct sctp_chunk *chk, *temp;
list_for_each_entry_safe(chk, temp, queue, list) { list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive) chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
continue; continue;
list_del_init(&chk->list); list_del_init(&chk->list);
q->out_qlen -= chk->skb->len;
asoc->sent_cnt_removable--; asoc->sent_cnt_removable--;
asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
@ -431,9 +432,7 @@ void sctp_prsctp_prune(struct sctp_association *asoc,
return; return;
} }
sctp_prsctp_prune_unsent(asoc, sinfo, sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
&asoc->outqueue.out_chunk_list,
msg_len);
} }
/* Mark all the eligible packets on a transport for retransmission. */ /* Mark all the eligible packets on a transport for retransmission. */

View File

@ -652,6 +652,16 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
} }
EXPORT_SYMBOL(kernel_sendmsg); EXPORT_SYMBOL(kernel_sendmsg);
static bool skb_is_err_queue(const struct sk_buff *skb)
{
/* The pkt_type of skbs enqueued on the error queue is set to
* PACKET_OUTGOING in skb_set_err_queue(). This is only safe to do
* in recvmsg, since skbs received on a local socket will never
* have a pkt_type of PACKET_OUTGOING.
*/
return skb->pkt_type == PACKET_OUTGOING;
}
/* /*
* called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
*/ */
@ -695,7 +705,8 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
put_cmsg(msg, SOL_SOCKET, put_cmsg(msg, SOL_SOCKET,
SCM_TIMESTAMPING, sizeof(tss), &tss); SCM_TIMESTAMPING, sizeof(tss), &tss);
if (skb->len && (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS)) if (skb_is_err_queue(skb) && skb->len &&
SKB_EXT_ERR(skb)->opt_stats)
put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS, put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS,
skb->len, skb->data); skb->len, skb->data);
} }

View File

@ -141,6 +141,11 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
static void tipc_subscrp_timeout(unsigned long data) static void tipc_subscrp_timeout(unsigned long data)
{ {
struct tipc_subscription *sub = (struct tipc_subscription *)data; struct tipc_subscription *sub = (struct tipc_subscription *)data;
struct tipc_subscriber *subscriber = sub->subscriber;
spin_lock_bh(&subscriber->lock);
tipc_nametbl_unsubscribe(sub);
spin_unlock_bh(&subscriber->lock);
/* Notify subscriber of timeout */ /* Notify subscriber of timeout */
tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
@ -173,7 +178,6 @@ static void tipc_subscrp_kref_release(struct kref *kref)
struct tipc_subscriber *subscriber = sub->subscriber; struct tipc_subscriber *subscriber = sub->subscriber;
spin_lock_bh(&subscriber->lock); spin_lock_bh(&subscriber->lock);
tipc_nametbl_unsubscribe(sub);
list_del(&sub->subscrp_list); list_del(&sub->subscrp_list);
atomic_dec(&tn->subscription_count); atomic_dec(&tn->subscription_count);
spin_unlock_bh(&subscriber->lock); spin_unlock_bh(&subscriber->lock);
@ -205,6 +209,7 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
continue; continue;
tipc_nametbl_unsubscribe(sub);
tipc_subscrp_get(sub); tipc_subscrp_get(sub);
spin_unlock_bh(&subscriber->lock); spin_unlock_bh(&subscriber->lock);
tipc_subscrp_delete(sub); tipc_subscrp_delete(sub);

View File

@ -146,6 +146,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
if (s) { if (s) {
struct unix_sock *u = unix_sk(s); struct unix_sock *u = unix_sk(s);
BUG_ON(!atomic_long_read(&u->inflight));
BUG_ON(list_empty(&u->link)); BUG_ON(list_empty(&u->link));
if (atomic_long_dec_and_test(&u->inflight)) if (atomic_long_dec_and_test(&u->inflight))
@ -341,6 +342,14 @@ void unix_gc(void)
} }
list_del(&cursor); list_del(&cursor);
/* Now gc_candidates contains only garbage. Restore original
* inflight counters for these as well, and remove the skbuffs
* which are creating the cycle(s).
*/
skb_queue_head_init(&hitlist);
list_for_each_entry(u, &gc_candidates, link)
scan_children(&u->sk, inc_inflight, &hitlist);
/* not_cycle_list contains those sockets which do not make up a /* not_cycle_list contains those sockets which do not make up a
* cycle. Restore these to the inflight list. * cycle. Restore these to the inflight list.
*/ */
@ -350,14 +359,6 @@ void unix_gc(void)
list_move_tail(&u->link, &gc_inflight_list); list_move_tail(&u->link, &gc_inflight_list);
} }
/* Now gc_candidates contains only garbage. Restore original
* inflight counters for these as well, and remove the skbuffs
* which are creating the cycle(s).
*/
skb_queue_head_init(&hitlist);
list_for_each_entry(u, &gc_candidates, link)
scan_children(&u->sk, inc_inflight, &hitlist);
spin_unlock(&unix_gc_lock); spin_unlock(&unix_gc_lock);
/* Here we are. Hitlist is filled. Die. */ /* Here we are. Hitlist is filled. Die. */

View File

@ -1102,10 +1102,19 @@ static const struct proto_ops vsock_dgram_ops = {
.sendpage = sock_no_sendpage, .sendpage = sock_no_sendpage,
}; };
static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
{
if (!transport->cancel_pkt)
return -EOPNOTSUPP;
return transport->cancel_pkt(vsk);
}
static void vsock_connect_timeout(struct work_struct *work) static void vsock_connect_timeout(struct work_struct *work)
{ {
struct sock *sk; struct sock *sk;
struct vsock_sock *vsk; struct vsock_sock *vsk;
int cancel = 0;
vsk = container_of(work, struct vsock_sock, dwork.work); vsk = container_of(work, struct vsock_sock, dwork.work);
sk = sk_vsock(vsk); sk = sk_vsock(vsk);
@ -1116,8 +1125,11 @@ static void vsock_connect_timeout(struct work_struct *work)
sk->sk_state = SS_UNCONNECTED; sk->sk_state = SS_UNCONNECTED;
sk->sk_err = ETIMEDOUT; sk->sk_err = ETIMEDOUT;
sk->sk_error_report(sk); sk->sk_error_report(sk);
cancel = 1;
} }
release_sock(sk); release_sock(sk);
if (cancel)
vsock_transport_cancel_pkt(vsk);
sock_put(sk); sock_put(sk);
} }
@ -1224,11 +1236,13 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
err = sock_intr_errno(timeout); err = sock_intr_errno(timeout);
sk->sk_state = SS_UNCONNECTED; sk->sk_state = SS_UNCONNECTED;
sock->state = SS_UNCONNECTED; sock->state = SS_UNCONNECTED;
vsock_transport_cancel_pkt(vsk);
goto out_wait; goto out_wait;
} else if (timeout == 0) { } else if (timeout == 0) {
err = -ETIMEDOUT; err = -ETIMEDOUT;
sk->sk_state = SS_UNCONNECTED; sk->sk_state = SS_UNCONNECTED;
sock->state = SS_UNCONNECTED; sock->state = SS_UNCONNECTED;
vsock_transport_cancel_pkt(vsk);
goto out_wait; goto out_wait;
} }

View File

@ -213,6 +213,47 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
return len; return len;
} }
static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
struct virtio_vsock *vsock;
struct virtio_vsock_pkt *pkt, *n;
int cnt = 0;
LIST_HEAD(freeme);
vsock = virtio_vsock_get();
if (!vsock) {
return -ENODEV;
}
spin_lock_bh(&vsock->send_pkt_list_lock);
list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
if (pkt->vsk != vsk)
continue;
list_move(&pkt->list, &freeme);
}
spin_unlock_bh(&vsock->send_pkt_list_lock);
list_for_each_entry_safe(pkt, n, &freeme, list) {
if (pkt->reply)
cnt++;
list_del(&pkt->list);
virtio_transport_free_pkt(pkt);
}
if (cnt) {
struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
int new_cnt;
new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
new_cnt < virtqueue_get_vring_size(rx_vq))
queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
return 0;
}
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{ {
int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE; int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
@ -462,6 +503,7 @@ static struct virtio_transport virtio_transport = {
.release = virtio_transport_release, .release = virtio_transport_release,
.connect = virtio_transport_connect, .connect = virtio_transport_connect,
.shutdown = virtio_transport_shutdown, .shutdown = virtio_transport_shutdown,
.cancel_pkt = virtio_transport_cancel_pkt,
.dgram_bind = virtio_transport_dgram_bind, .dgram_bind = virtio_transport_dgram_bind,
.dgram_dequeue = virtio_transport_dgram_dequeue, .dgram_dequeue = virtio_transport_dgram_dequeue,

View File

@ -58,6 +58,7 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
pkt->len = len; pkt->len = len;
pkt->hdr.len = cpu_to_le32(len); pkt->hdr.len = cpu_to_le32(len);
pkt->reply = info->reply; pkt->reply = info->reply;
pkt->vsk = info->vsk;
if (info->msg && len > 0) { if (info->msg && len > 0) {
pkt->buf = kmalloc(len, GFP_KERNEL); pkt->buf = kmalloc(len, GFP_KERNEL);
@ -180,6 +181,7 @@ static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
struct virtio_vsock_pkt_info info = { struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_CREDIT_UPDATE, .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
.type = type, .type = type,
.vsk = vsk,
}; };
return virtio_transport_send_pkt_info(vsk, &info); return virtio_transport_send_pkt_info(vsk, &info);
@ -519,6 +521,7 @@ int virtio_transport_connect(struct vsock_sock *vsk)
struct virtio_vsock_pkt_info info = { struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_REQUEST, .op = VIRTIO_VSOCK_OP_REQUEST,
.type = VIRTIO_VSOCK_TYPE_STREAM, .type = VIRTIO_VSOCK_TYPE_STREAM,
.vsk = vsk,
}; };
return virtio_transport_send_pkt_info(vsk, &info); return virtio_transport_send_pkt_info(vsk, &info);
@ -534,6 +537,7 @@ int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
VIRTIO_VSOCK_SHUTDOWN_RCV : 0) | VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
(mode & SEND_SHUTDOWN ? (mode & SEND_SHUTDOWN ?
VIRTIO_VSOCK_SHUTDOWN_SEND : 0), VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
.vsk = vsk,
}; };
return virtio_transport_send_pkt_info(vsk, &info); return virtio_transport_send_pkt_info(vsk, &info);
@ -560,6 +564,7 @@ virtio_transport_stream_enqueue(struct vsock_sock *vsk,
.type = VIRTIO_VSOCK_TYPE_STREAM, .type = VIRTIO_VSOCK_TYPE_STREAM,
.msg = msg, .msg = msg,
.pkt_len = len, .pkt_len = len,
.vsk = vsk,
}; };
return virtio_transport_send_pkt_info(vsk, &info); return virtio_transport_send_pkt_info(vsk, &info);
@ -581,6 +586,7 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
.op = VIRTIO_VSOCK_OP_RST, .op = VIRTIO_VSOCK_OP_RST,
.type = VIRTIO_VSOCK_TYPE_STREAM, .type = VIRTIO_VSOCK_TYPE_STREAM,
.reply = !!pkt, .reply = !!pkt,
.vsk = vsk,
}; };
/* Send RST only if the original pkt is not a RST pkt */ /* Send RST only if the original pkt is not a RST pkt */
@ -826,6 +832,7 @@ virtio_transport_send_response(struct vsock_sock *vsk,
.remote_cid = le64_to_cpu(pkt->hdr.src_cid), .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
.remote_port = le32_to_cpu(pkt->hdr.src_port), .remote_port = le32_to_cpu(pkt->hdr.src_port),
.reply = true, .reply = true,
.vsk = vsk,
}; };
return virtio_transport_send_pkt_info(vsk, &info); return virtio_transport_send_pkt_info(vsk, &info);

View File

@ -545,22 +545,18 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
{ {
int err; int err;
rtnl_lock();
if (!cb->args[0]) { if (!cb->args[0]) {
err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
genl_family_attrbuf(&nl80211_fam), genl_family_attrbuf(&nl80211_fam),
nl80211_fam.maxattr, nl80211_policy); nl80211_fam.maxattr, nl80211_policy);
if (err) if (err)
goto out_unlock; return err;
*wdev = __cfg80211_wdev_from_attrs( *wdev = __cfg80211_wdev_from_attrs(
sock_net(skb->sk), sock_net(skb->sk),
genl_family_attrbuf(&nl80211_fam)); genl_family_attrbuf(&nl80211_fam));
if (IS_ERR(*wdev)) { if (IS_ERR(*wdev))
err = PTR_ERR(*wdev); return PTR_ERR(*wdev);
goto out_unlock;
}
*rdev = wiphy_to_rdev((*wdev)->wiphy); *rdev = wiphy_to_rdev((*wdev)->wiphy);
/* 0 is the first index - add 1 to parse only once */ /* 0 is the first index - add 1 to parse only once */
cb->args[0] = (*rdev)->wiphy_idx + 1; cb->args[0] = (*rdev)->wiphy_idx + 1;
@ -570,10 +566,8 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
struct wireless_dev *tmp; struct wireless_dev *tmp;
if (!wiphy) { if (!wiphy)
err = -ENODEV; return -ENODEV;
goto out_unlock;
}
*rdev = wiphy_to_rdev(wiphy); *rdev = wiphy_to_rdev(wiphy);
*wdev = NULL; *wdev = NULL;
@ -584,21 +578,11 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
} }
} }
if (!*wdev) { if (!*wdev)
err = -ENODEV; return -ENODEV;
goto out_unlock;
}
} }
return 0; return 0;
out_unlock:
rtnl_unlock();
return err;
}
static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev)
{
rtnl_unlock();
} }
/* IE validation */ /* IE validation */
@ -2608,17 +2592,17 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
int filter_wiphy = -1; int filter_wiphy = -1;
struct cfg80211_registered_device *rdev; struct cfg80211_registered_device *rdev;
struct wireless_dev *wdev; struct wireless_dev *wdev;
int ret;
rtnl_lock(); rtnl_lock();
if (!cb->args[2]) { if (!cb->args[2]) {
struct nl80211_dump_wiphy_state state = { struct nl80211_dump_wiphy_state state = {
.filter_wiphy = -1, .filter_wiphy = -1,
}; };
int ret;
ret = nl80211_dump_wiphy_parse(skb, cb, &state); ret = nl80211_dump_wiphy_parse(skb, cb, &state);
if (ret) if (ret)
return ret; goto out_unlock;
filter_wiphy = state.filter_wiphy; filter_wiphy = state.filter_wiphy;
@ -2663,12 +2647,14 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
wp_idx++; wp_idx++;
} }
out: out:
rtnl_unlock();
cb->args[0] = wp_idx; cb->args[0] = wp_idx;
cb->args[1] = if_idx; cb->args[1] = if_idx;
return skb->len; ret = skb->len;
out_unlock:
rtnl_unlock();
return ret;
} }
static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info) static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
@ -4452,9 +4438,10 @@ static int nl80211_dump_station(struct sk_buff *skb,
int sta_idx = cb->args[2]; int sta_idx = cb->args[2];
int err; int err;
rtnl_lock();
err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (err) if (err)
return err; goto out_err;
if (!wdev->netdev) { if (!wdev->netdev) {
err = -EINVAL; err = -EINVAL;
@ -4489,7 +4476,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
cb->args[2] = sta_idx; cb->args[2] = sta_idx;
err = skb->len; err = skb->len;
out_err: out_err:
nl80211_finish_wdev_dump(rdev); rtnl_unlock();
return err; return err;
} }
@ -5275,9 +5262,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
int path_idx = cb->args[2]; int path_idx = cb->args[2];
int err; int err;
rtnl_lock();
err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (err) if (err)
return err; goto out_err;
if (!rdev->ops->dump_mpath) { if (!rdev->ops->dump_mpath) {
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
@ -5310,7 +5298,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
cb->args[2] = path_idx; cb->args[2] = path_idx;
err = skb->len; err = skb->len;
out_err: out_err:
nl80211_finish_wdev_dump(rdev); rtnl_unlock();
return err; return err;
} }
@ -5470,9 +5458,10 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
int path_idx = cb->args[2]; int path_idx = cb->args[2];
int err; int err;
rtnl_lock();
err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (err) if (err)
return err; goto out_err;
if (!rdev->ops->dump_mpp) { if (!rdev->ops->dump_mpp) {
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
@ -5505,7 +5494,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
cb->args[2] = path_idx; cb->args[2] = path_idx;
err = skb->len; err = skb->len;
out_err: out_err:
nl80211_finish_wdev_dump(rdev); rtnl_unlock();
return err; return err;
} }
@ -7674,9 +7663,12 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
int start = cb->args[2], idx = 0; int start = cb->args[2], idx = 0;
int err; int err;
rtnl_lock();
err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (err) if (err) {
rtnl_unlock();
return err; return err;
}
wdev_lock(wdev); wdev_lock(wdev);
spin_lock_bh(&rdev->bss_lock); spin_lock_bh(&rdev->bss_lock);
@ -7699,7 +7691,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
wdev_unlock(wdev); wdev_unlock(wdev);
cb->args[2] = idx; cb->args[2] = idx;
nl80211_finish_wdev_dump(rdev); rtnl_unlock();
return skb->len; return skb->len;
} }
@ -7784,9 +7776,10 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
int res; int res;
bool radio_stats; bool radio_stats;
rtnl_lock();
res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (res) if (res)
return res; goto out_err;
/* prepare_wdev_dump parsed the attributes */ /* prepare_wdev_dump parsed the attributes */
radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS]; radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS];
@ -7827,7 +7820,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[2] = survey_idx; cb->args[2] = survey_idx;
res = skb->len; res = skb->len;
out_err: out_err:
nl80211_finish_wdev_dump(rdev); rtnl_unlock();
return res; return res;
} }
@ -11508,17 +11501,13 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
void *data = NULL; void *data = NULL;
unsigned int data_len = 0; unsigned int data_len = 0;
rtnl_lock();
if (cb->args[0]) { if (cb->args[0]) {
/* subtract the 1 again here */ /* subtract the 1 again here */
struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
struct wireless_dev *tmp; struct wireless_dev *tmp;
if (!wiphy) { if (!wiphy)
err = -ENODEV; return -ENODEV;
goto out_unlock;
}
*rdev = wiphy_to_rdev(wiphy); *rdev = wiphy_to_rdev(wiphy);
*wdev = NULL; *wdev = NULL;
@ -11538,23 +11527,19 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
attrbuf, nl80211_fam.maxattr, nl80211_policy); attrbuf, nl80211_fam.maxattr, nl80211_policy);
if (err) if (err)
goto out_unlock; return err;
if (!attrbuf[NL80211_ATTR_VENDOR_ID] || if (!attrbuf[NL80211_ATTR_VENDOR_ID] ||
!attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) { !attrbuf[NL80211_ATTR_VENDOR_SUBCMD])
err = -EINVAL; return -EINVAL;
goto out_unlock;
}
*wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf); *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf);
if (IS_ERR(*wdev)) if (IS_ERR(*wdev))
*wdev = NULL; *wdev = NULL;
*rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf); *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf);
if (IS_ERR(*rdev)) { if (IS_ERR(*rdev))
err = PTR_ERR(*rdev); return PTR_ERR(*rdev);
goto out_unlock;
}
vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]); vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]);
subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]); subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]);
@ -11567,19 +11552,15 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd) if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
continue; continue;
if (!vcmd->dumpit) { if (!vcmd->dumpit)
err = -EOPNOTSUPP; return -EOPNOTSUPP;
goto out_unlock;
}
vcmd_idx = i; vcmd_idx = i;
break; break;
} }
if (vcmd_idx < 0) { if (vcmd_idx < 0)
err = -EOPNOTSUPP; return -EOPNOTSUPP;
goto out_unlock;
}
if (attrbuf[NL80211_ATTR_VENDOR_DATA]) { if (attrbuf[NL80211_ATTR_VENDOR_DATA]) {
data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]); data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]);
@ -11596,9 +11577,6 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
/* keep rtnl locked in successful case */ /* keep rtnl locked in successful case */
return 0; return 0;
out_unlock:
rtnl_unlock();
return err;
} }
static int nl80211_vendor_cmd_dump(struct sk_buff *skb, static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
@ -11613,9 +11591,10 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
int err; int err;
struct nlattr *vendor_data; struct nlattr *vendor_data;
rtnl_lock();
err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev); err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
if (err) if (err)
return err; goto out;
vcmd_idx = cb->args[2]; vcmd_idx = cb->args[2];
data = (void *)cb->args[3]; data = (void *)cb->args[3];
@ -11624,15 +11603,21 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV | if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_NETDEV)) { WIPHY_VENDOR_CMD_NEED_NETDEV)) {
if (!wdev) if (!wdev) {
return -EINVAL; err = -EINVAL;
goto out;
}
if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV && if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
!wdev->netdev) !wdev->netdev) {
return -EINVAL; err = -EINVAL;
goto out;
}
if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) { if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
if (!wdev_running(wdev)) if (!wdev_running(wdev)) {
return -ENETDOWN; err = -ENETDOWN;
goto out;
}
} }
} }
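All of the nl80211 dumpit changes above converge on one locking convention: take the RTNL in the dump callback itself, let nl80211_prepare_wdev_dump assume it is held, and release it on every exit path instead of hiding the unlock in a separate finish helper. A kernel-context sketch of the resulting shape (demo_dump is illustrative):

/*
 * Kernel-context sketch; demo_dump() is illustrative. The RTNL is taken
 * by the dump callback, nl80211_prepare_wdev_dump() assumes it is held,
 * and every exit path funnels through the unlock.
 */
static int demo_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct cfg80211_registered_device *rdev;
        struct wireless_dev *wdev;
        int err;

        rtnl_lock();
        err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
        if (err)
                goto out;

        /* ... fill the skb from rdev/wdev state ... */
        err = skb->len;
out:
        rtnl_unlock();
        return err;
}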

View File

@ -1,22 +1,23 @@
LIBDIR := ../../../lib LIBDIR := ../../../lib
BPFOBJ := $(LIBDIR)/bpf/bpf.o BPFDIR := $(LIBDIR)/bpf
CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR) $(BPFOBJ) CFLAGS += -Wall -O2 -I../../../include/uapi -I$(LIBDIR)
LDLIBS += -lcap
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map
TEST_PROGS := test_kmod.sh TEST_PROGS := test_kmod.sh
all: $(TEST_GEN_PROGS) include ../lib.mk
.PHONY: all clean force BPFOBJ := $(OUTPUT)/bpf.o
$(TEST_GEN_PROGS): $(BPFOBJ)
.PHONY: force
# force a rebuild of BPFOBJ when its dependencies are updated # force a rebuild of BPFOBJ when its dependencies are updated
force: force:
$(BPFOBJ): force $(BPFOBJ): force
$(MAKE) -C $(dir $(BPFOBJ)) $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
$(test_objs): $(BPFOBJ)
include ../lib.mk

View File

@ -80,8 +80,9 @@ static void test_hashmap(int task, void *data)
assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0); assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0);
key = 2; key = 2;
assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
key = 1; key = 3;
assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
errno == E2BIG);
/* Check that key = 0 doesn't exist. */ /* Check that key = 0 doesn't exist. */
key = 0; key = 0;
@ -110,6 +111,24 @@ static void test_hashmap(int task, void *data)
close(fd); close(fd);
} }
static void test_hashmap_sizes(int task, void *data)
{
int fd, i, j;
for (i = 1; i <= 512; i <<= 1)
for (j = 1; j <= 1 << 18; j <<= 1) {
fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j,
2, map_flags);
if (fd < 0) {
printf("Failed to create hashmap key=%d value=%d '%s'\n",
i, j, strerror(errno));
exit(1);
}
close(fd);
usleep(10); /* give kernel time to destroy */
}
}
static void test_hashmap_percpu(int task, void *data) static void test_hashmap_percpu(int task, void *data)
{ {
unsigned int nr_cpus = bpf_num_possible_cpus(); unsigned int nr_cpus = bpf_num_possible_cpus();
@ -317,7 +336,10 @@ static void test_arraymap_percpu(int task, void *data)
static void test_arraymap_percpu_many_keys(void) static void test_arraymap_percpu_many_keys(void)
{ {
unsigned int nr_cpus = bpf_num_possible_cpus(); unsigned int nr_cpus = bpf_num_possible_cpus();
unsigned int nr_keys = 20000; /* nr_keys is not too large; otherwise the test stresses the percpu
* allocator more than anything else
*/
unsigned int nr_keys = 2000;
long values[nr_cpus]; long values[nr_cpus];
int key, fd, i; int key, fd, i;
@ -419,6 +441,7 @@ static void test_map_stress(void)
{ {
run_parallel(100, test_hashmap, NULL); run_parallel(100, test_hashmap, NULL);
run_parallel(100, test_hashmap_percpu, NULL); run_parallel(100, test_hashmap_percpu, NULL);
run_parallel(100, test_hashmap_sizes, NULL);
run_parallel(100, test_arraymap, NULL); run_parallel(100, test_arraymap, NULL);
run_parallel(100, test_arraymap_percpu, NULL); run_parallel(100, test_arraymap_percpu, NULL);