Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Several netfilter fixes from Pablo and the crew:
    - Handle fragmented packets properly in netfilter conntrack, from Florian Westphal.
    - Fix SCTP ICMP packet handling, from Ying Xue.
    - Fix big-endian bug in nftables, from Liping Zhang.
    - Fix alignment of fake conntrack entry, from Steven Rostedt.

 2) Fix feature flags setting in fjes driver, from Taku Izumi.

 3) Openvswitch ipv6 tunnel source address not set properly, from Or Gerlitz.

 4) Fix jumbo MTU handling in amd-xgbe driver, from Thomas Lendacky.

 5) sk->sk_frag.page not released properly in some cases, from Eric Dumazet.

 6) Fix RTNL deadlocks in nl80211, from Johannes Berg.

 7) Fix erroneous RTNL lockdep splat in crypto, from Herbert Xu.

 8) Cure improper inflight handling during AF_UNIX GC, from Andrey Ulanov.

 9) sch_dsmark doesn't write to packet headers properly, from Eric Dumazet.

10) Fix SCM_TIMESTAMPING_OPT_STATS handling in TCP, from Soheil Hassas Yeganeh.

11) Add some IDs for Motorola qmi_wwan chips, from Tony Lindgren.

12) Fix nametbl deadlock in tipc, from Ying Xue.

13) GRO and LRO packets not counted correctly in mlx5 driver, from Gal Pressman.

14) Fix reset of internal PHYs in bcmgenet, from Doug Berger.

15) Fix hashmap allocation handling, from Alexei Starovoitov.

16) nl_fib_input() needs stronger netlink message length checking, from Eric Dumazet.

17) Fix double-free of sk->sk_filter during sock clone, from Daniel Borkmann.

18) Fix RX checksum offloading in aquantia driver, from Pavel Belous.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (85 commits)
  net:ethernet:aquantia: Fix for RX checksum offload.
  amd-xgbe: Fix the ECC-related bit position definitions
  sfc: cleanup a condition in efx_udp_tunnel_del()
  Bluetooth: btqcomsmd: fix compile-test dependency
  inet: frag: release spinlock before calling icmp_send()
  tcp: initialize icsk_ack.lrcvtime at session start time
  genetlink: fix counting regression on ctrl_dumpfamily()
  socket, bpf: fix sk_filter use after free in sk_clone_lock
  ipv4: provide stronger user input validation in nl_fib_input()
  bpf: fix hashmap extra_elems logic
  enic: update enic maintainers
  net: bcmgenet: remove bcmgenet_internal_phy_setup()
  ipv6: make sure to initialize sockc.tsflags before first use
  fjes: Do not load fjes driver if extended socket device is not power on.
  fjes: Do not load fjes driver if system does not have extended socket device.
  net/mlx5e: Count LRO packets correctly
  net/mlx5e: Count GSO packets correctly
  net/mlx5: Increase number of max QPs in default profile
  net/mlx5e: Avoid supporting udp tunnel port ndo for VF reps
  net/mlx5e: Use the proper UAPI values when offloading TC vlan actions
  ...
commit f341d9f08a

MAINTAINERS | 18
Changes by file:

MAINTAINERS:
  - trim the CISCO VIC ETHERNET NIC DRIVER maintainer list
  - drop the MACVLAN DRIVER and VLAN (802.1Q) entries that listed
    Patrick McHardy <kaber@trash.net>
  - add Ganapathi Bhat <gbhat@marvell.com> and Xinming Hu <huxm@marvell.com>
    to the MARVELL MWIFIEX WIRELESS DRIVER entry
drivers/bluetooth/Kconfig:
  - BT_QCOMSMD: depend on QCOM_SMD and QCOM_WCNSS_CTRL individually (or on
    COMPILE_TEST with them disabled) so the driver can be compile-tested

drivers/net/ethernet/amd/xgbe/xgbe-common.h:
  - correct the XP_ECC_IER_* / XP_ECC_ISR_* DESC/RX/TX SEC and DED bit
    position definitions
  - replace the RX_PACKET_ATTRIBUTES INCOMPLETE bit with LAST, add a FIRST
    bit and the RX_NORMAL_DESC0 OVT field

drivers/net/ethernet/amd/xgbe/xgbe-dev.c, xgbe-drv.c:
  - rework receive processing for jumbo frames: xgbe_dev_read() marks
    FIRST/LAST descriptors, xgbe_create_skb() copies only the header
    buffer, and new helpers xgbe_rx_buf1_len()/xgbe_rx_buf2_len() let
    xgbe_rx_poll() size the header and data buffers per descriptor
drivers/net/ethernet/aquantia/:
  - set ndev->mtu in aq_ndev_change_mtu() and add NETIF_F_RXCSUM to the
    A0/B0 hardware feature lists

drivers/net/ethernet/broadcom/genet/:
  - suspend/resume the PHY only when the device may not wake the system,
    and remove bcmgenet_internal_phy_setup() along with its call from
    bcmgenet_mii_config()

drivers/net/ethernet/brocade/bna/bnad_debugfs.c:
  - reject register-read requests whose length would overflow
    (len > UINT_MAX >> 2)

drivers/net/ethernet/ibm/ibmvnic.c:
  - NULL the tx_scrq/rx_scrq pointers after freeing them in
    release_sub_crqs()
drivers/net/ethernet/mellanox/mlx4/:
  - stop retrying the slave/PF communication channel when
    MLX4_INTERFACE_STATE_NOWAIT is set, and set that flag for slaves in
    mlx4_remove_one()

drivers/net/ethernet/mellanox/mlx5/core/:
  - cmd.c: handle SET_RATE_LIMIT/QUERY_RATE_LIMIT in the internal-error
    return path and the command string table
  - en.h, en_main.c, en_rep.c: make the VXLAN port add/del ndo helpers
    static and drop them from the VF representor netdev ops
  - en_rx.c, en_tx.c: count LRO and GSO segments correctly in the packet
    statistics
  - en_tc.c: split flow teardown into mlx5e_tc_del_nic_flow() and
    mlx5e_tc_del_fdb_flow(), look up VXLAN ports on the uplink netdev, and
    use the TCA_VLAN_ACT_* UAPI values for offloaded vlan actions
  - eswitch.h, eswitch_offloads.c: track the number of offloaded flows and
    refuse to change the inline mode while flows are configured
drivers/net/ethernet/mellanox/mlx5/core/main.c:
  - raise log_max_qp in the default profile from 17 to 18

drivers/net/ethernet/sfc/efx.c:
  - check udp_tnl_del_port (not udp_tnl_add_port) in efx_udp_tunnel_del()

drivers/net/ethernet/ti/Kconfig, Makefile:
  - make TI_CPTS a bool that depends on PTP_1588_CLOCK and add a
    TI_CPTS_MOD symbol that controls how cpts.o is built

drivers/net/fjes/fjes_main.c:
  - probe the PNP0C02 extended socket device via ACPI and refuse to load
    when it is absent or not powered on; drop NETIF_F_HW_CSUM from the
    advertised features
drivers/net/hyperv/netvsc.c:
  - bail out of netvsc_channel_cb() when the netvsc device is gone

drivers/net/tun.c:
  - propagate offload changes to dev->wanted_features and call
    netdev_update_features() in set_offload()

drivers/net/usb/qmi_wwan.c:
  - add a match for Motorola Mapphone devices with MDM6600 and the Dell
    Wireless 5811e entries

drivers/net/usb/r8152.c:
  - bump NET_VERSION to "9"; size PLA_RMS and the early RX buffer from the
    current MTU instead of the fixed RTL8153_RMS, including on MTU changes

drivers/net/vrf.c:
  - clear rt6i_idev after dropping its reference in vrf_rt6_release()
drivers/net/wireless/ath/ath10k/hw.c:
  - correct the QCA6174 wlan_mac_base_address (0x00020000 -> 0x00010000)

drivers/net/wireless/intel/iwlwifi/mvm/ (mac80211.c, sta.c, sta.h, tx.c):
  - handle DQA queues alongside aggregation queues in the sleep-tx-count,
    pending-frames and queue-empty accounting paths
drivers/net/wireless/marvell/mwifiex/main.c:
  - pass the struct device into mwifiex_register() so adapter->dev is set
    before it is used, and only call device_init_wakeup() on removal when
    a wakeup IRQ was set up

drivers/net/wireless/marvell/mwifiex/pcie.c:
  - consolidate ring/buffer freeing into mwifiex_pcie_free_buffers(), used
    by both mwifiex_cleanup_pcie() and mwifiex_pcie_down_dev()
@ -223,6 +223,46 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
|
|||
return len;
|
||||
}
|
||||
|
||||
static int
|
||||
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
|
||||
{
|
||||
struct vhost_vsock *vsock;
|
||||
struct virtio_vsock_pkt *pkt, *n;
|
||||
int cnt = 0;
|
||||
LIST_HEAD(freeme);
|
||||
|
||||
/* Find the vhost_vsock according to guest context id */
|
||||
vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
|
||||
if (!vsock)
|
||||
return -ENODEV;
|
||||
|
||||
spin_lock_bh(&vsock->send_pkt_list_lock);
|
||||
list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
|
||||
if (pkt->vsk != vsk)
|
||||
continue;
|
||||
list_move(&pkt->list, &freeme);
|
||||
}
|
||||
spin_unlock_bh(&vsock->send_pkt_list_lock);
|
||||
|
||||
list_for_each_entry_safe(pkt, n, &freeme, list) {
|
||||
if (pkt->reply)
|
||||
cnt++;
|
||||
list_del(&pkt->list);
|
||||
virtio_transport_free_pkt(pkt);
|
||||
}
|
||||
|
||||
if (cnt) {
|
||||
struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
|
||||
int new_cnt;
|
||||
|
||||
new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
|
||||
if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
|
||||
vhost_poll_queue(&tx_vq->poll);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct virtio_vsock_pkt *
|
||||
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
|
||||
unsigned int out, unsigned int in)
|
||||
|
@ -675,6 +715,7 @@ static struct virtio_transport vhost_transport = {
|
|||
.release = virtio_transport_release,
|
||||
.connect = virtio_transport_connect,
|
||||
.shutdown = virtio_transport_shutdown,
|
||||
.cancel_pkt = vhost_transport_cancel_pkt,
|
||||
|
||||
.dgram_enqueue = virtio_transport_dgram_enqueue,
|
||||
.dgram_dequeue = virtio_transport_dgram_dequeue,
|
||||
|
|
|
@@ -20,6 +20,8 @@ struct sock_exterr_skb
	struct sock_extended_err	ee;
	u16				addr_offset;
	__be16				port;
	u8				opt_stats:1,
					unused:7;
};

#endif

@@ -476,6 +476,7 @@ enum {
enum {
	MLX4_INTERFACE_STATE_UP		= 1 << 0,
	MLX4_INTERFACE_STATE_DELETION	= 1 << 1,
	MLX4_INTERFACE_STATE_NOWAIT	= 1 << 2,
};

#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \

@@ -48,6 +48,8 @@ struct virtio_vsock_pkt {
	struct virtio_vsock_hdr	hdr;
	struct work_struct work;
	struct list_head list;
	/* socket refcnt not held, only use for cancellation */
	struct vsock_sock *vsk;
	void *buf;
	u32 len;
	u32 off;
@@ -56,6 +58,7 @@ struct virtio_vsock_pkt {

struct virtio_vsock_pkt_info {
	u32 remote_cid, remote_port;
	struct vsock_sock *vsk;
	struct msghdr *msg;
	u32 pkt_len;
	u16 type;

@@ -100,6 +100,9 @@ struct vsock_transport {
	void (*destruct)(struct vsock_sock *);
	void (*release)(struct vsock_sock *);

	/* Cancel all pending packets sent on vsock. */
	int (*cancel_pkt)(struct vsock_sock *vsk);

	/* Connections. */
	int (*connect)(struct vsock_sock *);

@@ -244,7 +244,7 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
			       u32 seq);

/* Fake conntrack entry for untracked connections */
DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
DECLARE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
static inline struct nf_conn *nf_ct_untracked_get(void)
{
	return raw_cpu_ptr(&nf_conntrack_untracked);

@@ -103,6 +103,35 @@ struct nft_regs {
	};
};

/* Store/load an u16 or u8 integer to/from the u32 data register.
 *
 * Note, when using concatenations, register allocation happens at 32-bit
 * level. So for store instruction, pad the rest part with zero to avoid
 * garbage values.
 */

static inline void nft_reg_store16(u32 *dreg, u16 val)
{
	*dreg = 0;
	*(u16 *)dreg = val;
}

static inline void nft_reg_store8(u32 *dreg, u8 val)
{
	*dreg = 0;
	*(u8 *)dreg = val;
}

static inline u16 nft_reg_load16(u32 *sreg)
{
	return *(u16 *)sreg;
}

static inline u8 nft_reg_load8(u32 *sreg)
{
	return *(u8 *)sreg;
}

static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
				 unsigned int len)
{
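
The helpers above exist because nftables registers are 32-bit words: when only 8 or 16 bits of a register are meaningful, the store must zero the whole word so that later loads and full 32-bit comparisons do not see stale bytes. The following is a minimal, stand-alone user-space sketch of that behaviour (re-implemented here with memcpy for portability; it is an illustration, not the kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the kernel helpers: clear the 32-bit register, then store the
 * narrower value in its first bytes.
 */
static void reg_store16(uint32_t *dreg, uint16_t val)
{
	*dreg = 0;
	memcpy(dreg, &val, sizeof(val));
}

static uint16_t reg_load16(const uint32_t *sreg)
{
	uint16_t val;

	memcpy(&val, sreg, sizeof(val));
	return val;
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;	/* stale contents from an earlier rule */

	reg_store16(&reg, 0x1234);
	/* Without the "*dreg = 0" step, two of the four bytes would keep
	 * their old 0xdeadbeef contents and a 32-bit comparison against the
	 * register would mismatch.
	 */
	printf("register: 0x%08x, 16-bit load: 0x%04x\n", reg, reg_load16(&reg));
	return 0;
}
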
@@ -203,7 +232,6 @@ struct nft_set_elem {
struct nft_set;
struct nft_set_iter {
	u8		genmask;
	bool		flush;
	unsigned int	count;
	unsigned int	skip;
	int		err;

@@ -9,12 +9,13 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
		     struct sk_buff *skb,
		     const struct nf_hook_state *state)
{
	unsigned int flags = IP6_FH_F_AUTH;
	int protohdr, thoff = 0;
	unsigned short frag_off;

	nft_set_pktinfo(pkt, skb, state);

	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
	if (protohdr < 0) {
		nft_set_pktinfo_proto_unspec(pkt, skb);
		return;
@@ -32,6 +33,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
				const struct nf_hook_state *state)
{
#if IS_ENABLED(CONFIG_IPV6)
	unsigned int flags = IP6_FH_F_AUTH;
	struct ipv6hdr *ip6h, _ip6h;
	unsigned int thoff = 0;
	unsigned short frag_off;
@@ -50,7 +52,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
	if (pkt_len + sizeof(*ip6h) > skb->len)
		return -1;

	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
	if (protohdr < 0)
		return -1;

@@ -83,6 +83,7 @@ struct sctp_bind_addr;
struct sctp_ulpq;
struct sctp_ep_common;
struct crypto_shash;
struct sctp_stream;


#include <net/sctp/tsnmap.h>
@@ -753,6 +754,8 @@ struct sctp_transport {
		/* Is the Path MTU update pending on this tranport */
		pmtu_pending:1,

		dst_pending_confirm:1,	/* need to confirm neighbour */

		/* Has this transport moved the ctsn since we last sacked */
		sack_generation:1;
	u32 dst_cookie;
@@ -806,8 +809,6 @@ struct sctp_transport {

	__u32 burst_limited;	/* Holds old cwnd when max.burst is applied */

	__u32 dst_pending_confirm;	/* need to confirm neighbour */

	/* Destination */
	struct dst_entry *dst;
	/* Source address. */

@ -30,18 +30,12 @@ struct bpf_htab {
|
|||
struct pcpu_freelist freelist;
|
||||
struct bpf_lru lru;
|
||||
};
|
||||
void __percpu *extra_elems;
|
||||
struct htab_elem *__percpu *extra_elems;
|
||||
atomic_t count; /* number of elements in this hashtable */
|
||||
u32 n_buckets; /* number of hash buckets */
|
||||
u32 elem_size; /* size of each element in bytes */
|
||||
};
|
||||
|
||||
enum extra_elem_state {
|
||||
HTAB_NOT_AN_EXTRA_ELEM = 0,
|
||||
HTAB_EXTRA_ELEM_FREE,
|
||||
HTAB_EXTRA_ELEM_USED
|
||||
};
|
||||
|
||||
/* each htab element is struct htab_elem + key + value */
|
||||
struct htab_elem {
|
||||
union {
|
||||
|
@ -56,7 +50,6 @@ struct htab_elem {
|
|||
};
|
||||
union {
|
||||
struct rcu_head rcu;
|
||||
enum extra_elem_state state;
|
||||
struct bpf_lru_node lru_node;
|
||||
};
|
||||
u32 hash;
|
||||
|
@ -77,6 +70,11 @@ static bool htab_is_percpu(const struct bpf_htab *htab)
|
|||
htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
|
||||
}
|
||||
|
||||
static bool htab_is_prealloc(const struct bpf_htab *htab)
|
||||
{
|
||||
return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
|
||||
}
|
||||
|
||||
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
|
||||
void __percpu *pptr)
|
||||
{
|
||||
|
@ -128,17 +126,20 @@ static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
|
|||
|
||||
static int prealloc_init(struct bpf_htab *htab)
|
||||
{
|
||||
u32 num_entries = htab->map.max_entries;
|
||||
int err = -ENOMEM, i;
|
||||
|
||||
htab->elems = bpf_map_area_alloc(htab->elem_size *
|
||||
htab->map.max_entries);
|
||||
if (!htab_is_percpu(htab) && !htab_is_lru(htab))
|
||||
num_entries += num_possible_cpus();
|
||||
|
||||
htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries);
|
||||
if (!htab->elems)
|
||||
return -ENOMEM;
|
||||
|
||||
if (!htab_is_percpu(htab))
|
||||
goto skip_percpu_elems;
|
||||
|
||||
for (i = 0; i < htab->map.max_entries; i++) {
|
||||
for (i = 0; i < num_entries; i++) {
|
||||
u32 size = round_up(htab->map.value_size, 8);
|
||||
void __percpu *pptr;
|
||||
|
||||
|
@ -166,11 +167,11 @@ static int prealloc_init(struct bpf_htab *htab)
|
|||
if (htab_is_lru(htab))
|
||||
bpf_lru_populate(&htab->lru, htab->elems,
|
||||
offsetof(struct htab_elem, lru_node),
|
||||
htab->elem_size, htab->map.max_entries);
|
||||
htab->elem_size, num_entries);
|
||||
else
|
||||
pcpu_freelist_populate(&htab->freelist,
|
||||
htab->elems + offsetof(struct htab_elem, fnode),
|
||||
htab->elem_size, htab->map.max_entries);
|
||||
htab->elem_size, num_entries);
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -191,16 +192,22 @@ static void prealloc_destroy(struct bpf_htab *htab)
|
|||
|
||||
static int alloc_extra_elems(struct bpf_htab *htab)
|
||||
{
|
||||
void __percpu *pptr;
|
||||
struct htab_elem *__percpu *pptr, *l_new;
|
||||
struct pcpu_freelist_node *l;
|
||||
int cpu;
|
||||
|
||||
pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
|
||||
pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
|
||||
GFP_USER | __GFP_NOWARN);
|
||||
if (!pptr)
|
||||
return -ENOMEM;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
|
||||
HTAB_EXTRA_ELEM_FREE;
|
||||
l = pcpu_freelist_pop(&htab->freelist);
|
||||
/* pop will succeed, since prealloc_init()
|
||||
* preallocated extra num_possible_cpus elements
|
||||
*/
|
||||
l_new = container_of(l, struct htab_elem, fnode);
|
||||
*per_cpu_ptr(pptr, cpu) = l_new;
|
||||
}
|
||||
htab->extra_elems = pptr;
|
||||
return 0;
|
||||
|
@ -342,25 +349,25 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
|
|||
raw_spin_lock_init(&htab->buckets[i].lock);
|
||||
}
|
||||
|
||||
if (!percpu && !lru) {
|
||||
/* lru itself can remove the least used element, so
|
||||
* there is no need for an extra elem during map_update.
|
||||
*/
|
||||
err = alloc_extra_elems(htab);
|
||||
if (err)
|
||||
goto free_buckets;
|
||||
}
|
||||
|
||||
if (prealloc) {
|
||||
err = prealloc_init(htab);
|
||||
if (err)
|
||||
goto free_extra_elems;
|
||||
goto free_buckets;
|
||||
|
||||
if (!percpu && !lru) {
|
||||
/* lru itself can remove the least used element, so
|
||||
* there is no need for an extra elem during map_update.
|
||||
*/
|
||||
err = alloc_extra_elems(htab);
|
||||
if (err)
|
||||
goto free_prealloc;
|
||||
}
|
||||
}
|
||||
|
||||
return &htab->map;
|
||||
|
||||
free_extra_elems:
|
||||
free_percpu(htab->extra_elems);
|
||||
free_prealloc:
|
||||
prealloc_destroy(htab);
|
||||
free_buckets:
|
||||
bpf_map_area_free(htab->buckets);
|
||||
free_htab:
|
||||
|
@ -575,12 +582,7 @@ static void htab_elem_free_rcu(struct rcu_head *head)
|
|||
|
||||
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
|
||||
{
|
||||
if (l->state == HTAB_EXTRA_ELEM_USED) {
|
||||
l->state = HTAB_EXTRA_ELEM_FREE;
|
||||
return;
|
||||
}
|
||||
|
||||
if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
|
||||
if (htab_is_prealloc(htab)) {
|
||||
pcpu_freelist_push(&htab->freelist, &l->fnode);
|
||||
} else {
|
||||
atomic_dec(&htab->count);
|
||||
|
@ -610,47 +612,43 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
|
|||
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
|
||||
void *value, u32 key_size, u32 hash,
|
||||
bool percpu, bool onallcpus,
|
||||
bool old_elem_exists)
|
||||
struct htab_elem *old_elem)
|
||||
{
|
||||
u32 size = htab->map.value_size;
|
||||
bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
|
||||
struct htab_elem *l_new;
|
||||
bool prealloc = htab_is_prealloc(htab);
|
||||
struct htab_elem *l_new, **pl_new;
|
||||
void __percpu *pptr;
|
||||
int err = 0;
|
||||
|
||||
if (prealloc) {
|
||||
struct pcpu_freelist_node *l;
|
||||
|
||||
l = pcpu_freelist_pop(&htab->freelist);
|
||||
if (!l)
|
||||
err = -E2BIG;
|
||||
else
|
||||
l_new = container_of(l, struct htab_elem, fnode);
|
||||
} else {
|
||||
if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
|
||||
atomic_dec(&htab->count);
|
||||
err = -E2BIG;
|
||||
if (old_elem) {
|
||||
/* if we're updating the existing element,
|
||||
* use per-cpu extra elems to avoid freelist_pop/push
|
||||
*/
|
||||
pl_new = this_cpu_ptr(htab->extra_elems);
|
||||
l_new = *pl_new;
|
||||
*pl_new = old_elem;
|
||||
} else {
|
||||
l_new = kmalloc(htab->elem_size,
|
||||
GFP_ATOMIC | __GFP_NOWARN);
|
||||
if (!l_new)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
struct pcpu_freelist_node *l;
|
||||
|
||||
l = pcpu_freelist_pop(&htab->freelist);
|
||||
if (!l)
|
||||
return ERR_PTR(-E2BIG);
|
||||
l_new = container_of(l, struct htab_elem, fnode);
|
||||
}
|
||||
}
|
||||
|
||||
if (err) {
|
||||
if (!old_elem_exists)
|
||||
return ERR_PTR(err);
|
||||
|
||||
/* if we're updating the existing element and the hash table
|
||||
* is full, use per-cpu extra elems
|
||||
*/
|
||||
l_new = this_cpu_ptr(htab->extra_elems);
|
||||
if (l_new->state != HTAB_EXTRA_ELEM_FREE)
|
||||
return ERR_PTR(-E2BIG);
|
||||
l_new->state = HTAB_EXTRA_ELEM_USED;
|
||||
} else {
|
||||
l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
|
||||
if (atomic_inc_return(&htab->count) > htab->map.max_entries)
|
||||
if (!old_elem) {
|
||||
/* when map is full and update() is replacing
|
||||
* old element, it's ok to allocate, since
|
||||
* old element will be freed immediately.
|
||||
* Otherwise return an error
|
||||
*/
|
||||
atomic_dec(&htab->count);
|
||||
return ERR_PTR(-E2BIG);
|
||||
}
|
||||
l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
|
||||
if (!l_new)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
memcpy(l_new->key, key, key_size);
|
||||
|
@ -731,7 +729,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
|
|||
goto err;
|
||||
|
||||
l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
|
||||
!!l_old);
|
||||
l_old);
|
||||
if (IS_ERR(l_new)) {
|
||||
/* all pre-allocated elements are in use or memory exhausted */
|
||||
ret = PTR_ERR(l_new);
|
||||
|
@ -744,7 +742,8 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
|
|||
hlist_nulls_add_head_rcu(&l_new->hash_node, head);
|
||||
if (l_old) {
|
||||
hlist_nulls_del_rcu(&l_old->hash_node);
|
||||
free_htab_elem(htab, l_old);
|
||||
if (!htab_is_prealloc(htab))
|
||||
free_htab_elem(htab, l_old);
|
||||
}
|
||||
ret = 0;
|
||||
err:
|
||||
|
@ -856,7 +855,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
|
|||
value, onallcpus);
|
||||
} else {
|
||||
l_new = alloc_htab_elem(htab, key, value, key_size,
|
||||
hash, true, onallcpus, false);
|
||||
hash, true, onallcpus, NULL);
|
||||
if (IS_ERR(l_new)) {
|
||||
ret = PTR_ERR(l_new);
|
||||
goto err;
|
||||
|
@ -1024,8 +1023,7 @@ static void delete_all_elements(struct bpf_htab *htab)
|
|||
|
||||
hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
|
||||
hlist_nulls_del_rcu(&l->hash_node);
|
||||
if (l->state != HTAB_EXTRA_ELEM_USED)
|
||||
htab_elem_free(htab, l);
|
||||
htab_elem_free(htab, l);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1045,7 +1043,7 @@ static void htab_map_free(struct bpf_map *map)
|
|||
* not have executed. Wait for them.
|
||||
*/
|
||||
rcu_barrier();
|
||||
if (htab->map.map_flags & BPF_F_NO_PREALLOC)
|
||||
if (!htab_is_prealloc(htab))
|
||||
delete_all_elements(htab);
|
||||
else
|
||||
prealloc_destroy(htab);
|
||||
|
|
|
@ -2477,6 +2477,16 @@ static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
|
|||
batadv_iv_ogm_schedule(hard_iface);
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_iv_init_sel_class - initialize GW selection class
|
||||
* @bat_priv: the bat priv with all the soft interface information
|
||||
*/
|
||||
static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
|
||||
{
|
||||
/* set default TQ difference threshold to 20 */
|
||||
atomic_set(&bat_priv->gw.sel_class, 20);
|
||||
}
|
||||
|
||||
static struct batadv_gw_node *
|
||||
batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
|
||||
{
|
||||
|
@ -2823,6 +2833,7 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
|
|||
.del_if = batadv_iv_ogm_orig_del_if,
|
||||
},
|
||||
.gw = {
|
||||
.init_sel_class = batadv_iv_init_sel_class,
|
||||
.get_best_gw_node = batadv_iv_gw_get_best_gw_node,
|
||||
.is_eligible = batadv_iv_gw_is_eligible,
|
||||
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
|
||||
|
|
|
@ -668,6 +668,16 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
|
|||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* batadv_v_init_sel_class - initialize GW selection class
|
||||
* @bat_priv: the bat priv with all the soft interface information
|
||||
*/
|
||||
static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
|
||||
{
|
||||
/* set default throughput difference threshold to 5Mbps */
|
||||
atomic_set(&bat_priv->gw.sel_class, 50);
|
||||
}
|
||||
|
||||
static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv,
|
||||
char *buff, size_t count)
|
||||
{
|
||||
|
@ -1052,6 +1062,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
|
|||
.dump = batadv_v_orig_dump,
|
||||
},
|
||||
.gw = {
|
||||
.init_sel_class = batadv_v_init_sel_class,
|
||||
.store_sel_class = batadv_v_store_sel_class,
|
||||
.show_sel_class = batadv_v_show_sel_class,
|
||||
.get_best_gw_node = batadv_v_gw_get_best_gw_node,
|
||||
|
@ -1092,9 +1103,6 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv)
|
|||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* set default throughput difference threshold to 5Mbps */
|
||||
atomic_set(&bat_priv->gw.sel_class, 50);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -404,7 +404,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
|
|||
* batadv_frag_create - create a fragment from skb
|
||||
* @skb: skb to create fragment from
|
||||
* @frag_head: header to use in new fragment
|
||||
* @mtu: size of new fragment
|
||||
* @fragment_size: size of new fragment
|
||||
*
|
||||
* Split the passed skb into two fragments: A new one with size matching the
|
||||
* passed mtu and the old one with the rest. The new skb contains data from the
|
||||
|
@ -414,11 +414,11 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
|
|||
*/
|
||||
static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
|
||||
struct batadv_frag_packet *frag_head,
|
||||
unsigned int mtu)
|
||||
unsigned int fragment_size)
|
||||
{
|
||||
struct sk_buff *skb_fragment;
|
||||
unsigned int header_size = sizeof(*frag_head);
|
||||
unsigned int fragment_size = mtu - header_size;
|
||||
unsigned int mtu = fragment_size + header_size;
|
||||
|
||||
skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
|
||||
if (!skb_fragment)
|
||||
|
@ -456,7 +456,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
|
|||
struct sk_buff *skb_fragment;
|
||||
unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
|
||||
unsigned int header_size = sizeof(frag_header);
|
||||
unsigned int max_fragment_size, max_packet_size;
|
||||
unsigned int max_fragment_size, num_fragments;
|
||||
int ret;
|
||||
|
||||
/* To avoid merge and refragmentation at next-hops we never send
|
||||
|
@ -464,10 +464,15 @@ int batadv_frag_send_packet(struct sk_buff *skb,
|
|||
*/
|
||||
mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
|
||||
max_fragment_size = mtu - header_size;
|
||||
max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
|
||||
|
||||
if (skb->len == 0 || max_fragment_size == 0)
|
||||
return -EINVAL;
|
||||
|
||||
num_fragments = (skb->len - 1) / max_fragment_size + 1;
|
||||
max_fragment_size = (skb->len - 1) / num_fragments + 1;
|
||||
|
||||
/* Don't even try to fragment, if we need more than 16 fragments */
|
||||
if (skb->len > max_packet_size) {
|
||||
if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
|
||||
ret = -EAGAIN;
|
||||
goto free_skb;
|
||||
}
|
||||
|
@ -507,7 +512,8 @@ int batadv_frag_send_packet(struct sk_buff *skb,
|
|||
goto put_primary_if;
|
||||
}
|
||||
|
||||
skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
|
||||
skb_fragment = batadv_frag_create(skb, &frag_header,
|
||||
max_fragment_size);
|
||||
if (!skb_fragment) {
|
||||
ret = -ENOMEM;
|
||||
goto put_primary_if;
|
||||
|
|
|
@@ -253,6 +253,11 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 */
void batadv_gw_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->algo_ops->gw.init_sel_class)
		bat_priv->algo_ops->gw.init_sel_class(bat_priv);
	else
		atomic_set(&bat_priv->gw.sel_class, 1);

	batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
				     NULL, BATADV_TVLV_GW, 1,
				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);

@@ -819,7 +819,6 @@ static int batadv_softif_init_late(struct net_device *dev)
	atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
#endif
	atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
	atomic_set(&bat_priv->gw.sel_class, 20);
	atomic_set(&bat_priv->gw.bandwidth_down, 100);
	atomic_set(&bat_priv->gw.bandwidth_up, 20);
	atomic_set(&bat_priv->orig_interval, 1000);

@@ -1489,6 +1489,7 @@ struct batadv_algo_orig_ops {

/**
 * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
 * @init_sel_class: initialize GW selection class (optional)
 * @store_sel_class: parse and stores a new GW selection class (optional)
 * @show_sel_class: prints the current GW selection class (optional)
 * @get_best_gw_node: select the best GW from the list of available nodes
@@ -1499,6 +1500,7 @@ struct batadv_algo_orig_ops {
 * @dump: dump gateways to a netlink socket (optional)
 */
struct batadv_algo_gw_ops {
	void (*init_sel_class)(struct batadv_priv *bat_priv);
	ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
				   size_t count);
	ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);

@@ -106,7 +106,7 @@ static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
	struct net_bridge_fdb_entry *fdb;

	WARN_ON_ONCE(!br_hash_lock_held(br));
	lockdep_assert_held_once(&br->hash_lock);

	rcu_read_lock();
	fdb = fdb_find_rcu(head, addr, vid);

@@ -706,18 +706,20 @@ static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)

static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge;
	unsigned int mtu_reserved;
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	unsigned int mtu, mtu_reserved;

	mtu_reserved = nf_bridge_mtu_reduction(skb);
	mtu = skb->dev->mtu;

	if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) {
	if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
		mtu = nf_bridge->frag_max_size;

	if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
		nf_bridge_info_free(skb);
		return br_dev_queue_push_xmit(net, sk, skb);
	}

	nf_bridge = nf_bridge_info_get(skb);

	/* This is wrong! We should preserve the original fragment
	 * boundaries by preserving frag_list rather than refragmenting.
	 */

@@ -531,15 +531,6 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid);

static inline bool br_hash_lock_held(struct net_bridge *br)
{
#ifdef CONFIG_LOCKDEP
	return lockdep_is_held(&br->hash_lock);
#else
	return true;
#endif
}

/* br_forward.c */
enum br_pkt_type {
	BR_PKT_UNICAST,

@ -71,27 +71,17 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void update_classid(struct cgroup_subsys_state *css, void *v)
|
||||
{
|
||||
struct css_task_iter it;
|
||||
struct task_struct *p;
|
||||
|
||||
css_task_iter_start(css, &it);
|
||||
while ((p = css_task_iter_next(&it))) {
|
||||
task_lock(p);
|
||||
iterate_fd(p->files, 0, update_classid_sock, v);
|
||||
task_unlock(p);
|
||||
}
|
||||
css_task_iter_end(&it);
|
||||
}
|
||||
|
||||
static void cgrp_attach(struct cgroup_taskset *tset)
|
||||
{
|
||||
struct cgroup_subsys_state *css;
|
||||
struct task_struct *p;
|
||||
|
||||
cgroup_taskset_first(tset, &css);
|
||||
update_classid(css,
|
||||
(void *)(unsigned long)css_cls_state(css)->classid);
|
||||
cgroup_taskset_for_each(p, css, tset) {
|
||||
task_lock(p);
|
||||
iterate_fd(p->files, 0, update_classid_sock,
|
||||
(void *)(unsigned long)css_cls_state(css)->classid);
|
||||
task_unlock(p);
|
||||
}
|
||||
}
|
||||
|
||||
static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
|
||||
|
@ -103,12 +93,22 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
|
|||
u64 value)
|
||||
{
|
||||
struct cgroup_cls_state *cs = css_cls_state(css);
|
||||
struct css_task_iter it;
|
||||
struct task_struct *p;
|
||||
|
||||
cgroup_sk_alloc_disable();
|
||||
|
||||
cs->classid = (u32)value;
|
||||
|
||||
update_classid(css, (void *)(unsigned long)cs->classid);
|
||||
css_task_iter_start(css, &it);
|
||||
while ((p = css_task_iter_next(&it))) {
|
||||
task_lock(p);
|
||||
iterate_fd(p->files, 0, update_classid_sock,
|
||||
(void *)(unsigned long)cs->classid);
|
||||
task_unlock(p);
|
||||
}
|
||||
css_task_iter_end(&it);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -3694,6 +3694,15 @@ static void sock_rmem_free(struct sk_buff *skb)
|
|||
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
|
||||
}
|
||||
|
||||
static void skb_set_err_queue(struct sk_buff *skb)
|
||||
{
|
||||
/* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
|
||||
* So, it is safe to (mis)use it to mark skbs on the error queue.
|
||||
*/
|
||||
skb->pkt_type = PACKET_OUTGOING;
|
||||
BUILD_BUG_ON(PACKET_OUTGOING == 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Note: We dont mem charge error packets (no sk_forward_alloc changes)
|
||||
*/
|
||||
|
@ -3707,6 +3716,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
|
|||
skb->sk = sk;
|
||||
skb->destructor = sock_rmem_free;
|
||||
atomic_add(skb->truesize, &sk->sk_rmem_alloc);
|
||||
skb_set_err_queue(skb);
|
||||
|
||||
/* before exiting rcu section, make sure dst is refcounted */
|
||||
skb_dst_force(skb);
|
||||
|
@ -3783,16 +3793,20 @@ EXPORT_SYMBOL(skb_clone_sk);
|
|||
|
||||
static void __skb_complete_tx_timestamp(struct sk_buff *skb,
|
||||
struct sock *sk,
|
||||
int tstype)
|
||||
int tstype,
|
||||
bool opt_stats)
|
||||
{
|
||||
struct sock_exterr_skb *serr;
|
||||
int err;
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
|
||||
|
||||
serr = SKB_EXT_ERR(skb);
|
||||
memset(serr, 0, sizeof(*serr));
|
||||
serr->ee.ee_errno = ENOMSG;
|
||||
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
|
||||
serr->ee.ee_info = tstype;
|
||||
serr->opt_stats = opt_stats;
|
||||
if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
|
||||
serr->ee.ee_data = skb_shinfo(skb)->tskey;
|
||||
if (sk->sk_protocol == IPPROTO_TCP &&
|
||||
|
@ -3833,7 +3847,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
|
|||
*/
|
||||
if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
|
||||
*skb_hwtstamps(skb) = *hwtstamps;
|
||||
__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
|
||||
__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
|
||||
sock_put(sk);
|
||||
}
|
||||
}
|
||||
|
@ -3844,7 +3858,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
|
|||
struct sock *sk, int tstype)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
bool tsonly;
|
||||
bool tsonly, opt_stats = false;
|
||||
|
||||
if (!sk)
|
||||
return;
|
||||
|
@ -3857,9 +3871,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
|
|||
#ifdef CONFIG_INET
|
||||
if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
|
||||
sk->sk_protocol == IPPROTO_TCP &&
|
||||
sk->sk_type == SOCK_STREAM)
|
||||
sk->sk_type == SOCK_STREAM) {
|
||||
skb = tcp_get_timestamping_opt_stats(sk);
|
||||
else
|
||||
opt_stats = true;
|
||||
} else
|
||||
#endif
|
||||
skb = alloc_skb(0, GFP_ATOMIC);
|
||||
} else {
|
||||
|
@ -3878,7 +3893,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
|
|||
else
|
||||
skb->tstamp = ktime_get_real();
|
||||
|
||||
__skb_complete_tx_timestamp(skb, sk, tstype);
|
||||
__skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
|
||||
|
||||
|
|
|
@@ -1442,6 +1442,11 @@ static void __sk_destruct(struct rcu_head *head)
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
@@ -1539,6 +1544,12 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
			is_charged = sk_filter_charge(newsk, filter);

		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
			/* We need to make sure that we don't uncharge the new
			 * socket if we couldn't charge it in the first place
			 * as otherwise we uncharge the parent's filter.
			 */
			if (!is_charged)
				RCU_INIT_POINTER(newsk->sk_filter, NULL);
			sk_free_unlock_clone(newsk);
			newsk = NULL;
			goto out;
@@ -2787,11 +2798,6 @@ void sk_common_release(struct sock *sk)

	sk_refcnt_debug_release(sk);

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

@@ -1083,7 +1083,8 @@ static void nl_fib_input(struct sk_buff *skb)

	net = sock_net(skb->sk);
	nlh = nlmsg_hdr(skb);
	if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
	if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
	    skb->len < nlh->nlmsg_len ||
	    nlmsg_len(nlh) < sizeof(*frn))
		return;

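
The rewritten check above only trusts the message once the buffer really holds a full netlink header plus payload and the header's own length claim is consistent with both. A rough user-space sketch of that three-part validation follows; the constants and the minimal header struct are stand-ins for the kernel macros and types, so treat it as illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HDRLEN		16U	/* stand-in for the aligned netlink header size */
#define PAYLOAD_LEN	20U	/* stand-in for sizeof(*frn) */

struct nlmsghdr_sketch {
	uint32_t nlmsg_len;	/* total message length claimed by the sender */
};

static bool frn_input_ok(uint32_t skb_len, const struct nlmsghdr_sketch *nlh)
{
	if (skb_len < HDRLEN + PAYLOAD_LEN)	/* buffer too small overall */
		return false;
	if (skb_len < nlh->nlmsg_len)		/* claimed length overruns buffer */
		return false;
	if (nlh->nlmsg_len < HDRLEN + PAYLOAD_LEN) /* claimed payload too short */
		return false;
	return true;
}

int main(void)
{
	struct nlmsghdr_sketch nlh = { .nlmsg_len = HDRLEN + PAYLOAD_LEN };

	printf("%d %d\n", frn_input_ok(36, &nlh), frn_input_ok(17, &nlh));
	return 0;
}
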
@ -198,6 +198,7 @@ static void ip_expire(unsigned long arg)
|
|||
qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
|
||||
net = container_of(qp->q.net, struct net, ipv4.frags);
|
||||
|
||||
rcu_read_lock();
|
||||
spin_lock(&qp->q.lock);
|
||||
|
||||
if (qp->q.flags & INET_FRAG_COMPLETE)
|
||||
|
@ -207,7 +208,7 @@ static void ip_expire(unsigned long arg)
|
|||
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
|
||||
|
||||
if (!inet_frag_evicting(&qp->q)) {
|
||||
struct sk_buff *head = qp->q.fragments;
|
||||
struct sk_buff *clone, *head = qp->q.fragments;
|
||||
const struct iphdr *iph;
|
||||
int err;
|
||||
|
||||
|
@ -216,32 +217,40 @@ static void ip_expire(unsigned long arg)
|
|||
if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
|
||||
goto out;
|
||||
|
||||
rcu_read_lock();
|
||||
head->dev = dev_get_by_index_rcu(net, qp->iif);
|
||||
if (!head->dev)
|
||||
goto out_rcu_unlock;
|
||||
goto out;
|
||||
|
||||
|
||||
/* skb has no dst, perform route lookup again */
|
||||
iph = ip_hdr(head);
|
||||
err = ip_route_input_noref(head, iph->daddr, iph->saddr,
|
||||
iph->tos, head->dev);
|
||||
if (err)
|
||||
goto out_rcu_unlock;
|
||||
goto out;
|
||||
|
||||
/* Only an end host needs to send an ICMP
|
||||
* "Fragment Reassembly Timeout" message, per RFC792.
|
||||
*/
|
||||
if (frag_expire_skip_icmp(qp->user) &&
|
||||
(skb_rtable(head)->rt_type != RTN_LOCAL))
|
||||
goto out_rcu_unlock;
|
||||
goto out;
|
||||
|
||||
clone = skb_clone(head, GFP_ATOMIC);
|
||||
|
||||
/* Send an ICMP "Fragment Reassembly Timeout" message. */
|
||||
icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
|
||||
out_rcu_unlock:
|
||||
rcu_read_unlock();
|
||||
if (clone) {
|
||||
spin_unlock(&qp->q.lock);
|
||||
icmp_send(clone, ICMP_TIME_EXCEEDED,
|
||||
ICMP_EXC_FRAGTIME, 0);
|
||||
consume_skb(clone);
|
||||
goto out_rcu_unlock;
|
||||
}
|
||||
}
|
||||
out:
|
||||
spin_unlock(&qp->q.lock);
|
||||
out_rcu_unlock:
|
||||
rcu_read_unlock();
|
||||
ipq_put(qp);
|
||||
}
|
||||
|
||||
|
|
|
@@ -165,6 +165,10 @@ static unsigned int ipv4_conntrack_local(void *priv,
	if (skb->len < sizeof(struct iphdr) ||
	    ip_hdrlen(skb) < sizeof(struct iphdr))
		return NF_ACCEPT;

	if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
		return NF_ACCEPT;

	return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
}

@@ -255,11 +255,6 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
	/* maniptype == SRC for postrouting. */
	enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);

	/* We never see fragments: conntrack defrags on pre-routing
	 * and local-out, and nf_nat_out protects post-routing.
	 */
	NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));

	ct = nf_ct_get(skb, &ctinfo);
	/* Can't track? It's not due to stress, or conntrack would
	 * have dropped it. Hence it's the user's responsibilty to

@ -26,10 +26,10 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr,
|
|||
memset(&range, 0, sizeof(range));
|
||||
range.flags = priv->flags;
|
||||
if (priv->sreg_proto_min) {
|
||||
range.min_proto.all =
|
||||
*(__be16 *)®s->data[priv->sreg_proto_min];
|
||||
range.max_proto.all =
|
||||
*(__be16 *)®s->data[priv->sreg_proto_max];
|
||||
range.min_proto.all = (__force __be16)nft_reg_load16(
|
||||
®s->data[priv->sreg_proto_min]);
|
||||
range.max_proto.all = (__force __be16)nft_reg_load16(
|
||||
®s->data[priv->sreg_proto_max]);
|
||||
}
|
||||
regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt),
|
||||
&range, nft_out(pkt));
|
||||
|
|
|
@ -26,10 +26,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
|
|||
|
||||
memset(&mr, 0, sizeof(mr));
|
||||
if (priv->sreg_proto_min) {
|
||||
mr.range[0].min.all =
|
||||
*(__be16 *)®s->data[priv->sreg_proto_min];
|
||||
mr.range[0].max.all =
|
||||
*(__be16 *)®s->data[priv->sreg_proto_max];
|
||||
mr.range[0].min.all = (__force __be16)nft_reg_load16(
|
||||
®s->data[priv->sreg_proto_min]);
|
||||
mr.range[0].max.all = (__force __be16)nft_reg_load16(
|
||||
®s->data[priv->sreg_proto_max]);
|
||||
mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
|
||||
}
|
||||
|
||||
|
|
|
@@ -2770,7 +2770,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
	const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now = tcp_time_stamp, intv;
	u32 now, intv;
	u64 rate64;
	bool slow;
	u32 rate;
@@ -2839,6 +2839,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
	info->tcpi_retrans = tp->retrans_out;
	info->tcpi_fackets = tp->fackets_out;

	now = tcp_time_stamp;
	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

@@ -5541,6 +5541,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_set_state(sk, TCP_ESTABLISHED);
	icsk->icsk_ack.lrcvtime = tcp_time_stamp;

	if (skb) {
		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
@@ -5759,7 +5760,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
		 * to stand against the temptation 8) --ANK
		 */
		inet_csk_schedule_ack(sk);
		icsk->icsk_ack.lrcvtime = tcp_time_stamp;
		tcp_enter_quickack_mode(sk);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
					  TCP_DELACK_MAX, TCP_RTO_MAX);

@@ -460,6 +460,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
		newicsk->icsk_ack.lrcvtime = tcp_time_stamp;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;

@ -27,10 +27,10 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr,
|
|||
memset(&range, 0, sizeof(range));
|
||||
range.flags = priv->flags;
|
||||
if (priv->sreg_proto_min) {
|
||||
range.min_proto.all =
|
||||
*(__be16 *)®s->data[priv->sreg_proto_min];
|
||||
range.max_proto.all =
|
||||
*(__be16 *)®s->data[priv->sreg_proto_max];
|
||||
range.min_proto.all = (__force __be16)nft_reg_load16(
|
||||
®s->data[priv->sreg_proto_min]);
|
||||
range.max_proto.all = (__force __be16)nft_reg_load16(
|
||||
®s->data[priv->sreg_proto_max]);
|
||||
}
|
||||
regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range,
|
||||
nft_out(pkt));
|
||||
|
|
|
@ -26,10 +26,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
|
|||
|
||||
memset(&range, 0, sizeof(range));
|
||||
if (priv->sreg_proto_min) {
|
||||
range.min_proto.all =
|
||||
*(__be16 *)®s->data[priv->sreg_proto_min],
|
||||
range.max_proto.all =
|
||||
*(__be16 *)®s->data[priv->sreg_proto_max],
|
||||
range.min_proto.all = (__force __be16)nft_reg_load16(
|
||||
®s->data[priv->sreg_proto_min]);
|
||||
range.max_proto.all = (__force __be16)nft_reg_load16(
|
||||
®s->data[priv->sreg_proto_max]);
|
||||
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
|
||||
}
|
||||
|
||||
|
|
|
@@ -3423,6 +3423,8 @@ static int rt6_fill_node(struct net *net,
	}
	else if (rt->rt6i_flags & RTF_LOCAL)
		rtm->rtm_type = RTN_LOCAL;
	else if (rt->rt6i_flags & RTF_ANYCAST)
		rtm->rtm_type = RTN_ANYCAST;
	else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
		rtm->rtm_type = RTN_LOCAL;
	else

@@ -1035,6 +1035,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
	ipc6.hlimit = -1;
	ipc6.tclass = -1;
	ipc6.dontfrag = -1;
	sockc.tsflags = sk->sk_tsflags;

	/* destination address check */
	if (sin6) {
@@ -1159,7 +1160,6 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)

	fl6.flowi6_mark = sk->sk_mark;
	fl6.flowi6_uid = sk->sk_uid;
	sockc.tsflags = sk->sk_tsflags;

	if (msg->msg_controllen) {
		opt = &opt_space;

@ -1269,6 +1269,8 @@ static void mpls_ifdown(struct net_device *dev, int event)
|
|||
{
|
||||
struct mpls_route __rcu **platform_label;
|
||||
struct net *net = dev_net(dev);
|
||||
unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN;
|
||||
unsigned int alive;
|
||||
unsigned index;
|
||||
|
||||
platform_label = rtnl_dereference(net->mpls.platform_label);
|
||||
|
@ -1278,9 +1280,11 @@ static void mpls_ifdown(struct net_device *dev, int event)
|
|||
if (!rt)
|
||||
continue;
|
||||
|
||||
alive = 0;
|
||||
change_nexthops(rt) {
|
||||
if (rtnl_dereference(nh->nh_dev) != dev)
|
||||
continue;
|
||||
goto next;
|
||||
|
||||
switch (event) {
|
||||
case NETDEV_DOWN:
|
||||
case NETDEV_UNREGISTER:
|
||||
|
@ -1288,13 +1292,16 @@ static void mpls_ifdown(struct net_device *dev, int event)
|
|||
/* fall through */
|
||||
case NETDEV_CHANGE:
|
||||
nh->nh_flags |= RTNH_F_LINKDOWN;
|
||||
if (event != NETDEV_UNREGISTER)
|
||||
ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
|
||||
break;
|
||||
}
|
||||
if (event == NETDEV_UNREGISTER)
|
||||
RCU_INIT_POINTER(nh->nh_dev, NULL);
|
||||
next:
|
||||
if (!(nh->nh_flags & nh_flags))
|
||||
alive++;
|
||||
} endfor_nexthops(rt);
|
||||
|
||||
WRITE_ONCE(rt->rt_nhn_alive, alive);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -181,7 +181,11 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
unsigned int nf_conntrack_max __read_mostly;
seqcount_t nf_conntrack_generation __read_mostly;

DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
/* nf_conn must be 8 bytes aligned, as the 3 LSB bits are used
 * for the nfctinfo. We cheat by (ab)using the PER CPU cache line
 * alignment to enforce this.
 */
DEFINE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);

static unsigned int nf_conntrack_hash_rnd __read_mostly;

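
The alignment requirement in the comment above enables a classic pointer-tagging trick: with 8-byte alignment the three least-significant bits of the address are always zero, so they can carry a small value (here the nfctinfo). A stand-alone user-space sketch of the idea, with made-up names and not the kernel implementation:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Any 8-byte-aligned object leaves the low three bits of its address zero. */
struct conn {
	long placeholder;
} __attribute__((aligned(8)));

static uintptr_t pack(const struct conn *c, unsigned int info)
{
	assert(((uintptr_t)c & 7) == 0);	/* relies on the alignment */
	return (uintptr_t)c | (info & 7);	/* stash 3 bits of info */
}

static const struct conn *unpack(uintptr_t v, unsigned int *info)
{
	*info = v & 7;				/* recover the tag */
	return (const struct conn *)(v & ~(uintptr_t)7);
}

int main(void)
{
	static struct conn c;
	unsigned int info;
	uintptr_t tagged = pack(&c, 5);

	printf("pointer intact: %d, info: %u\n", unpack(tagged, &info) == &c, info);
	return 0;
}
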
@@ -33,8 +33,16 @@ sctp_manip_pkt(struct sk_buff *skb,
	       enum nf_nat_manip_type maniptype)
{
	sctp_sctphdr_t *hdr;
	int hdrsize = 8;

	if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
	/* This could be an inner header returned in imcp packet; in such
	 * cases we cannot update the checksum field since it is outside
	 * of the 8 bytes of transport layer headers we are guaranteed.
	 */
	if (skb->len >= hdroff + sizeof(*hdr))
		hdrsize = sizeof(*hdr);

	if (!skb_make_writable(skb, hdroff + hdrsize))
		return false;

	hdr = (struct sctphdr *)(skb->data + hdroff);
@@ -47,6 +55,9 @@ sctp_manip_pkt(struct sk_buff *skb,
		hdr->dest = tuple->dst.u.sctp.port;
	}

	if (hdrsize < sizeof(*hdr))
		return true;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		hdr->checksum = sctp_compute_cksum(skb, hdroff);
		skb->ip_summed = CHECKSUM_NONE;

@ -3145,7 +3145,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
|
|||
iter.count = 0;
|
||||
iter.err = 0;
|
||||
iter.fn = nf_tables_bind_check_setelem;
|
||||
iter.flush = false;
|
||||
|
||||
set->ops->walk(ctx, set, &iter);
|
||||
if (iter.err < 0)
|
||||
|
@ -3399,7 +3398,6 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
|
|||
args.iter.count = 0;
|
||||
args.iter.err = 0;
|
||||
args.iter.fn = nf_tables_dump_setelem;
|
||||
args.iter.flush = false;
|
||||
set->ops->walk(&ctx, set, &args.iter);
|
||||
|
||||
nla_nest_end(skb, nest);
|
||||
|
@ -3963,7 +3961,6 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
|
|||
struct nft_set_iter iter = {
|
||||
.genmask = genmask,
|
||||
.fn = nft_flush_set,
|
||||
.flush = true,
|
||||
};
|
||||
set->ops->walk(&ctx, set, &iter);
|
||||
|
||||
|
@ -5114,7 +5111,6 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
|
|||
iter.count = 0;
|
||||
iter.err = 0;
|
||||
iter.fn = nf_tables_loop_check_setelem;
|
||||
iter.flush = false;
|
||||
|
||||
set->ops->walk(ctx, set, &iter);
|
||||
if (iter.err < 0)
|
||||
|
|
|
@ -83,7 +83,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
|
|||
|
||||
switch (priv->key) {
|
||||
case NFT_CT_DIRECTION:
|
||||
*dest = CTINFO2DIR(ctinfo);
|
||||
nft_reg_store8(dest, CTINFO2DIR(ctinfo));
|
||||
return;
|
||||
case NFT_CT_STATUS:
|
||||
*dest = ct->status;
|
||||
|
@ -151,20 +151,22 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
|
|||
return;
|
||||
}
|
||||
case NFT_CT_L3PROTOCOL:
|
||||
*dest = nf_ct_l3num(ct);
|
||||
nft_reg_store8(dest, nf_ct_l3num(ct));
|
||||
return;
|
||||
case NFT_CT_PROTOCOL:
|
||||
*dest = nf_ct_protonum(ct);
|
||||
nft_reg_store8(dest, nf_ct_protonum(ct));
|
||||
return;
|
||||
#ifdef CONFIG_NF_CONNTRACK_ZONES
|
||||
case NFT_CT_ZONE: {
|
||||
const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
|
||||
u16 zoneid;
|
||||
|
||||
if (priv->dir < IP_CT_DIR_MAX)
|
||||
*dest = nf_ct_zone_id(zone, priv->dir);
|
||||
zoneid = nf_ct_zone_id(zone, priv->dir);
|
||||
else
|
||||
*dest = zone->id;
|
||||
zoneid = zone->id;
|
||||
|
||||
nft_reg_store16(dest, zoneid);
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
@ -183,10 +185,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
|
|||
nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
|
||||
return;
|
||||
case NFT_CT_PROTO_SRC:
|
||||
*dest = (__force __u16)tuple->src.u.all;
|
||||
nft_reg_store16(dest, (__force u16)tuple->src.u.all);
|
||||
return;
|
||||
case NFT_CT_PROTO_DST:
|
||||
*dest = (__force __u16)tuple->dst.u.all;
|
||||
nft_reg_store16(dest, (__force u16)tuple->dst.u.all);
|
||||
return;
|
||||
default:
|
||||
break;
|
||||
|
@ -205,7 +207,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
|
|||
const struct nft_ct *priv = nft_expr_priv(expr);
|
||||
struct sk_buff *skb = pkt->skb;
|
||||
enum ip_conntrack_info ctinfo;
|
||||
u16 value = regs->data[priv->sreg];
|
||||
u16 value = nft_reg_load16(®s->data[priv->sreg]);
|
||||
struct nf_conn *ct;
|
||||
|
||||
ct = nf_ct_get(skb, &ctinfo);
|
||||
|
@ -542,7 +544,8 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
|
|||
case IP_CT_DIR_REPLY:
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
err = -EINVAL;
|
||||
goto err1;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -45,16 +45,15 @@ void nft_meta_get_eval(const struct nft_expr *expr,
|
|||
*dest = skb->len;
|
||||
break;
|
||||
case NFT_META_PROTOCOL:
|
||||
*dest = 0;
|
||||
*(__be16 *)dest = skb->protocol;
|
||||
nft_reg_store16(dest, (__force u16)skb->protocol);
|
||||
break;
|
||||
case NFT_META_NFPROTO:
|
||||
*dest = nft_pf(pkt);
|
||||
nft_reg_store8(dest, nft_pf(pkt));
|
||||
break;
|
||||
case NFT_META_L4PROTO:
|
||||
if (!pkt->tprot_set)
|
||||
goto err;
|
||||
*dest = pkt->tprot;
|
||||
nft_reg_store8(dest, pkt->tprot);
|
||||
break;
|
||||
case NFT_META_PRIORITY:
|
||||
*dest = skb->priority;
|
||||
|
@ -85,14 +84,12 @@ void nft_meta_get_eval(const struct nft_expr *expr,
|
|||
case NFT_META_IIFTYPE:
|
||||
if (in == NULL)
|
||||
goto err;
|
||||
*dest = 0;
|
||||
*(u16 *)dest = in->type;
|
||||
nft_reg_store16(dest, in->type);
|
||||
break;
|
||||
case NFT_META_OIFTYPE:
|
||||
if (out == NULL)
|
||||
goto err;
|
||||
*dest = 0;
|
||||
*(u16 *)dest = out->type;
|
||||
nft_reg_store16(dest, out->type);
|
||||
break;
|
||||
case NFT_META_SKUID:
|
||||
sk = skb_to_full_sk(skb);
|
||||
|
@ -142,19 +139,19 @@ void nft_meta_get_eval(const struct nft_expr *expr,
|
|||
#endif
|
||||
case NFT_META_PKTTYPE:
|
||||
if (skb->pkt_type != PACKET_LOOPBACK) {
|
||||
*dest = skb->pkt_type;
|
||||
nft_reg_store8(dest, skb->pkt_type);
|
||||
break;
|
||||
}
|
||||
|
||||
switch (nft_pf(pkt)) {
|
||||
case NFPROTO_IPV4:
|
||||
if (ipv4_is_multicast(ip_hdr(skb)->daddr))
|
||||
*dest = PACKET_MULTICAST;
|
||||
nft_reg_store8(dest, PACKET_MULTICAST);
|
||||
else
|
||||
*dest = PACKET_BROADCAST;
|
||||
nft_reg_store8(dest, PACKET_BROADCAST);
|
||||
break;
|
||||
case NFPROTO_IPV6:
|
||||
*dest = PACKET_MULTICAST;
|
||||
nft_reg_store8(dest, PACKET_MULTICAST);
|
||||
break;
|
||||
case NFPROTO_NETDEV:
|
||||
switch (skb->protocol) {
|
||||
|
@ -168,14 +165,14 @@ void nft_meta_get_eval(const struct nft_expr *expr,
|
|||
goto err;
|
||||
|
||||
if (ipv4_is_multicast(iph->daddr))
|
||||
*dest = PACKET_MULTICAST;
|
||||
nft_reg_store8(dest, PACKET_MULTICAST);
|
||||
else
|
||||
*dest = PACKET_BROADCAST;
|
||||
nft_reg_store8(dest, PACKET_BROADCAST);
|
||||
|
||||
break;
|
||||
}
|
||||
case htons(ETH_P_IPV6):
|
||||
*dest = PACKET_MULTICAST;
|
||||
nft_reg_store8(dest, PACKET_MULTICAST);
|
||||
break;
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
|
@ -230,7 +227,9 @@ void nft_meta_set_eval(const struct nft_expr *expr,
|
|||
{
|
||||
const struct nft_meta *meta = nft_expr_priv(expr);
|
||||
struct sk_buff *skb = pkt->skb;
|
||||
u32 value = regs->data[meta->sreg];
|
||||
u32 *sreg = ®s->data[meta->sreg];
|
||||
u32 value = *sreg;
|
||||
u8 pkt_type;
|
||||
|
||||
switch (meta->key) {
|
||||
case NFT_META_MARK:
|
||||
|
@ -240,9 +239,12 @@ void nft_meta_set_eval(const struct nft_expr *expr,
|
|||
skb->priority = value;
|
||||
break;
|
||||
case NFT_META_PKTTYPE:
|
||||
if (skb->pkt_type != value &&
|
||||
skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type))
|
||||
skb->pkt_type = value;
|
||||
pkt_type = nft_reg_load8(sreg);
|
||||
|
||||
if (skb->pkt_type != pkt_type &&
|
||||
skb_pkt_type_ok(pkt_type) &&
|
||||
skb_pkt_type_ok(skb->pkt_type))
|
||||
skb->pkt_type = pkt_type;
|
||||
break;
|
||||
case NFT_META_NFTRACE:
|
||||
skb->nf_trace = !!value;
|
||||
|
|
|
@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
|
|||
}
|
||||
|
||||
if (priv->sreg_proto_min) {
|
||||
range.min_proto.all =
|
||||
*(__be16 *)®s->data[priv->sreg_proto_min];
|
||||
range.max_proto.all =
|
||||
*(__be16 *)®s->data[priv->sreg_proto_max];
|
||||
range.min_proto.all = (__force __be16)nft_reg_load16(
|
||||
®s->data[priv->sreg_proto_min]);
|
||||
range.max_proto.all = (__force __be16)nft_reg_load16(
|
||||
®s->data[priv->sreg_proto_max]);
|
||||
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
|
||||
}
|
||||
|
||||
|
|
|
@ -15,6 +15,11 @@
|
|||
#include <linux/netfilter/nf_tables.h>
|
||||
#include <net/netfilter/nf_tables.h>
|
||||
|
||||
struct nft_bitmap_elem {
|
||||
struct list_head head;
|
||||
struct nft_set_ext ext;
|
||||
};
|
||||
|
||||
/* This bitmap uses two bits to represent one element. These two bits determine
|
||||
* the element state in the current and the future generation.
|
||||
*
|
||||
|
@ -41,13 +46,22 @@
|
|||
* restore its previous state.
|
||||
*/
|
||||
struct nft_bitmap {
|
||||
u16 bitmap_size;
|
||||
u8 bitmap[];
|
||||
struct list_head list;
|
||||
u16 bitmap_size;
|
||||
u8 bitmap[];
|
||||
};
|
||||
|
||||
static inline void nft_bitmap_location(u32 key, u32 *idx, u32 *off)
|
||||
static inline void nft_bitmap_location(const struct nft_set *set,
|
||||
const void *key,
|
||||
u32 *idx, u32 *off)
|
||||
{
|
||||
u32 k = (key << 1);
|
||||
u32 k;
|
||||
|
||||
if (set->klen == 2)
|
||||
k = *(u16 *)key;
|
||||
else
|
||||
k = *(u8 *)key;
|
||||
k <<= 1;
|
||||
|
||||
*idx = k / BITS_PER_BYTE;
|
||||
*off = k % BITS_PER_BYTE;
|
||||
|
@ -69,26 +83,48 @@ static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
|
|||
u8 genmask = nft_genmask_cur(net);
|
||||
u32 idx, off;
|
||||
|
||||
nft_bitmap_location(*key, &idx, &off);
|
||||
nft_bitmap_location(set, key, &idx, &off);
|
||||
|
||||
return nft_bitmap_active(priv->bitmap, idx, off, genmask);
|
||||
}
|
||||
|
||||
static struct nft_bitmap_elem *
|
||||
nft_bitmap_elem_find(const struct nft_set *set, struct nft_bitmap_elem *this,
|
||||
u8 genmask)
|
||||
{
|
||||
const struct nft_bitmap *priv = nft_set_priv(set);
|
||||
struct nft_bitmap_elem *be;
|
||||
|
||||
list_for_each_entry_rcu(be, &priv->list, head) {
|
||||
if (memcmp(nft_set_ext_key(&be->ext),
|
||||
nft_set_ext_key(&this->ext), set->klen) ||
|
||||
!nft_set_elem_active(&be->ext, genmask))
|
||||
continue;
|
||||
|
||||
return be;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int nft_bitmap_insert(const struct net *net, const struct nft_set *set,
|
||||
const struct nft_set_elem *elem,
|
||||
struct nft_set_ext **_ext)
|
||||
struct nft_set_ext **ext)
|
||||
{
|
||||
struct nft_bitmap *priv = nft_set_priv(set);
|
||||
struct nft_set_ext *ext = elem->priv;
|
||||
struct nft_bitmap_elem *new = elem->priv, *be;
|
||||
u8 genmask = nft_genmask_next(net);
|
||||
u32 idx, off;
|
||||
|
||||
nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
|
||||
if (nft_bitmap_active(priv->bitmap, idx, off, genmask))
|
||||
be = nft_bitmap_elem_find(set, new, genmask);
|
||||
if (be) {
|
||||
*ext = &be->ext;
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
nft_bitmap_location(set, nft_set_ext_key(&new->ext), &idx, &off);
|
||||
/* Enter 01 state. */
|
||||
priv->bitmap[idx] |= (genmask << off);
|
||||
list_add_tail_rcu(&new->head, &priv->list);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -98,13 +134,14 @@ static void nft_bitmap_remove(const struct net *net,
|
|||
const struct nft_set_elem *elem)
|
||||
{
|
||||
struct nft_bitmap *priv = nft_set_priv(set);
|
||||
struct nft_set_ext *ext = elem->priv;
|
||||
struct nft_bitmap_elem *be = elem->priv;
|
||||
u8 genmask = nft_genmask_next(net);
|
||||
u32 idx, off;
|
||||
|
||||
nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
|
||||
nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
|
||||
/* Enter 00 state. */
|
||||
priv->bitmap[idx] &= ~(genmask << off);
|
||||
list_del_rcu(&be->head);
|
||||
}
|
||||
|
||||
static void nft_bitmap_activate(const struct net *net,
|
||||
|
@ -112,74 +149,52 @@ static void nft_bitmap_activate(const struct net *net,
|
|||
const struct nft_set_elem *elem)
|
||||
{
|
||||
struct nft_bitmap *priv = nft_set_priv(set);
|
||||
struct nft_set_ext *ext = elem->priv;
|
||||
struct nft_bitmap_elem *be = elem->priv;
|
||||
u8 genmask = nft_genmask_next(net);
|
||||
u32 idx, off;
|
||||
|
||||
nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
|
||||
nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
|
||||
/* Enter 11 state. */
|
||||
priv->bitmap[idx] |= (genmask << off);
|
||||
nft_set_elem_change_active(net, set, &be->ext);
|
||||
}
|
||||
|
||||
static bool nft_bitmap_flush(const struct net *net,
|
||||
const struct nft_set *set, void *ext)
|
||||
const struct nft_set *set, void *_be)
|
||||
{
|
||||
struct nft_bitmap *priv = nft_set_priv(set);
|
||||
u8 genmask = nft_genmask_next(net);
|
||||
struct nft_bitmap_elem *be = _be;
|
||||
u32 idx, off;
|
||||
|
||||
nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
|
||||
nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
|
||||
    /* Enter 10 state, similar to deactivation. */
    priv->bitmap[idx] &= ~(genmask << off);
    nft_set_elem_change_active(net, set, &be->ext);

    return true;
}

static struct nft_set_ext *nft_bitmap_ext_alloc(const struct nft_set *set,
                                                const struct nft_set_elem *elem)
{
    struct nft_set_ext_tmpl tmpl;
    struct nft_set_ext *ext;

    nft_set_ext_prepare(&tmpl);
    nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);

    ext = kzalloc(tmpl.len, GFP_KERNEL);
    if (!ext)
        return NULL;

    nft_set_ext_init(ext, &tmpl);
    memcpy(nft_set_ext_key(ext), elem->key.val.data, set->klen);

    return ext;
}

static void *nft_bitmap_deactivate(const struct net *net,
                                   const struct nft_set *set,
                                   const struct nft_set_elem *elem)
{
    struct nft_bitmap *priv = nft_set_priv(set);
    struct nft_bitmap_elem *this = elem->priv, *be;
    u8 genmask = nft_genmask_next(net);
    struct nft_set_ext *ext;
    u32 idx, off, key = 0;
    u32 idx, off;

    memcpy(&key, elem->key.val.data, set->klen);
    nft_bitmap_location(key, &idx, &off);
    nft_bitmap_location(set, elem->key.val.data, &idx, &off);

    if (!nft_bitmap_active(priv->bitmap, idx, off, genmask))
        return NULL;

    /* We have no real set extension since this is a bitmap, allocate this
     * dummy object that is released from the commit/abort path.
     */
    ext = nft_bitmap_ext_alloc(set, elem);
    if (!ext)
    be = nft_bitmap_elem_find(set, this, genmask);
    if (!be)
        return NULL;

    /* Enter 10 state. */
    priv->bitmap[idx] &= ~(genmask << off);
    nft_set_elem_change_active(net, set, &be->ext);

    return ext;
    return be;
}

static void nft_bitmap_walk(const struct nft_ctx *ctx,

@@ -187,47 +202,23 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx,
                            struct nft_set_iter *iter)
{
    const struct nft_bitmap *priv = nft_set_priv(set);
    struct nft_set_ext_tmpl tmpl;
    struct nft_bitmap_elem *be;
    struct nft_set_elem elem;
    struct nft_set_ext *ext;
    int idx, off;
    u16 key;

    nft_set_ext_prepare(&tmpl);
    nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
    list_for_each_entry_rcu(be, &priv->list, head) {
        if (iter->count < iter->skip)
            goto cont;
        if (!nft_set_elem_active(&be->ext, iter->genmask))
            goto cont;

    for (idx = 0; idx < priv->bitmap_size; idx++) {
        for (off = 0; off < BITS_PER_BYTE; off += 2) {
            if (iter->count < iter->skip)
                goto cont;
        elem.priv = be;

            if (!nft_bitmap_active(priv->bitmap, idx, off,
                                   iter->genmask))
                goto cont;
        iter->err = iter->fn(ctx, set, iter, &elem);

            ext = kzalloc(tmpl.len, GFP_KERNEL);
            if (!ext) {
                iter->err = -ENOMEM;
                return;
            }
            nft_set_ext_init(ext, &tmpl);
            key = ((idx * BITS_PER_BYTE) + off) >> 1;
            memcpy(nft_set_ext_key(ext), &key, set->klen);

            elem.priv = ext;
            iter->err = iter->fn(ctx, set, iter, &elem);

            /* On set flush, this dummy extension object is released
             * from the commit/abort path.
             */
            if (!iter->flush)
                kfree(ext);

            if (iter->err < 0)
                return;
        if (iter->err < 0)
            return;
cont:
            iter->count++;
        }
        iter->count++;
    }
}

@@ -258,6 +249,7 @@ static int nft_bitmap_init(const struct nft_set *set,
{
    struct nft_bitmap *priv = nft_set_priv(set);

    INIT_LIST_HEAD(&priv->list);
    priv->bitmap_size = nft_bitmap_size(set->klen);

    return 0;

@@ -283,6 +275,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,

static struct nft_set_ops nft_bitmap_ops __read_mostly = {
    .privsize = nft_bitmap_privsize,
    .elemsize = offsetof(struct nft_bitmap_elem, ext),
    .estimate = nft_bitmap_estimate,
    .init = nft_bitmap_init,
    .destroy = nft_bitmap_destroy,
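For context, the bitmap set backend tracks every possible key with two adjacent bits so that updates can be staged across a generation flip: one bit for the current generation, one for the next. A minimal userspace sketch of the idx/off arithmetic and of a deactivation that only clears the next-generation bit; the helper names and the exact bit layout here are illustrative, not the kernel macros:

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_BYTE 8
    #define GEN_CUR  0x1    /* bit for the current generation */
    #define GEN_NEXT 0x2    /* bit for the next (uncommitted) generation */

    /* Each key owns two adjacent bits, so four keys fit in one byte. */
    static void bitmap_location(uint32_t key, uint32_t *idx, uint32_t *off)
    {
        *idx = key / (BITS_PER_BYTE / 2);
        *off = (key % (BITS_PER_BYTE / 2)) * 2;
    }

    static int bitmap_active(const uint8_t *bitmap, uint32_t idx, uint32_t off,
                             uint8_t genmask)
    {
        return (bitmap[idx] & (genmask << off)) != 0;
    }

    int main(void)
    {
        uint8_t bitmap[64] = { 0 };
        uint32_t idx, off, key = 10;

        bitmap_location(key, &idx, &off);

        /* Insert + commit: active in both generations. */
        bitmap[idx] |= (GEN_CUR | GEN_NEXT) << off;

        /* Deactivate: clear only the next-generation bit, so lookups in the
         * current generation still match until the transaction commits. */
        bitmap[idx] &= ~(GEN_NEXT << off);

        printf("current gen: %d, next gen: %d\n",
               bitmap_active(bitmap, idx, off, GEN_CUR),
               bitmap_active(bitmap, idx, off, GEN_NEXT));
        return 0;
    }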
@@ -96,6 +96,44 @@ EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];

static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
    "nlk_cb_mutex-ROUTE",
    "nlk_cb_mutex-1",
    "nlk_cb_mutex-USERSOCK",
    "nlk_cb_mutex-FIREWALL",
    "nlk_cb_mutex-SOCK_DIAG",
    "nlk_cb_mutex-NFLOG",
    "nlk_cb_mutex-XFRM",
    "nlk_cb_mutex-SELINUX",
    "nlk_cb_mutex-ISCSI",
    "nlk_cb_mutex-AUDIT",
    "nlk_cb_mutex-FIB_LOOKUP",
    "nlk_cb_mutex-CONNECTOR",
    "nlk_cb_mutex-NETFILTER",
    "nlk_cb_mutex-IP6_FW",
    "nlk_cb_mutex-DNRTMSG",
    "nlk_cb_mutex-KOBJECT_UEVENT",
    "nlk_cb_mutex-GENERIC",
    "nlk_cb_mutex-17",
    "nlk_cb_mutex-SCSITRANSPORT",
    "nlk_cb_mutex-ECRYPTFS",
    "nlk_cb_mutex-RDMA",
    "nlk_cb_mutex-CRYPTO",
    "nlk_cb_mutex-SMC",
    "nlk_cb_mutex-23",
    "nlk_cb_mutex-24",
    "nlk_cb_mutex-25",
    "nlk_cb_mutex-26",
    "nlk_cb_mutex-27",
    "nlk_cb_mutex-28",
    "nlk_cb_mutex-29",
    "nlk_cb_mutex-30",
    "nlk_cb_mutex-31",
    "nlk_cb_mutex-MAX_LINKS"
};

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

@@ -585,6 +623,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
    } else {
        nlk->cb_mutex = &nlk->cb_def_mutex;
        mutex_init(nlk->cb_mutex);
        lockdep_set_class_and_name(nlk->cb_mutex,
                                   nlk_cb_mutex_keys + protocol,
                                   nlk_cb_mutex_key_strings[protocol]);
    }
    init_waitqueue_head(&nlk->wait);
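The point of the two tables above is to give each netlink protocol's callback mutex its own lockdep class and human-readable name, so unrelated protocols no longer share a class and trigger false RTNL deadlock reports. A tiny pthreads sketch of the per-protocol, index-by-protocol-number idea (this only illustrates the indexing pattern; it is not lockdep, and the table entries are shortened):

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_LINKS 32

    /* One mutex and one readable name per protocol number. */
    static pthread_mutex_t cb_mutex[MAX_LINKS];
    static const char *const cb_mutex_name[MAX_LINKS] = {
        [0]  = "cb_mutex-ROUTE",
        [16] = "cb_mutex-GENERIC",
        [21] = "cb_mutex-CRYPTO",
    };

    static pthread_mutex_t *get_cb_mutex(int protocol)
    {
        return &cb_mutex[protocol];
    }

    int main(void)
    {
        int proto = 21; /* NETLINK_CRYPTO is 21 in the uapi headers */

        for (int i = 0; i < MAX_LINKS; i++)
            pthread_mutex_init(&cb_mutex[i], NULL);

        pthread_mutex_lock(get_cb_mutex(proto));
        printf("holding %s\n",
               cb_mutex_name[proto] ? cb_mutex_name[proto] : "cb_mutex-?");
        pthread_mutex_unlock(get_cb_mutex(proto));
        return 0;
    }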
@@ -783,8 +783,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)

        if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                           skb, CTRL_CMD_NEWFAMILY) < 0)
                           skb, CTRL_CMD_NEWFAMILY) < 0) {
            n--;
            break;
        }
    }

    cb->args[0] = n;
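The counting regression fixed here: when a family no longer fits in the reply, the entry was counted as dumped even though it was never emitted, so the next dump round skipped it. Decrementing the counter before breaking makes the resume point land on the family that still needs to go out. A self-contained sketch of the same paged-dump bookkeeping over a plain array (family names and buffer size are made up):

    #include <stdio.h>
    #include <string.h>

    static const char *families[] = { "nlctrl", "nl80211", "TASKSTATS", "ACPI" };
    #define NFAMILIES 4

    static int fill(char *buf, size_t len, size_t *used, const char *name)
    {
        size_t n = strlen(name) + 1;

        if (*used + n > len)
            return -1;              /* message full, like ctrl_fill_info() < 0 */
        memcpy(buf + *used, name, n);
        *used += n;
        return 0;
    }

    /* Returns the count to skip on the next round (cb->args[0] analogue). */
    static int dump(int fams_to_skip, char *buf, size_t len)
    {
        size_t used = 0;
        int n = 0;

        for (int i = 0; i < NFAMILIES; i++) {
            if (n++ < fams_to_skip)
                continue;           /* already sent in a previous round */
            if (fill(buf, len, &used, families[i]) < 0) {
                n--;                /* families[i] did NOT fit: revisit it */
                break;
            }
        }
        return n;
    }

    int main(void)
    {
        char buf[16];
        int skip = 0, prev = -1;

        while (skip < NFAMILIES && skip != prev) {
            prev = skip;
            skip = dump(skip, buf, sizeof(buf));
            printf("dumped %d of %d families so far\n", skip, NFAMILIES);
        }
        return 0;
    }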
@@ -604,7 +604,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
            ipv4 = true;
            break;
        case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
            SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
            SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
                            nla_get_in6_addr(a), is_mask);
            ipv6 = true;
            break;

@@ -665,6 +665,8 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
            tun_flags |= TUNNEL_VXLAN_OPT;
            opts_type = type;
            break;
        case OVS_TUNNEL_KEY_ATTR_PAD:
            break;
        default:
            OVS_NLERR(log, "Unknown IP tunnel attribute %d",
                      type);
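The one-character fix above stores the IPV6_SRC tunnel attribute into the source field instead of clobbering the destination. A compact sketch of keeping per-attribute destinations straight in such a switch (the struct layout and attribute names here are invented for illustration):

    #include <stdio.h>

    struct in6_addr_toy { unsigned char s6_addr[16]; };

    struct tun_key_toy {
        struct { struct in6_addr_toy src, dst; } ipv6;
    };

    enum { TUN_ATTR_IPV6_SRC, TUN_ATTR_IPV6_DST };

    /* Copy an attribute payload into the field it actually describes. */
    static void put_attr(struct tun_key_toy *k, int type,
                         const struct in6_addr_toy *a)
    {
        switch (type) {
        case TUN_ATTR_IPV6_SRC:
            k->ipv6.src = *a;   /* was wrongly written to .dst before the fix */
            break;
        case TUN_ATTR_IPV6_DST:
            k->ipv6.dst = *a;
            break;
        }
    }

    int main(void)
    {
        struct tun_key_toy key = { 0 };
        struct in6_addr_toy a = { .s6_addr = { [15] = 1 } };    /* ::1 */

        put_attr(&key, TUN_ATTR_IPV6_SRC, &a);
        printf("src last byte %u, dst last byte %u\n",
               key.ipv6.src.s6_addr[15], key.ipv6.dst.s6_addr[15]);
        return 0;
    }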
@@ -275,6 +275,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
        rxrpc_conn_retransmit_call(conn, skb);
        return 0;

    case RXRPC_PACKET_TYPE_BUSY:
        /* Just ignore BUSY packets for now. */
        return 0;

    case RXRPC_PACKET_TYPE_ABORT:
        if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
                          &wtmp, sizeof(wtmp)) < 0)


@@ -201,9 +201,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
    pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);

    if (p->set_tc_index) {
        int wlen = skb_network_offset(skb);

        switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
            if (skb_cow_head(skb, sizeof(struct iphdr)))
            wlen += sizeof(struct iphdr);
            if (!pskb_may_pull(skb, wlen) ||
                skb_try_make_writable(skb, wlen))
                goto drop;

            skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))

@@ -211,7 +215,9 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
            break;

        case htons(ETH_P_IPV6):
            if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
            wlen += sizeof(struct ipv6hdr);
            if (!pskb_may_pull(skb, wlen) ||
                skb_try_make_writable(skb, wlen))
                goto drop;

            skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
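The dsmark change makes sure the full IP header is actually present and writable before the qdisc reads or rewrites the DS field, instead of trusting that the network header is linear. A userspace sketch of the same bounds check on a raw buffer; the struct and helper names are mine, not the kernel API:

    #include <stdint.h>
    #include <stdio.h>

    struct pkt {
        uint8_t data[128];
        size_t len;         /* bytes actually present */
        size_t net_off;     /* offset of the network header */
    };

    /* Only touch the DSCP byte if the whole IPv4 header fits in the buffer. */
    static int get_dsfield_ipv4(const struct pkt *p, uint8_t *ds)
    {
        const size_t iphdr_len = 20;
        size_t wlen = p->net_off + iphdr_len;

        if (p->len < wlen)
            return -1;      /* header truncated: drop instead of reading garbage */
        *ds = p->data[p->net_off + 1];  /* TOS/DS byte follows the version/IHL byte */
        return 0;
    }

    int main(void)
    {
        struct pkt p = { .len = 34, .net_off = 14 };    /* Ethernet + full IPv4 header */
        uint8_t ds;

        p.data[p.net_off + 1] = 0xb8;   /* DSCP 46 (EF) shifted left by two ECN bits */
        if (get_dsfield_ipv4(&p, &ds) == 0)
            printf("ds field 0x%02x\n", ds);

        p.len = 20;                     /* simulate a truncated header */
        printf("truncated: %d\n", get_dsfield_ipv4(&p, &ds));
        return 0;
    }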
@@ -71,9 +71,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
{
    struct net *net = sock_net(sk);
    struct sctp_sock *sp;
    int i;
    sctp_paramhdr_t *p;
    int err;
    int i;

    /* Retrieve the SCTP per socket area. */
    sp = sctp_sk((struct sock *)sk);

@@ -264,8 +263,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a

    /* AUTH related initializations */
    INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
    err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
    if (err)
    if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
        goto fail_init;

    asoc->active_key_id = ep->active_key_id;


@@ -546,7 +546,6 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
    struct sctp_association *asoc = tp->asoc;
    struct sctp_chunk *chunk, *tmp;
    int pkt_count, gso = 0;
    int confirm;
    struct dst_entry *dst;
    struct sk_buff *head;
    struct sctphdr *sh;

@@ -625,13 +624,13 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
        asoc->peer.last_sent_to = tp;
    }
    head->ignore_df = packet->ipfragok;
    confirm = tp->dst_pending_confirm;
    if (confirm)
    if (tp->dst_pending_confirm)
        skb_set_dst_pending_confirm(head, 1);
    /* neighbour should be confirmed on successful transmission or
     * positive error
     */
    if (tp->af_specific->sctp_xmit(head, tp) >= 0 && confirm)
    if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
        tp->dst_pending_confirm)
        tp->dst_pending_confirm = 0;

out:
@@ -382,17 +382,18 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
}

static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
                                    struct sctp_sndrcvinfo *sinfo,
                                    struct list_head *queue, int msg_len)
                                    struct sctp_sndrcvinfo *sinfo, int msg_len)
{
    struct sctp_outq *q = &asoc->outqueue;
    struct sctp_chunk *chk, *temp;

    list_for_each_entry_safe(chk, temp, queue, list) {
    list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
        if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
            chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
            continue;

        list_del_init(&chk->list);
        q->out_qlen -= chk->skb->len;
        asoc->sent_cnt_removable--;
        asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;

@@ -431,9 +432,7 @@ void sctp_prsctp_prune(struct sctp_association *asoc,
        return;
    }

    sctp_prsctp_prune_unsent(asoc, sinfo,
                             &asoc->outqueue.out_chunk_list,
                             msg_len);
    sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
}

/* Mark all the eligible packets on a transport for retransmission. */
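The reworked helper walks the association's unsent chunk list directly and keeps the queued byte count in sync as chunks are abandoned. A small userspace sketch of the same pattern, safe unlinking while iterating plus byte accounting, over a minimal singly linked list (list layout, names, and the priority comparison are illustrative, not the kernel list API):

    #include <stdio.h>
    #include <stdlib.h>

    struct chunk {
        struct chunk *next;
        int prio;       /* stand-in for sinfo_timetolive under PR-SCTP PRIO */
        size_t len;     /* bytes this chunk contributes to the queue */
    };

    struct outq {
        struct chunk *head;
        size_t out_qlen;    /* total queued bytes, must stay in sync */
    };

    /* Drop chunks whose priority value exceeds the new message's, updating the
     * byte count; *pp lets us unlink safely while iterating (the kernel uses
     * list_for_each_entry_safe() for the same reason). */
    static void prune_unsent(struct outq *q, int msg_prio)
    {
        struct chunk **pp = &q->head;

        while (*pp) {
            struct chunk *c = *pp;

            if (c->prio <= msg_prio) {  /* keep: same or higher priority */
                pp = &c->next;
                continue;
            }
            *pp = c->next;
            q->out_qlen -= c->len;
            free(c);
        }
    }

    static void push(struct outq *q, int prio, size_t len)
    {
        struct chunk *c = malloc(sizeof(*c));

        c->next = q->head;
        c->prio = prio;
        c->len = len;
        q->head = c;
        q->out_qlen += len;
    }

    int main(void)
    {
        struct outq q = { 0 };

        push(&q, 10, 100);
        push(&q, 50, 200);
        push(&q, 5, 300);
        prune_unsent(&q, 20);
        printf("queued bytes after prune: %zu\n", q.out_qlen);  /* 400 */
        return 0;
    }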
13  net/socket.c

@@ -652,6 +652,16 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
}
EXPORT_SYMBOL(kernel_sendmsg);

static bool skb_is_err_queue(const struct sk_buff *skb)
{
    /* pkt_type of skbs enqueued on the error queue are set to
     * PACKET_OUTGOING in skb_set_err_queue(). This is only safe to do
     * in recvmsg, since skbs received on a local socket will never
     * have a pkt_type of PACKET_OUTGOING.
     */
    return skb->pkt_type == PACKET_OUTGOING;
}

/*
 * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
 */

@@ -695,7 +705,8 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
        put_cmsg(msg, SOL_SOCKET,
                 SCM_TIMESTAMPING, sizeof(tss), &tss);

        if (skb->len && (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS))
        if (skb_is_err_queue(skb) && skb->len &&
            SKB_EXT_ERR(skb)->opt_stats)
            put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS,
                     skb->len, skb->data);
}
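The guard now keys on properties of the skb itself (did it come off the error queue, and were stats attached when it was queued) rather than on a socket flag alone, which avoids emitting a bogus SCM_TIMESTAMPING_OPT_STATS cmsg for ordinary receive data. A sketch of the same predicate over plain structs (field names simplified and illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    enum pkt_type { PACKET_HOST_TOY, PACKET_OUTGOING_TOY };

    struct skb_toy {
        enum pkt_type pkt_type; /* PACKET_OUTGOING only set on the error queue */
        size_t len;
        bool opt_stats;         /* were TCP stats attached when queued? */
    };

    static bool skb_is_err_queue_toy(const struct skb_toy *skb)
    {
        return skb->pkt_type == PACKET_OUTGOING_TOY;
    }

    /* Decide whether an OPT_STATS control message may be emitted. */
    static bool want_opt_stats(const struct skb_toy *skb)
    {
        return skb_is_err_queue_toy(skb) && skb->len && skb->opt_stats;
    }

    int main(void)
    {
        struct skb_toy rx  = { PACKET_HOST_TOY, 64, false };
        struct skb_toy err = { PACKET_OUTGOING_TOY, 48, true };

        printf("regular rx skb: %d, error-queue skb: %d\n",
               want_opt_stats(&rx), want_opt_stats(&err));
        return 0;
    }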
@@ -141,6 +141,11 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
static void tipc_subscrp_timeout(unsigned long data)
{
    struct tipc_subscription *sub = (struct tipc_subscription *)data;
    struct tipc_subscriber *subscriber = sub->subscriber;

    spin_lock_bh(&subscriber->lock);
    tipc_nametbl_unsubscribe(sub);
    spin_unlock_bh(&subscriber->lock);

    /* Notify subscriber of timeout */
    tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,

@@ -173,7 +178,6 @@ static void tipc_subscrp_kref_release(struct kref *kref)
    struct tipc_subscriber *subscriber = sub->subscriber;

    spin_lock_bh(&subscriber->lock);
    tipc_nametbl_unsubscribe(sub);
    list_del(&sub->subscrp_list);
    atomic_dec(&tn->subscription_count);
    spin_unlock_bh(&subscriber->lock);

@@ -205,6 +209,7 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
        if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
            continue;

        tipc_nametbl_unsubscribe(sub);
        tipc_subscrp_get(sub);
        spin_unlock_bh(&subscriber->lock);
        tipc_subscrp_delete(sub);


@@ -146,6 +146,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
    if (s) {
        struct unix_sock *u = unix_sk(s);

        BUG_ON(!atomic_long_read(&u->inflight));
        BUG_ON(list_empty(&u->link));

        if (atomic_long_dec_and_test(&u->inflight))

@@ -341,6 +342,14 @@ void unix_gc(void)
    }
    list_del(&cursor);

    /* Now gc_candidates contains only garbage. Restore original
     * inflight counters for these as well, and remove the skbuffs
     * which are creating the cycle(s).
     */
    skb_queue_head_init(&hitlist);
    list_for_each_entry(u, &gc_candidates, link)
        scan_children(&u->sk, inc_inflight, &hitlist);

    /* not_cycle_list contains those sockets which do not make up a
     * cycle. Restore these to the inflight list.
     */

@@ -350,14 +359,6 @@ void unix_gc(void)
        list_move_tail(&u->link, &gc_inflight_list);
    }

    /* Now gc_candidates contains only garbage. Restore original
     * inflight counters for these as well, and remove the skbuffs
     * which are creating the cycle(s).
     */
    skb_queue_head_init(&hitlist);
    list_for_each_entry(u, &gc_candidates, link)
        scan_children(&u->sk, inc_inflight, &hitlist);

    spin_unlock(&unix_gc_lock);

    /* Here we are. Hitlist is filled. Die. */
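unix_gc() collects the doomed skbuffs into a private hitlist while still holding unix_gc_lock, and only purges that list after the lock is dropped; the reordering above makes sure the inflight counters of garbage candidates are restored before anything outside the lock can observe them. A generic pthreads sketch of that collect-under-lock, free-after-unlock shape (the list layout and names are mine, not the kernel's):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
        int doomed;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *inflight;   /* protected by lock */

    /* Move doomed nodes to a private hitlist under the lock, then release
     * them after unlocking so the expensive frees happen lock-free. */
    static void gc(void)
    {
        struct node *hitlist = NULL, **pp;

        pthread_mutex_lock(&lock);
        pp = &inflight;
        while (*pp) {
            struct node *n = *pp;

            if (n->doomed) {
                *pp = n->next;
                n->next = hitlist;
                hitlist = n;
            } else {
                pp = &n->next;
            }
        }
        pthread_mutex_unlock(&lock);

        while (hitlist) {           /* hitlist is filled: die */
            struct node *n = hitlist;

            hitlist = n->next;
            free(n);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 6; i++) {
            struct node *n = calloc(1, sizeof(*n));

            n->doomed = i & 1;
            n->next = inflight;
            inflight = n;
        }
        gc();
        for (struct node *n = inflight; n; n = n->next)
            printf("survivor doomed=%d\n", n->doomed);
        return 0;
    }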
@@ -1102,10 +1102,19 @@ static const struct proto_ops vsock_dgram_ops = {
    .sendpage = sock_no_sendpage,
};

static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
{
    if (!transport->cancel_pkt)
        return -EOPNOTSUPP;

    return transport->cancel_pkt(vsk);
}

static void vsock_connect_timeout(struct work_struct *work)
{
    struct sock *sk;
    struct vsock_sock *vsk;
    int cancel = 0;

    vsk = container_of(work, struct vsock_sock, dwork.work);
    sk = sk_vsock(vsk);

@@ -1116,8 +1125,11 @@ static void vsock_connect_timeout(struct work_struct *work)
        sk->sk_state = SS_UNCONNECTED;
        sk->sk_err = ETIMEDOUT;
        sk->sk_error_report(sk);
        cancel = 1;
    }
    release_sock(sk);
    if (cancel)
        vsock_transport_cancel_pkt(vsk);

    sock_put(sk);
}

@@ -1224,11 +1236,13 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
            err = sock_intr_errno(timeout);
            sk->sk_state = SS_UNCONNECTED;
            sock->state = SS_UNCONNECTED;
            vsock_transport_cancel_pkt(vsk);
            goto out_wait;
        } else if (timeout == 0) {
            err = -ETIMEDOUT;
            sk->sk_state = SS_UNCONNECTED;
            sock->state = SS_UNCONNECTED;
            vsock_transport_cancel_pkt(vsk);
            goto out_wait;
        }
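vsock_transport_cancel_pkt() is a thin wrapper that returns -EOPNOTSUPP when the registered transport does not implement the new hook, so the connect timeout and interrupt paths can call it unconditionally. A sketch of that optional-callback-in-an-ops-struct pattern (struct and names simplified for illustration):

    #include <errno.h>
    #include <stdio.h>

    struct vsock_sock_toy { int id; };

    struct transport_ops {
        int (*cancel_pkt)(struct vsock_sock_toy *vsk); /* optional hook */
    };

    static const struct transport_ops *transport;

    static int transport_cancel_pkt(struct vsock_sock_toy *vsk)
    {
        if (!transport || !transport->cancel_pkt)
            return -EOPNOTSUPP; /* transport never queues, nothing to cancel */
        return transport->cancel_pkt(vsk);
    }

    static int virtio_like_cancel(struct vsock_sock_toy *vsk)
    {
        printf("cancelling queued packets for socket %d\n", vsk->id);
        return 0;
    }

    static const struct transport_ops virtio_like_ops = {
        .cancel_pkt = virtio_like_cancel,
    };

    int main(void)
    {
        struct vsock_sock_toy vsk = { .id = 7 };

        printf("no transport: %d\n", transport_cancel_pkt(&vsk));
        transport = &virtio_like_ops;
        printf("with transport: %d\n", transport_cancel_pkt(&vsk));
        return 0;
    }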
@@ -213,6 +213,47 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
    return len;
}

static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
    struct virtio_vsock *vsock;
    struct virtio_vsock_pkt *pkt, *n;
    int cnt = 0;
    LIST_HEAD(freeme);

    vsock = virtio_vsock_get();
    if (!vsock) {
        return -ENODEV;
    }

    spin_lock_bh(&vsock->send_pkt_list_lock);
    list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
        if (pkt->vsk != vsk)
            continue;
        list_move(&pkt->list, &freeme);
    }
    spin_unlock_bh(&vsock->send_pkt_list_lock);

    list_for_each_entry_safe(pkt, n, &freeme, list) {
        if (pkt->reply)
            cnt++;
        list_del(&pkt->list);
        virtio_transport_free_pkt(pkt);
    }

    if (cnt) {
        struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
        int new_cnt;

        new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
        if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
            new_cnt < virtqueue_get_vring_size(rx_vq))
            queue_work(virtio_vsock_workqueue, &vsock->rx_work);
    }

    return 0;
}

static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
    int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;

@@ -462,6 +503,7 @@ static struct virtio_transport virtio_transport = {
    .release = virtio_transport_release,
    .connect = virtio_transport_connect,
    .shutdown = virtio_transport_shutdown,
    .cancel_pkt = virtio_transport_cancel_pkt,

    .dgram_bind = virtio_transport_dgram_bind,
    .dgram_dequeue = virtio_transport_dgram_dequeue,
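After freeing cancelled reply packets, the transport subtracts their count from queued_replies and kicks the RX worker only when that subtraction crosses the "virtqueue full" threshold from above, so a throttled receive path is woken exactly once. A tiny sketch of that crossing test (the vring size and names are illustrative):

    #include <stdio.h>

    #define VRING_SIZE 8    /* stand-in for virtqueue_get_vring_size(rx_vq) */

    static int queued_replies = 10;

    /* Returns 1 when the decrement moved us from "at or above the limit"
     * to "below the limit", i.e. the consumer needs a wake-up. */
    static int drop_replies(int cnt)
    {
        int new_cnt;

        queued_replies -= cnt;
        new_cnt = queued_replies;
        return new_cnt + cnt >= VRING_SIZE && new_cnt < VRING_SIZE;
    }

    int main(void)
    {
        printf("drop 1 -> kick? %d (still above limit)\n", drop_replies(1));
        printf("drop 4 -> kick? %d (crossed the limit)\n", drop_replies(4));
        printf("drop 2 -> kick? %d (already below)\n", drop_replies(2));
        return 0;
    }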
@@ -58,6 +58,7 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
    pkt->len = len;
    pkt->hdr.len = cpu_to_le32(len);
    pkt->reply = info->reply;
    pkt->vsk = info->vsk;

    if (info->msg && len > 0) {
        pkt->buf = kmalloc(len, GFP_KERNEL);

@@ -180,6 +181,7 @@ static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
    struct virtio_vsock_pkt_info info = {
        .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
        .type = type,
        .vsk = vsk,
    };

    return virtio_transport_send_pkt_info(vsk, &info);

@@ -519,6 +521,7 @@ int virtio_transport_connect(struct vsock_sock *vsk)
    struct virtio_vsock_pkt_info info = {
        .op = VIRTIO_VSOCK_OP_REQUEST,
        .type = VIRTIO_VSOCK_TYPE_STREAM,
        .vsk = vsk,
    };

    return virtio_transport_send_pkt_info(vsk, &info);

@@ -534,6 +537,7 @@ int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
                  VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
                 (mode & SEND_SHUTDOWN ?
                  VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
        .vsk = vsk,
    };

    return virtio_transport_send_pkt_info(vsk, &info);

@@ -560,6 +564,7 @@ virtio_transport_stream_enqueue(struct vsock_sock *vsk,
        .type = VIRTIO_VSOCK_TYPE_STREAM,
        .msg = msg,
        .pkt_len = len,
        .vsk = vsk,
    };

    return virtio_transport_send_pkt_info(vsk, &info);

@@ -581,6 +586,7 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
        .op = VIRTIO_VSOCK_OP_RST,
        .type = VIRTIO_VSOCK_TYPE_STREAM,
        .reply = !!pkt,
        .vsk = vsk,
    };

    /* Send RST only if the original pkt is not a RST pkt */

@@ -826,6 +832,7 @@ virtio_transport_send_response(struct vsock_sock *vsk,
        .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
        .remote_port = le32_to_cpu(pkt->hdr.src_port),
        .reply = true,
        .vsk = vsk,
    };

    return virtio_transport_send_pkt_info(vsk, &info);


@@ -545,22 +545,18 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
{
    int err;

    rtnl_lock();

    if (!cb->args[0]) {
        err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
                          genl_family_attrbuf(&nl80211_fam),
                          nl80211_fam.maxattr, nl80211_policy);
        if (err)
            goto out_unlock;
            return err;

        *wdev = __cfg80211_wdev_from_attrs(
                    sock_net(skb->sk),
                    genl_family_attrbuf(&nl80211_fam));
        if (IS_ERR(*wdev)) {
            err = PTR_ERR(*wdev);
            goto out_unlock;
        }
        if (IS_ERR(*wdev))
            return PTR_ERR(*wdev);
        *rdev = wiphy_to_rdev((*wdev)->wiphy);
        /* 0 is the first index - add 1 to parse only once */
        cb->args[0] = (*rdev)->wiphy_idx + 1;

@@ -570,10 +566,8 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
        struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
        struct wireless_dev *tmp;

        if (!wiphy) {
            err = -ENODEV;
            goto out_unlock;
        }
        if (!wiphy)
            return -ENODEV;
        *rdev = wiphy_to_rdev(wiphy);
        *wdev = NULL;

@@ -584,21 +578,11 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
            }
        }

        if (!*wdev) {
            err = -ENODEV;
            goto out_unlock;
        }
        if (!*wdev)
            return -ENODEV;
    }

    return 0;
out_unlock:
    rtnl_unlock();
    return err;
}

static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev)
{
    rtnl_unlock();
}

/* IE validation */

@@ -2608,17 +2592,17 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
    int filter_wiphy = -1;
    struct cfg80211_registered_device *rdev;
    struct wireless_dev *wdev;
    int ret;

    rtnl_lock();
    if (!cb->args[2]) {
        struct nl80211_dump_wiphy_state state = {
            .filter_wiphy = -1,
        };
        int ret;

        ret = nl80211_dump_wiphy_parse(skb, cb, &state);
        if (ret)
            return ret;
            goto out_unlock;

        filter_wiphy = state.filter_wiphy;

@@ -2663,12 +2647,14 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
        wp_idx++;
    }
out:
    rtnl_unlock();

    cb->args[0] = wp_idx;
    cb->args[1] = if_idx;

    return skb->len;
    ret = skb->len;
out_unlock:
    rtnl_unlock();

    return ret;
}

static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)

@@ -4452,9 +4438,10 @@ static int nl80211_dump_station(struct sk_buff *skb,
    int sta_idx = cb->args[2];
    int err;

    rtnl_lock();
    err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
    if (err)
        return err;
        goto out_err;

    if (!wdev->netdev) {
        err = -EINVAL;

@@ -4489,7 +4476,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
    cb->args[2] = sta_idx;
    err = skb->len;
out_err:
    nl80211_finish_wdev_dump(rdev);
    rtnl_unlock();

    return err;
}

@@ -5275,9 +5262,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
    int path_idx = cb->args[2];
    int err;

    rtnl_lock();
    err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
    if (err)
        return err;
        goto out_err;

    if (!rdev->ops->dump_mpath) {
        err = -EOPNOTSUPP;

@@ -5310,7 +5298,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
    cb->args[2] = path_idx;
    err = skb->len;
out_err:
    nl80211_finish_wdev_dump(rdev);
    rtnl_unlock();
    return err;
}

@@ -5470,9 +5458,10 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
    int path_idx = cb->args[2];
    int err;

    rtnl_lock();
    err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
    if (err)
        return err;
        goto out_err;

    if (!rdev->ops->dump_mpp) {
        err = -EOPNOTSUPP;

@@ -5505,7 +5494,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
    cb->args[2] = path_idx;
    err = skb->len;
out_err:
    nl80211_finish_wdev_dump(rdev);
    rtnl_unlock();
    return err;
}

@@ -7674,9 +7663,12 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
    int start = cb->args[2], idx = 0;
    int err;

    rtnl_lock();
    err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
    if (err)
    if (err) {
        rtnl_unlock();
        return err;
    }

    wdev_lock(wdev);
    spin_lock_bh(&rdev->bss_lock);

@@ -7699,7 +7691,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
    wdev_unlock(wdev);

    cb->args[2] = idx;
    nl80211_finish_wdev_dump(rdev);
    rtnl_unlock();

    return skb->len;
}

@@ -7784,9 +7776,10 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
    int res;
    bool radio_stats;

    rtnl_lock();
    res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
    if (res)
        return res;
        goto out_err;

    /* prepare_wdev_dump parsed the attributes */
    radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS];

@@ -7827,7 +7820,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
    cb->args[2] = survey_idx;
    res = skb->len;
out_err:
    nl80211_finish_wdev_dump(rdev);
    rtnl_unlock();
    return res;
}

@@ -11508,17 +11501,13 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
    void *data = NULL;
    unsigned int data_len = 0;

    rtnl_lock();

    if (cb->args[0]) {
        /* subtract the 1 again here */
        struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
        struct wireless_dev *tmp;

        if (!wiphy) {
            err = -ENODEV;
            goto out_unlock;
        }
        if (!wiphy)
            return -ENODEV;
        *rdev = wiphy_to_rdev(wiphy);
        *wdev = NULL;

@@ -11538,23 +11527,19 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
    err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
                      attrbuf, nl80211_fam.maxattr, nl80211_policy);
    if (err)
        goto out_unlock;
        return err;

    if (!attrbuf[NL80211_ATTR_VENDOR_ID] ||
        !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) {
        err = -EINVAL;
        goto out_unlock;
    }
        !attrbuf[NL80211_ATTR_VENDOR_SUBCMD])
        return -EINVAL;

    *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf);
    if (IS_ERR(*wdev))
        *wdev = NULL;

    *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf);
    if (IS_ERR(*rdev)) {
        err = PTR_ERR(*rdev);
        goto out_unlock;
    }
    if (IS_ERR(*rdev))
        return PTR_ERR(*rdev);

    vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]);
    subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]);

@@ -11567,19 +11552,15 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
        if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
            continue;

        if (!vcmd->dumpit) {
            err = -EOPNOTSUPP;
            goto out_unlock;
        }
        if (!vcmd->dumpit)
            return -EOPNOTSUPP;

        vcmd_idx = i;
        break;
    }

    if (vcmd_idx < 0) {
        err = -EOPNOTSUPP;
        goto out_unlock;
    }
    if (vcmd_idx < 0)
        return -EOPNOTSUPP;

    if (attrbuf[NL80211_ATTR_VENDOR_DATA]) {
        data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]);

@@ -11596,9 +11577,6 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,

    /* keep rtnl locked in successful case */
    return 0;
out_unlock:
    rtnl_unlock();
    return err;
}

static int nl80211_vendor_cmd_dump(struct sk_buff *skb,

@@ -11613,9 +11591,10 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
    int err;
    struct nlattr *vendor_data;

    rtnl_lock();
    err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
    if (err)
        return err;
        goto out;

    vcmd_idx = cb->args[2];
    data = (void *)cb->args[3];

@@ -11624,15 +11603,21 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,

    if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
                       WIPHY_VENDOR_CMD_NEED_NETDEV)) {
        if (!wdev)
            return -EINVAL;
        if (!wdev) {
            err = -EINVAL;
            goto out;
        }
        if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
            !wdev->netdev)
            return -EINVAL;
            !wdev->netdev) {
            err = -EINVAL;
            goto out;
        }

        if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
            if (!wdev_running(wdev))
                return -ENETDOWN;
            if (!wdev_running(wdev)) {
                err = -ENETDOWN;
                goto out;
            }
        }
    }
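With the RTNL acquisition moved out of nl80211_prepare_wdev_dump(), every dump handler now takes the lock itself and unwinds through a single label, which keeps lock and unlock visibly paired inside one function and removes the early returns that previously leaked the lock. A compact sketch of that caller-owns-the-lock, goto-unwind shape using a pthread mutex (the helper and label names here are made up):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

    /* Pure helper: validates state but takes no locks of its own. */
    static int prepare_dump(int have_dev)
    {
        return have_dev ? 0 : -ENODEV;
    }

    static int dump(int have_dev)
    {
        int err;

        pthread_mutex_lock(&rtnl);

        err = prepare_dump(have_dev);
        if (err)
            goto out_err;       /* single unwind path releases the lock */

        /* ... walk devices and fill the reply here ... */
        err = 0;

    out_err:
        pthread_mutex_unlock(&rtnl);
        return err;
    }

    int main(void)
    {
        printf("missing device: %d\n", dump(0));
        printf("present device: %d\n", dump(1));
        return 0;
    }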
@@ -1,22 +1,23 @@
LIBDIR := ../../../lib
BPFOBJ := $(LIBDIR)/bpf/bpf.o
BPFDIR := $(LIBDIR)/bpf

CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR) $(BPFOBJ)
CFLAGS += -Wall -O2 -I../../../include/uapi -I$(LIBDIR)
LDLIBS += -lcap

TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map

TEST_PROGS := test_kmod.sh

all: $(TEST_GEN_PROGS)
include ../lib.mk

.PHONY: all clean force
BPFOBJ := $(OUTPUT)/bpf.o

$(TEST_GEN_PROGS): $(BPFOBJ)

.PHONY: force

# force a rebuild of BPFOBJ when its dependencies are updated
force:

$(BPFOBJ): force
	$(MAKE) -C $(dir $(BPFOBJ))

$(test_objs): $(BPFOBJ)

include ../lib.mk
	$(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/


@@ -80,8 +80,9 @@ static void test_hashmap(int task, void *data)
    assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0);
    key = 2;
    assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
    key = 1;
    assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
    key = 3;
    assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
           errno == E2BIG);

    /* Check that key = 0 doesn't exist. */
    key = 0;

@@ -110,6 +111,24 @@ static void test_hashmap(int task, void *data)
    close(fd);
}

static void test_hashmap_sizes(int task, void *data)
{
    int fd, i, j;

    for (i = 1; i <= 512; i <<= 1)
        for (j = 1; j <= 1 << 18; j <<= 1) {
            fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j,
                                2, map_flags);
            if (fd < 0) {
                printf("Failed to create hashmap key=%d value=%d '%s'\n",
                       i, j, strerror(errno));
                exit(1);
            }
            close(fd);
            usleep(10); /* give kernel time to destroy */
        }
}

static void test_hashmap_percpu(int task, void *data)
{
    unsigned int nr_cpus = bpf_num_possible_cpus();

@@ -317,7 +336,10 @@ static void test_arraymap_percpu(int task, void *data)
static void test_arraymap_percpu_many_keys(void)
{
    unsigned int nr_cpus = bpf_num_possible_cpus();
    unsigned int nr_keys = 20000;
    /* nr_keys is not too large otherwise the test stresses percpu
     * allocator more than anything else
     */
    unsigned int nr_keys = 2000;
    long values[nr_cpus];
    int key, fd, i;

@@ -419,6 +441,7 @@ static void test_map_stress(void)
{
    run_parallel(100, test_hashmap, NULL);
    run_parallel(100, test_hashmap_percpu, NULL);
    run_parallel(100, test_hashmap_sizes, NULL);

    run_parallel(100, test_arraymap, NULL);
    run_parallel(100, test_arraymap_percpu, NULL);