Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix off by one wrt. indexing when dumping /proc/net/route entries, from Alexander Duyck.

 2) Fix lockdep splats in iwlwifi, from Johannes Berg.

 3) Cure panic when inserting certain netfilter rules when NFT_SET_HASH is disabled, from Liping Zhang.

 4) Memory leak when nft_expr_clone() fails, also from Liping Zhang.

 5) Disable UFO when the path will apply IPsec transformations, from Jakub Sitnicki.

 6) Don't bogusly double cwnd in the dctcp module, from Florian Westphal.

 7) skb_checksum_help() should never actually use the value "0" for the resulting checksum, as that has a special meaning; use CSUM_MANGLED_0 instead. From Eric Dumazet.

 8) Per-tx/rx queue statistic strings are wrong in the qed driver, fix from Yuval Mintz.

 9) Fix SCTP reference counting of associations and transports in sctp_diag, from Xin Long.

10) When we hit ip6tunnel_xmit() we could have come from an ipv4 path in a previous layer or similar, so explicitly clear the ipv6 control block in the skb. From Eli Cooper.

11) Fix bogus sleeping inside of inet_wait_for_connect(), from WANG Cong.

12) Correct device ID of T6 adapter in the cxgb4 driver, from Hariprasad Shenai.

13) Fix potential access past the end of the skb page frag array in tcp_sendmsg(), from Eric Dumazet.

14) 'skb' can legitimately be NULL in inet{,6}_exact_dif_match(), fix from David Ahern.

15) Don't return an error in tcp_sendmsg() if we wrote any bytes successfully, from Eric Dumazet.

16) Extraneous unlocks in netlink_diag_dump(); we removed the locking but forgot to purge these unlock calls. From Eric Dumazet.

17) Fix memory leak in the error path of __genl_register_family(); we leak the attrbuf. From WANG Cong.

18) cgroupstats netlink policy table is mis-sized, from WANG Cong.

19) Several XDP bug fixes in mlx5, from Saeed Mahameed.

20) Fix several device refcount leaks in network drivers, from Johan Hovold.

21) icmp6_send() should use the skb dst device, not skb->dev, to determine the L3 routing domain. From David Ahern.

22) ip_vs_genl_family sets maxattr incorrectly, from WANG Cong.

23) We leak the new macvlan port in some error cases of macvlan_common_newlink(), fix from Gao Feng.

24) Similar to the icmp6_send() fix, icmp_route_lookup() should determine the L3 routing domain using skb_dst(skb)->dev, not skb->dev. Also from David Ahern.

25) Several fixes for route offloading and FIB notification handling in the mlxsw driver, from Jiri Pirko.

26) Properly cap __skb_flow_dissect()'s return value, from Eric Dumazet.

27) Fix long-standing regression in ipv4 redirect handling wrt. validating the new neighbour's reachability, from Stephen Suryaputra Lin.

28) If sk_filter() trims the packet excessively, handle it reasonably in tcp input instead of exploding, from Eric Dumazet.

29) Fix handling of napi hash state when copying channels in the sfc driver, from Bert Kenward.
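A minimal sketch of the checksum detail behind fix 7 (illustrative helpers, not the kernel's actual skb_checksum_help()): in ones'-complement arithmetic 0x0000 and 0xffff are interchangeable, but a stored checksum of 0 is conventionally reserved to mean "no checksum computed", so a folded result of zero is replaced with CSUM_MANGLED_0, which is 0xffff:

	#include <stdint.h>

	/* Fold a 32-bit ones'-complement accumulator down to 16 bits. */
	static uint16_t csum_fold(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* add carries back in */
		sum = (sum & 0xffff) + (sum >> 16);	/* at most one more carry */
		return (uint16_t)~sum;
	}

	/* Never emit 0: substitute the arithmetically equivalent 0xffff. */
	static uint16_t finalize_csum(uint32_t sum)
	{
		uint16_t folded = csum_fold(sum);

		return folded ? folded : 0xffff;	/* CSUM_MANGLED_0 */
	}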
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (121 commits)
  mlxsw: spectrum_router: Flush FIB tables during fini
  net: stmmac: Fix lack of link transition for fixed PHYs
  sctp: change sk state only when it has assocs in sctp_shutdown
  bnx2: Wait for in-flight DMA to complete at probe stage
  Revert "bnx2: Reset device during driver initialization"
  ps3_gelic: fix spelling mistake in debug message
  net: ethernet: ixp4xx_eth: fix spelling mistake in debug message
  ibmvnic: Fix size of debugfs name buffer
  ibmvnic: Unmap ibmvnic_statistics structure
  sfc: clear napi_hash state when copying channels
  mlxsw: spectrum_router: Correctly dump neighbour activity
  mlxsw: spectrum: Fix refcount bug on span entries
  bnxt_en: Fix VF virtual link state.
  bnxt_en: Fix ring arithmetic in bnxt_setup_tc().
  Revert "include/uapi/linux/atm_zatm.h: include linux/time.h"
  tcp: take care of truncations done by sk_filter()
  ipv4: use new_gw for redirect neigh lookup
  r8152: Fix error path in open function
  net: bpqether.h: remove if_ether.h guard
  net: __skb_flow_dissect() must cap its return value
  ...
commit e76d21c40b
@@ -67,13 +67,14 @@ Note that DSA does not currently create network interfaces for the "cpu" and
 Switch tagging protocols
 ------------------------
 
-DSA currently supports 4 different tagging protocols, and a tag-less mode as
+DSA currently supports 5 different tagging protocols, and a tag-less mode as
 well. The different protocols are implemented in:
 
 net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy)
 net/dsa/tag_dsa.c: Marvell's original DSA tag
 net/dsa/tag_edsa.c: Marvell's enhanced DSA tag
 net/dsa/tag_brcm.c: Broadcom's 4 bytes tag
+net/dsa/tag_qca.c: Qualcomm's 2 bytes tag
 
 The exact format of the tag protocol is vendor specific, but in general, they
 all contain something which:

@@ -8057,6 +8057,7 @@ F: drivers/infiniband/hw/mlx4/
 F:	include/linux/mlx4/
 
 MELLANOX MLX5 core VPI driver
+M:	Saeed Mahameed <saeedm@mellanox.com>
 M:	Matan Barak <matanb@mellanox.com>
 M:	Leon Romanovsky <leonro@mellanox.com>
 L:	netdev@vger.kernel.org

@@ -142,6 +142,9 @@ struct plx_pci_card {
 #define CTI_PCI_VENDOR_ID		0x12c4
 #define CTI_PCI_DEVICE_ID_CRG001	0x0900
 
+#define MOXA_PCI_VENDOR_ID		0x1393
+#define MOXA_PCI_DEVICE_ID		0x0100
+
 static void plx_pci_reset_common(struct pci_dev *pdev);
 static void plx9056_pci_reset_common(struct pci_dev *pdev);
 static void plx_pci_reset_marathon_pci(struct pci_dev *pdev);
@@ -258,6 +261,14 @@ static struct plx_pci_card_info plx_pci_card_info_elcus = {
 	/* based on PLX9030 */
 };
 
+static struct plx_pci_card_info plx_pci_card_info_moxa = {
+	"MOXA", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{0, 0x00, 0x00}, { {0, 0x00, 0x80}, {1, 0x00, 0x80} },
+	&plx_pci_reset_common
+	/* based on PLX9052 */
+};
+
 static const struct pci_device_id plx_pci_tbl[] = {
 	{
 		/* Adlink PCI-7841/cPCI-7841 */
@@ -357,6 +368,13 @@ static const struct pci_device_id plx_pci_tbl[] = {
 		0, 0,
 		(kernel_ulong_t)&plx_pci_card_info_elcus
 	},
+	{
+		/* moxa */
+		MOXA_PCI_VENDOR_ID, MOXA_PCI_DEVICE_ID,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_moxa
+	},
 	{ 0,}
 };
 MODULE_DEVICE_TABLE(pci, plx_pci_tbl);

@@ -204,17 +204,6 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
 	return num_msgs;
 }
 
-static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
-{
-	u32 data = 0x7777;
-
-	xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
-	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
-	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
-	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
-	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
-}
-
 void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
 			    struct xgene_enet_pdata *pdata,
 			    enum xgene_enet_err_code status)
@@ -929,5 +918,4 @@ struct xgene_ring_ops xgene_ring1_ops = {
 	.clear = xgene_enet_clear_ring,
 	.wr_cmd = xgene_enet_wr_cmd,
 	.len = xgene_enet_ring_len,
-	.coalesce = xgene_enet_setup_coalescing,
 };

@@ -55,8 +55,10 @@ enum xgene_enet_rm {
 #define PREFETCH_BUF_EN		BIT(21)
 #define CSR_RING_ID_BUF		0x000c
 #define CSR_PBM_COAL		0x0014
+#define CSR_PBM_CTICK0		0x0018
 #define CSR_PBM_CTICK1		0x001c
 #define CSR_PBM_CTICK2		0x0020
+#define CSR_PBM_CTICK3		0x0024
 #define CSR_THRESHOLD0_SET1	0x0030
 #define CSR_THRESHOLD1_SET1	0x0034
 #define CSR_RING_NE_INT_MODE	0x017c

@@ -1188,7 +1188,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
 		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
 	}
 
-	pdata->ring_ops->coalesce(pdata->tx_ring[0]);
+	if (pdata->ring_ops->coalesce)
+		pdata->ring_ops->coalesce(pdata->tx_ring[0]);
 	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
 
 	return 0;

@@ -30,7 +30,7 @@ static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
 		ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
 		ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
 	}
-	ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1);
+	ring_cfg[0] |= SET_VAL(X2_CFGCRID, 2);
 
 	addr >>= 8;
 	ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);
@@ -192,13 +192,15 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
 
 static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
 {
-	u32 data = 0x7777;
+	u32 data = 0x77777777;
 
 	xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
+	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data);
 	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
-	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
-	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
-	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
+	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data);
+	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data);
+	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08);
+	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10);
 }
 
 struct xgene_ring_ops xgene_ring2_ops = {

@@ -307,6 +307,10 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
 	u32 ctl;
 
 	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
+
+	/* preserve ONLY bits 16-17 from current hardware value */
+	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
+
 	if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
 		ctl &= ~BGMAC_DMA_RX_BL_MASK;
 		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
@@ -317,7 +321,6 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
 		ctl &= ~BGMAC_DMA_RX_PT_MASK;
 		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
 	}
-	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
 	ctl |= BGMAC_DMA_RX_ENABLE;
 	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
 	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
@@ -1046,9 +1049,9 @@ static void bgmac_enable(struct bgmac *bgmac)
 
 	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
 		BGMAC_DS_MM_SHIFT;
-	if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) || mode != 0)
+	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
 		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
-	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2)
+	if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2)
 		bgmac_cco_ctl_maskset(bgmac, 1, ~0,
 				      BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
 

@@ -49,6 +49,7 @@
 #include <linux/firmware.h>
 #include <linux/log2.h>
 #include <linux/aer.h>
+#include <linux/crash_dump.h>
 
 #if IS_ENABLED(CONFIG_CNIC)
 #define BCM_CNIC 1
@@ -4764,15 +4765,16 @@ bnx2_setup_msix_tbl(struct bnx2 *bp)
 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
 }
 
-static int
-bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
+static void
+bnx2_wait_dma_complete(struct bnx2 *bp)
 {
 	u32 val;
-	int i, rc = 0;
-	u8 old_port;
+	int i;
 
-	/* Wait for the current PCI transaction to complete before
-	 * issuing a reset. */
+	/*
+	 * Wait for the current PCI transaction to complete before
+	 * issuing a reset.
+	 */
 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
 		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
@@ -4796,6 +4798,21 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
 		}
 	}
 
+	return;
+}
+
+
+static int
+bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
+{
+	u32 val;
+	int i, rc = 0;
+	u8 old_port;
+
+	/* Wait for the current PCI transaction to complete before
+	 * issuing a reset. */
+	bnx2_wait_dma_complete(bp);
+
 	/* Wait for the firmware to tell us it is ok to issue a reset. */
 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
 
@@ -6361,6 +6378,10 @@ bnx2_open(struct net_device *dev)
 	struct bnx2 *bp = netdev_priv(dev);
 	int rc;
 
+	rc = bnx2_request_firmware(bp);
+	if (rc < 0)
+		goto out;
+
 	netif_carrier_off(dev);
 
 	bnx2_disable_int(bp);
@@ -6429,6 +6450,7 @@ bnx2_open(struct net_device *dev)
 	bnx2_free_irq(bp);
 	bnx2_free_mem(bp);
 	bnx2_del_napi(bp);
+	bnx2_release_firmware(bp);
 	goto out;
 }
 
@@ -8575,12 +8597,15 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_drvdata(pdev, dev);
 
-	rc = bnx2_request_firmware(bp);
-	if (rc < 0)
-		goto error;
-
+	/*
+	 * In-flight DMA from 1st kernel could continue going in kdump kernel.
+	 * New io-page table has been created before bnx2 does reset at open stage.
+	 * We have to wait for the in-flight DMA to complete to avoid it look up
+	 * into the newly created io-page table.
+	 */
+	if (is_kdump_kernel())
+		bnx2_wait_dma_complete(bp);
 
 	bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
 	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
 
 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -8613,7 +8638,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return 0;
 
 error:
-	bnx2_release_firmware(bp);
 	pci_iounmap(pdev, bp->regview);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);

@@ -6309,6 +6309,7 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
 			 struct tc_to_netdev *ntc)
 {
 	struct bnxt *bp = netdev_priv(dev);
+	bool sh = false;
 	u8 tc;
 
 	if (ntc->type != TC_SETUP_MQPRIO)
@@ -6325,12 +6326,11 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
 	if (netdev_get_num_tc(dev) == tc)
 		return 0;
 
+	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+		sh = true;
+
 	if (tc) {
 		int max_rx_rings, max_tx_rings, rc;
-		bool sh = false;
-
-		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
-			sh = true;
 
 		rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
 		if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
@@ -6348,7 +6348,8 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
 		netdev_reset_tc(dev);
 	}
-	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
+			       bp->tx_nr_rings + bp->rx_nr_rings;
 	bp->num_stat_ctxs = bp->cp_nr_rings;
 
 	if (netif_running(bp->dev))

@@ -774,8 +774,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
 
 		if (vf->flags & BNXT_VF_LINK_UP) {
 			/* if physical link is down, force link up on VF */
-			if (phy_qcfg_resp.link ==
-			    PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
+			if (phy_qcfg_resp.link !=
+			    PORT_PHY_QCFG_RESP_LINK_LINK) {
 				phy_qcfg_resp.link =
 					PORT_PHY_QCFG_RESP_LINK_LINK;
 				phy_qcfg_resp.link_speed = cpu_to_le16(

@@ -177,6 +177,7 @@ bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
 		return 0;
 
 	hw_cons = *(tcb->hw_consumer_index);
+	rmb();
 	cons = tcb->consumer_index;
 	q_depth = tcb->q_depth;
 
@@ -3094,7 +3095,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	BNA_QE_INDX_INC(prod, q_depth);
 	tcb->producer_index = prod;
 
-	smp_mb();
+	wmb();
 
 	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 		return NETDEV_TX_OK;
@@ -3102,7 +3103,6 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	skb_tx_timestamp(skb);
 
 	bna_txq_prod_indx_doorbell(tcb);
-	smp_mb();
 
 	return NETDEV_TX_OK;
 }

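The bnad hunks above swap smp_mb() for wmb() around the producer index, and add an rmb() after reading the hardware consumer index. A minimal sketch of the ordering being enforced, with hypothetical names (ring, desc and doorbell are not the driver's own): descriptor writes must reach memory before the device can observe the doorbell, which needs a mandatory write barrier even on !SMP kernels where smp_mb() may reduce to a compiler barrier:

	/* hypothetical ring layout; only the barrier placement matters */
	static void post_tx(struct ring *ring, struct desc *desc, u64 dma, u32 prod)
	{
		desc->addr = cpu_to_le64(dma);	/* 1. publish descriptor fields */
		wmb();				/* 2. order them before ... */
		writel(prod, ring->doorbell);	/* 3. ... the doorbell/index write */
	}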
@@ -178,9 +178,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
 	CH_PCI_ID_TABLE_FENTRY(0x6005),
 	CH_PCI_ID_TABLE_FENTRY(0x6006),
 	CH_PCI_ID_TABLE_FENTRY(0x6007),
+	CH_PCI_ID_TABLE_FENTRY(0x6008),
 	CH_PCI_ID_TABLE_FENTRY(0x6009),
-	CH_PCI_ID_TABLE_FENTRY(0x600d),
 	CH_PCI_ID_TABLE_FENTRY(0x6010),
 	CH_PCI_ID_TABLE_FENTRY(0x6011),
 	CH_PCI_ID_TABLE_FENTRY(0x6014),
 	CH_PCI_ID_TABLE_FENTRY(0x6015),

@@ -332,8 +332,10 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
 		return ERR_PTR(-ENODEV);
 
 	handle = dev->ops->get_handle(dev, port_id);
-	if (IS_ERR(handle))
+	if (IS_ERR(handle)) {
+		put_device(&dev->cls_dev);
 		return handle;
+	}
 
 	handle->dev = dev;
 	handle->owner_dev = owner_dev;
@@ -356,6 +358,8 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
 	for (j = i - 1; j >= 0; j--)
 		hnae_fini_queue(handle->qs[j]);
 
+	put_device(&dev->cls_dev);
+
 	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL(hnae_get_handle);
@@ -377,6 +381,8 @@ void hnae_put_handle(struct hnae_handle *h)
 	dev->ops->put_handle(h);
 
 	module_put(dev->owner);
+
+	put_device(&dev->cls_dev);
 }
 EXPORT_SYMBOL(hnae_put_handle);
 

@@ -2446,6 +2446,8 @@ static int ehea_open(struct net_device *dev)
 
 	netif_info(port, ifup, dev, "enabling port\n");
 
+	netif_carrier_off(dev);
+
 	ret = ehea_up(dev);
 	if (!ret) {
 		port_napi_enable(port);

@@ -1505,9 +1505,8 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 	    adapter->max_rx_add_entries_per_subcrq > entries_page ?
 	    entries_page : adapter->max_rx_add_entries_per_subcrq;
 
-	/* Choosing the maximum number of queues supported by firmware*/
-	adapter->req_tx_queues = adapter->max_tx_queues;
-	adapter->req_rx_queues = adapter->max_rx_queues;
+	adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
+	adapter->req_rx_queues = adapter->opt_rx_comp_queues;
 	adapter->req_rx_add_queues = adapter->max_rx_add_queues;
 
 	adapter->req_mtu = adapter->max_mtu;
@@ -3706,7 +3705,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	struct net_device *netdev;
 	unsigned char *mac_addr_p;
 	struct dentry *ent;
-	char buf[16]; /* debugfs name buf */
+	char buf[17]; /* debugfs name buf */
 	int rc;
 
 	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
@@ -3845,6 +3844,9 @@ static int ibmvnic_remove(struct vio_dev *dev)
 	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
 		debugfs_remove_recursive(adapter->debugfs_dir);
 
+	dma_unmap_single(&dev->dev, adapter->stats_token,
+			 sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
+
 	if (adapter->ras_comps)
 		dma_free_coherent(&dev->dev,
 				  adapter->ras_comp_num *

@@ -1381,6 +1381,7 @@ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
 	temp = (val & 0x003fff00) >> 8;
 
 	temp *= 64000000;
+	temp += mp->t_clk / 2;
 	do_div(temp, mp->t_clk);
 
 	return (unsigned int)temp;
@@ -1417,6 +1418,7 @@ static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
 
 	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
 	temp *= 64000000;
+	temp += mp->t_clk / 2;
 	do_div(temp, mp->t_clk);
 
 	return (unsigned int)temp;

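The mv643xx_eth hunks above add half the divisor before do_div(), which turns a truncating integer division into round-to-nearest. The pattern in isolation (a sketch assuming the usual do_div() semantics: it divides a u64 in place and returns the remainder):

	static unsigned int clocks_to_coal(unsigned long long temp, unsigned int t_clk)
	{
		temp += t_clk / 2;	/* bias by divisor/2 so truncation rounds to nearest */
		do_div(temp, t_clk);	/* quotient is left in temp */
		return (unsigned int)temp;
	}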
@@ -2202,7 +2202,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 
 	if (!shutdown)
 		free_netdev(dev);
-	dev->ethtool_ops = NULL;
 }
 
 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)

@@ -1445,6 +1445,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	c->netdev   = priv->netdev;
 	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
 	c->num_tc   = priv->params.num_tc;
+	c->xdp      = !!priv->xdp_prog;
 
 	if (priv->params.rx_am_enabled)
 		rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
@@ -1468,6 +1469,12 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	if (err)
 		goto err_close_tx_cqs;
 
+	/* XDP SQ CQ params are same as normal TXQ sq CQ params */
+	err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
+				     priv->params.tx_cq_moderation) : 0;
+	if (err)
+		goto err_close_rx_cq;
+
 	napi_enable(&c->napi);
 
 	err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
@@ -1488,21 +1495,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 		}
 	}
 
-	if (priv->xdp_prog) {
-		/* XDP SQ CQ params are same as normal TXQ sq CQ params */
-		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
-				    priv->params.tx_cq_moderation);
-		if (err)
-			goto err_close_sqs;
+	err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0;
+	if (err)
+		goto err_close_sqs;
 
-		err = mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq);
-		if (err) {
-			mlx5e_close_cq(&c->xdp_sq.cq);
-			goto err_close_sqs;
-		}
-	}
-
-	c->xdp = !!priv->xdp_prog;
 	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
 	if (err)
 		goto err_close_xdp_sq;
@@ -1512,7 +1508,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 
 	return 0;
 err_close_xdp_sq:
-	mlx5e_close_sq(&c->xdp_sq);
+	if (c->xdp)
+		mlx5e_close_sq(&c->xdp_sq);
 
 err_close_sqs:
 	mlx5e_close_sqs(c);
@@ -1522,6 +1519,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 
 err_disable_napi:
 	napi_disable(&c->napi);
+	if (c->xdp)
+		mlx5e_close_cq(&c->xdp_sq.cq);
+
 err_close_rx_cq:
 	mlx5e_close_cq(&c->rq.cq);
 
 err_close_tx_cqs:

@@ -308,7 +308,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
 	netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
 #endif
 
-	netdev->features	 |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC;
+	netdev->features	 |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
 	netdev->hw_features      |= NETIF_F_HW_TC;
 
 	eth_hw_addr_random(netdev);

@@ -237,12 +237,15 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
 			skb_flow_dissector_target(f->dissector,
 						  FLOW_DISSECTOR_KEY_VLAN,
 						  f->mask);
-		if (mask->vlan_id) {
+		if (mask->vlan_id || mask->vlan_priority) {
 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
 
 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
+
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
 		}
 	}
 

@@ -57,7 +57,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	if (esw->mode != SRIOV_OFFLOADS)
 		return ERR_PTR(-EOPNOTSUPP);
 
-	action = attr->action;
+	/* per flow vlan pop/push is emulated, don't set that into the firmware */
+	action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
 
 	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
 		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;

@@ -1690,7 +1690,7 @@ static int init_root_ns(struct mlx5_flow_steering *steering)
 {
 
 	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
-	if (IS_ERR_OR_NULL(steering->root_ns))
+	if (!steering->root_ns)
 		goto cleanup;
 
 	if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))

@@ -1226,6 +1226,9 @@ static int init_one(struct pci_dev *pdev,
 
 	pci_set_drvdata(pdev, dev);
 
+	dev->pdev = pdev;
+	dev->event = mlx5_core_event;
+
 	if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
 		mlx5_core_warn(dev,
 			       "selected profile out of range, selecting default (%d)\n",
@@ -1233,8 +1236,6 @@ static int init_one(struct pci_dev *pdev,
 		prof_sel = MLX5_DEFAULT_PROF;
 	}
 	dev->profile = &profile[prof_sel];
-	dev->pdev = pdev;
-	dev->event = mlx5_core_event;
 
 	INIT_LIST_HEAD(&priv->ctx_list);
 	spin_lock_init(&priv->ctx_lock);

@@ -231,7 +231,7 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
 
 	span_entry->used = true;
 	span_entry->id = index;
-	span_entry->ref_count = 0;
+	span_entry->ref_count = 1;
 	span_entry->local_port = local_port;
 	return span_entry;
 }
@@ -270,6 +270,7 @@ static struct mlxsw_sp_span_entry
 
 	span_entry = mlxsw_sp_span_entry_find(port);
 	if (span_entry) {
+		/* Already exists, just take a reference */
 		span_entry->ref_count++;
 		return span_entry;
 	}
@@ -280,6 +281,7 @@ static struct mlxsw_sp_span_entry
 static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
 				   struct mlxsw_sp_span_entry *span_entry)
 {
+	WARN_ON(!span_entry->ref_count);
 	if (--span_entry->ref_count == 0)
 		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
 	return 0;

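The spectrum.c hunks above move span entries to the usual create-owns-a-reference model: creation returns with ref_count = 1, a lookup takes an extra reference, and each put drops one, destroying on zero. The lifecycle in miniature, with hypothetical types standing in for the driver's own:

	struct span_entry { unsigned int ref_count; };

	extern void span_destroy(struct span_entry *e);	/* hypothetical destructor */

	static struct span_entry *span_get(struct span_entry *e)
	{
		e->ref_count++;		/* lookup path: take another reference */
		return e;
	}

	static void span_put(struct span_entry *e)
	{
		WARN_ON(!e->ref_count);	/* would fire with the old ref_count = 0 init */
		if (--e->ref_count == 0)
			span_destroy(e);
	}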
@@ -115,7 +115,7 @@ struct mlxsw_sp_rif {
 struct mlxsw_sp_mid {
 	struct list_head list;
 	unsigned char addr[ETH_ALEN];
-	u16 vid;
+	u16 fid;
 	u16 mid;
 	unsigned int ref_count;
 };

@@ -594,21 +594,22 @@ static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
 	return 0;
 }
 
+static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
+
 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
 {
+	mlxsw_sp_router_fib_flush(mlxsw_sp);
 	kfree(mlxsw_sp->router.vrs);
 }
 
 struct mlxsw_sp_neigh_key {
-	unsigned char addr[sizeof(struct in6_addr)];
-	struct net_device *dev;
+	struct neighbour *n;
 };
 
 struct mlxsw_sp_neigh_entry {
 	struct rhash_head ht_node;
 	struct mlxsw_sp_neigh_key key;
 	u16 rif;
-	struct neighbour *n;
 	bool offloaded;
 	struct delayed_work dw;
 	struct mlxsw_sp_port *mlxsw_sp_port;
@@ -646,19 +647,15 @@ mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
 static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work);
 
 static struct mlxsw_sp_neigh_entry *
-mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len,
-			    struct net_device *dev, u16 rif,
-			    struct neighbour *n)
+mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif)
 {
 	struct mlxsw_sp_neigh_entry *neigh_entry;
 
 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC);
 	if (!neigh_entry)
 		return NULL;
-	memcpy(neigh_entry->key.addr, addr, addr_len);
-	neigh_entry->key.dev = dev;
+	neigh_entry->key.n = n;
 	neigh_entry->rif = rif;
-	neigh_entry->n = n;
 	INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw);
 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
 	return neigh_entry;
@@ -671,13 +668,11 @@ mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry)
 }
 
 static struct mlxsw_sp_neigh_entry *
-mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr,
-			    size_t addr_len, struct net_device *dev)
+mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
 {
-	struct mlxsw_sp_neigh_key key = {{ 0 } };
+	struct mlxsw_sp_neigh_key key;
 
-	memcpy(key.addr, addr, addr_len);
-	key.dev = dev;
+	key.n = n;
 	return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
 				      &key, mlxsw_sp_neigh_ht_params);
 }
@@ -689,26 +684,20 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	struct mlxsw_sp_neigh_entry *neigh_entry;
 	struct mlxsw_sp_rif *r;
-	u32 dip;
 	int err;
 
 	if (n->tbl != &arp_tbl)
 		return 0;
 
-	dip = ntohl(*((__be32 *) n->primary_key));
-	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
-						  n->dev);
-	if (neigh_entry) {
-		WARN_ON(neigh_entry->n != n);
+	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
+	if (neigh_entry)
 		return 0;
-	}
 
 	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
 	if (WARN_ON(!r))
 		return -EINVAL;
 
-	neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev,
-						  r->rif, n);
+	neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif);
 	if (!neigh_entry)
 		return -ENOMEM;
 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
@@ -727,14 +716,11 @@ void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	struct mlxsw_sp_neigh_entry *neigh_entry;
-	u32 dip;
 
 	if (n->tbl != &arp_tbl)
 		return;
 
-	dip = ntohl(*((__be32 *) n->primary_key));
-	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
-						  n->dev);
+	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
 	if (!neigh_entry)
 		return;
 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
@@ -817,6 +803,26 @@ static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
 	}
 }
 
+static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
+{
+	u8 num_rec, last_rec_index, num_entries;
+
+	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
+	last_rec_index = num_rec - 1;
+
+	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
+		return false;
+	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
+	    MLXSW_REG_RAUHTD_TYPE_IPV6)
+		return true;
+
+	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
+								last_rec_index);
+	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
+		return true;
+	return false;
+}
+
 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
 {
 	char *rauhtd_pl;
@@ -843,7 +849,7 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
 		for (i = 0; i < num_rec; i++)
 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
 							  i);
-	} while (num_rec);
+	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
 	rtnl_unlock();
 
 	kfree(rauhtd_pl);
@@ -862,7 +868,7 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
 		 * is active regardless of the traffic.
 		 */
 		if (!list_empty(&neigh_entry->nexthop_list))
-			neigh_event_send(neigh_entry->n, NULL);
+			neigh_event_send(neigh_entry->key.n, NULL);
 	}
 	rtnl_unlock();
 }
@@ -908,9 +914,9 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
 	rtnl_lock();
 	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
 			    nexthop_neighs_list_node) {
-		if (!(neigh_entry->n->nud_state & NUD_VALID) &&
+		if (!(neigh_entry->key.n->nud_state & NUD_VALID) &&
 		    !list_empty(&neigh_entry->nexthop_list))
-			neigh_event_send(neigh_entry->n, NULL);
+			neigh_event_send(neigh_entry->key.n, NULL);
 	}
 	rtnl_unlock();
 
@@ -927,7 +933,7 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
 {
 	struct mlxsw_sp_neigh_entry *neigh_entry =
 		container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
-	struct neighbour *n = neigh_entry->n;
+	struct neighbour *n = neigh_entry->key.n;
 	struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
@@ -1030,11 +1036,8 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
 
 		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 		dip = ntohl(*((__be32 *) n->primary_key));
-		neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp,
-							  &dip,
-							  sizeof(__be32),
-							  dev);
-		if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) {
+		neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
+		if (WARN_ON(!neigh_entry)) {
 			mlxsw_sp_port_dev_put(mlxsw_sp_port);
 			return NOTIFY_DONE;
 		}
@@ -1343,33 +1346,26 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
 				 struct fib_nh *fib_nh)
 {
 	struct mlxsw_sp_neigh_entry *neigh_entry;
-	u32 gwip = ntohl(fib_nh->nh_gw);
 	struct net_device *dev = fib_nh->nh_dev;
 	struct neighbour *n;
 	u8 nud_state;
 
-	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
-						  sizeof(gwip), dev);
-	if (!neigh_entry) {
-		__be32 gwipn = htonl(gwip);
-
-		n = neigh_create(&arp_tbl, &gwipn, dev);
+	/* Take a reference of neigh here ensuring that neigh would
+	 * not be detructed before the nexthop entry is finished.
+	 * The reference is taken either in neigh_lookup() or
+	 * in neith_create() in case n is not found.
+	 */
+	n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev);
+	if (!n) {
+		n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev);
 		if (IS_ERR(n))
 			return PTR_ERR(n);
 		neigh_event_send(n, NULL);
-		neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
-							  sizeof(gwip), dev);
-		if (!neigh_entry) {
-			neigh_release(n);
-			return -EINVAL;
-		}
-	} else {
-		/* Take a reference of neigh here ensuring that neigh would
-		 * not be detructed before the nexthop entry is finished.
-		 * The second branch takes the reference in neith_create()
-		 */
-		n = neigh_entry->n;
-		neigh_clone(n);
 	}
+
+	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
+	if (!neigh_entry) {
+		neigh_release(n);
+		return -EINVAL;
+	}
 
 	/* If that is the first nexthop connected to that neigh, add to
@@ -1403,7 +1399,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
 	if (list_empty(&nh->neigh_entry->nexthop_list))
 		list_del(&nh->neigh_entry->nexthop_neighs_list_node);
 
-	neigh_release(neigh_entry->n);
+	neigh_release(neigh_entry->key.n);
 }
 
 static struct mlxsw_sp_nexthop_group *
@@ -1463,11 +1459,11 @@ static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
 
 	for (i = 0; i < fi->fib_nhs; i++) {
 		struct fib_nh *fib_nh = &fi->fib_nh[i];
-		u32 gwip = ntohl(fib_nh->nh_gw);
+		struct neighbour *n = nh->neigh_entry->key.n;
 
-		if (memcmp(nh->neigh_entry->key.addr,
-			   &gwip, sizeof(u32)) == 0 &&
-		    nh->neigh_entry->key.dev == fib_nh->nh_dev)
+		if (memcmp(n->primary_key, &fib_nh->nh_gw,
+			   sizeof(fib_nh->nh_gw)) == 0 &&
+		    n->dev == fib_nh->nh_dev)
 			return true;
 	}
 	return false;
@@ -1874,18 +1870,18 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
 }
 
-static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
 {
 	struct mlxsw_resources *resources;
 	struct mlxsw_sp_fib_entry *fib_entry;
 	struct mlxsw_sp_fib_entry *tmp;
 	struct mlxsw_sp_vr *vr;
 	int i;
-	int err;
 
 	resources = mlxsw_core_resources_get(mlxsw_sp->core);
 	for (i = 0; i < resources->max_virtual_routers; i++) {
 		vr = &mlxsw_sp->router.vrs[i];
+
 		if (!vr->used)
 			continue;
 
@@ -1901,6 +1897,13 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
 			break;
 		}
 	}
+}
+
+static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
+{
+	int err;
+
+	mlxsw_sp_router_fib_flush(mlxsw_sp);
 	mlxsw_sp->router.aborted = true;
 	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
 	if (err)
@@ -1958,6 +1961,9 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
 	struct fib_entry_notifier_info *fen_info = ptr;
 	int err;
 
+	if (!net_eq(fen_info->info.net, &init_net))
+		return NOTIFY_DONE;
+
 	switch (event) {
 	case FIB_EVENT_ENTRY_ADD:
 		err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info);

@@ -929,12 +929,12 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
 
 static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
 					      const unsigned char *addr,
-					      u16 vid)
+					      u16 fid)
 {
 	struct mlxsw_sp_mid *mid;
 
 	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
-		if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
+		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
 			return mid;
 	}
 	return NULL;
@@ -942,7 +942,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
 
 static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
 						const unsigned char *addr,
-						u16 vid)
+						u16 fid)
 {
 	struct mlxsw_sp_mid *mid;
 	u16 mid_idx;
@@ -958,7 +958,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
 
 	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
 	ether_addr_copy(mid->addr, addr);
-	mid->vid = vid;
+	mid->fid = fid;
 	mid->mid = mid_idx;
 	mid->ref_count = 0;
 	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);
@@ -991,9 +991,9 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
 	if (switchdev_trans_ph_prepare(trans))
 		return 0;
 
-	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
+	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
 	if (!mid) {
-		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
+		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid);
 		if (!mid) {
 			netdev_err(dev, "Unable to allocate MC group\n");
 			return -ENOMEM;
@@ -1137,7 +1137,7 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
 	u16 mid_idx;
 	int err = 0;
 
-	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
+	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
 	if (!mid) {
 		netdev_err(dev, "Unable to remove port from MC DB\n");
 		return -EINVAL;

@@ -727,9 +727,6 @@ struct core_tx_bd_flags {
 #define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT	6
 #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK	0x1
 #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT	7
-#define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK		0x1
-#define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT	12
-
 };
 
 struct core_tx_bd {

@@ -1119,6 +1119,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 	start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
 	    CORE_TX_BD_FLAGS_START_BD_SHIFT;
 	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
+	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
 	DMA_REGPAIR_LE(start_bd->addr, first_frag);
 	start_bd->nbytes = cpu_to_le16(first_frag_len);
 

@@ -839,20 +839,19 @@ static void qed_update_pf_params(struct qed_dev *cdev,
 {
 	int i;
 
+	if (IS_ENABLED(CONFIG_QED_RDMA)) {
+		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
+		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+		/* divide by 3 the MRs to avoid MF ILT overflow */
+		params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
+		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
+	}
+
 	for (i = 0; i < cdev->num_hwfns; i++) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
 		p_hwfn->pf_params = *params;
 	}
-
-	if (!IS_ENABLED(CONFIG_QED_RDMA))
-		return;
-
-	params->rdma_pf_params.num_qps = QED_ROCE_QPS;
-	params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
-	/* divide by 3 the MRs to avoid MF ILT overflow */
-	params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
-	params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
 }
 
 static int qed_slowpath_start(struct qed_dev *cdev,

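The qed_main.c hunk above is an ordering fix as much as a configuration one: the RDMA parameters were previously filled into *params only after *params had already been copied into each hwfn, so the copies never saw them. The bug in miniature, with hypothetical types standing in for the driver's:

	struct pf_params { int num_qps; };
	struct hwfn { struct pf_params pf_params; };

	static void update_params(struct hwfn *fns, int n, struct pf_params *p)
	{
		p->num_qps = 64;		/* mutate first... */
		for (int i = 0; i < n; i++)
			fns[i].pf_params = *p;	/* ...then copy, or the update is lost */
	}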
@@ -175,16 +175,23 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
 	for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) {
 		int tc;
 
-		for (j = 0; j < QEDE_NUM_RQSTATS; j++)
-			sprintf(buf + (k + j) * ETH_GSTRING_LEN,
-				"%d: %s", i, qede_rqstats_arr[j].string);
-		k += QEDE_NUM_RQSTATS;
-		for (tc = 0; tc < edev->num_tc; tc++) {
-			for (j = 0; j < QEDE_NUM_TQSTATS; j++)
+		if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
+			for (j = 0; j < QEDE_NUM_RQSTATS; j++)
 				sprintf(buf + (k + j) * ETH_GSTRING_LEN,
-					"%d.%d: %s", i, tc,
-					qede_tqstats_arr[j].string);
-			k += QEDE_NUM_TQSTATS;
+					"%d: %s", i,
+					qede_rqstats_arr[j].string);
+			k += QEDE_NUM_RQSTATS;
+		}
+
+		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+			for (tc = 0; tc < edev->num_tc; tc++) {
+				for (j = 0; j < QEDE_NUM_TQSTATS; j++)
+					sprintf(buf + (k + j) *
+						ETH_GSTRING_LEN,
+						"%d.%d: %s", i, tc,
+						qede_tqstats_arr[j].string);
+				k += QEDE_NUM_TQSTATS;
+			}
 		}
 	}
 

@@ -2839,7 +2839,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
 		}
 
 		mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
-				       rxq->rx_buf_size, DMA_FROM_DEVICE);
+				       PAGE_SIZE, DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
 			DP_NOTICE(edev,
 				  "Failed to map TPA replacement buffer\n");

@@ -575,10 +575,11 @@ void emac_mac_start(struct emac_adapter *adpt)
 
 	mac |= TXEN | RXEN;     /* enable RX/TX */
 
-	/* We don't have ethtool support yet, so force flow-control mode
-	 * to 'full' always.
-	 */
-	mac |= TXFC | RXFC;
+	/* Configure MAC flow control to match the PHY's settings. */
+	if (phydev->pause)
+		mac |= RXFC;
+	if (phydev->pause != phydev->asym_pause)
+		mac |= TXFC;
 
 	/* setup link speed */
 	mac &= ~SPEED_MASK;
@@ -1003,6 +1004,12 @@ int emac_mac_up(struct emac_adapter *adpt)
 	writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS);
 	writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK);
 
+	/* Enable pause frames. Without this feature, the EMAC has been shown
+	 * to receive (and drop) frames with FCS errors at gigabit connections.
+	 */
+	adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+	adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
 	adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
 	phy_start(adpt->phydev);
 

@@ -421,7 +421,7 @@ static const struct emac_reg_write sgmii_v2_laned[] = {
 	/* CDR Settings */
 	{EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0,
 		UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)},
-	{EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(6)},
+	{EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)},
 	{EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)},
 
 	/* TX/RX Settings */

@@ -485,6 +485,9 @@ efx_copy_channel(const struct efx_channel *old_channel)
 	*channel = *old_channel;
 
 	channel->napi_dev = NULL;
+	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
+	channel->napi_str.napi_id = 0;
+	channel->napi_str.state = 0;
 	memset(&channel->eventq, 0, sizeof(channel->eventq));
 
 	for (j = 0; j < EFX_TXQ_TYPES; j++) {

@@ -880,6 +880,13 @@ static int stmmac_init_phy(struct net_device *dev)
 		return -ENODEV;
 	}
 
+	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
+	 * subsequent PHY polling, make sure we force a link transition if
+	 * we have a UP/DOWN/UP transition
+	 */
+	if (phydev->is_pseudo_fixed_link)
+		phydev->irq = PHY_POLL;
+
 	pr_debug("stmmac_init_phy:  %s: attached to PHY (UID 0x%x)"
 		 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
 

@@ -176,9 +176,12 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
 	}
 
 	dev = bus_find_device(&platform_bus_type, NULL, node, match);
+	of_node_put(node);
 	priv = dev_get_drvdata(dev);
 
 	priv->cpsw_phy_sel(priv, phy_mode, slave);
+
+	put_device(dev);
 }
 EXPORT_SYMBOL_GPL(cpsw_phy_sel);
 

@@ -1410,6 +1410,7 @@ static int emac_dev_open(struct net_device *ndev)
 	int i = 0;
 	struct emac_priv *priv = netdev_priv(ndev);
 	struct phy_device *phydev = NULL;
+	struct device *phy = NULL;
 
 	ret = pm_runtime_get_sync(&priv->pdev->dev);
 	if (ret < 0) {
@@ -1488,19 +1489,20 @@ static int emac_dev_open(struct net_device *ndev)
 
 	/* use the first phy on the bus if pdata did not give us a phy id */
 	if (!phydev && !priv->phy_id) {
-		struct device *phy;
-
 		phy = bus_find_device(&mdio_bus_type, NULL, NULL,
 				      match_first_device);
-		if (phy)
+		if (phy) {
 			priv->phy_id = dev_name(phy);
+			if (!priv->phy_id || !*priv->phy_id)
+				put_device(phy);
+		}
 	}
 
 	if (!phydev && priv->phy_id && *priv->phy_id) {
 		phydev = phy_connect(ndev, priv->phy_id,
 				     &emac_adjust_link,
 				     PHY_INTERFACE_MODE_MII);
+		put_device(phy);	/* reference taken by bus_find_device */
 		if (IS_ERR(phydev)) {
 			dev_err(emac_dev, "could not connect to phy %s\n",
 				priv->phy_id);

@@ -1694,7 +1694,7 @@ struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl)
 				pr_debug("%s: bssid matched\n", __func__);
 				break;
 			} else {
-				pr_debug("%s: bssid unmached\n", __func__);
+				pr_debug("%s: bssid unmatched\n", __func__);
 				continue;
 			}
 		}

@@ -708,8 +708,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
 		if (!qmgr_stat_below_low_watermark(rxq) &&
 		    napi_reschedule(napi)) { /* not empty again */
 #if DEBUG_RX
-			printk(KERN_DEBUG "%s: eth_poll"
-			       " napi_reschedule successed\n",
+			printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n",
 			       dev->name);
 #endif
 			qmgr_disable_irq(rxq);

@@ -1278,6 +1278,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 	struct net_device *lowerdev;
 	int err;
 	int macmode;
+	bool create = false;
 
 	if (!tb[IFLA_LINK])
 		return -EINVAL;
@@ -1304,12 +1305,18 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 		err = macvlan_port_create(lowerdev);
 		if (err < 0)
 			return err;
+		create = true;
 	}
 	port = macvlan_port_get_rtnl(lowerdev);
 
 	/* Only 1 macvlan device can be created in passthru mode */
-	if (port->passthru)
-		return -EINVAL;
+	if (port->passthru) {
+		/* The macvlan port must be not created this time,
+		 * still goto destroy_macvlan_port for readability.
+		 */
+		err = -EINVAL;
+		goto destroy_macvlan_port;
+	}
 
 	vlan->lowerdev = lowerdev;
 	vlan->dev      = dev;
@@ -1325,24 +1332,28 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 		vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
 
 	if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
-		if (port->count)
-			return -EINVAL;
+		if (port->count) {
+			err = -EINVAL;
+			goto destroy_macvlan_port;
+		}
 		port->passthru = true;
 		eth_hw_addr_inherit(dev, lowerdev);
 	}
 
 	if (data && data[IFLA_MACVLAN_MACADDR_MODE]) {
-		if (vlan->mode != MACVLAN_MODE_SOURCE)
-			return -EINVAL;
+		if (vlan->mode != MACVLAN_MODE_SOURCE) {
+			err = -EINVAL;
+			goto destroy_macvlan_port;
+		}
 		macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]);
 		err = macvlan_changelink_sources(vlan, macmode, data);
 		if (err)
-			return err;
+			goto destroy_macvlan_port;
 	}
 
 	err = register_netdevice(dev);
 	if (err < 0)
-		return err;
+		goto destroy_macvlan_port;
 
 	dev->priv_flags |= IFF_MACVLAN;
 	err = netdev_upper_dev_link(lowerdev, dev);
@@ -1357,7 +1368,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 
 unregister_netdev:
 	unregister_netdevice(dev);
-
+destroy_macvlan_port:
+	if (create)
+		macvlan_port_destroy(port->dev);
 	return err;
 }
 EXPORT_SYMBOL_GPL(macvlan_common_newlink);

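The macvlan hunks above convert every early return after macvlan_port_create() into a goto destroy_macvlan_port, so a port created on this path is torn down again on failure. The shape of the pattern (a sketch with hypothetical helpers, not the driver code itself):

	extern int port_create(struct net_device *lowerdev);	/* hypothetical */
	extern void port_destroy(struct net_device *lowerdev);	/* hypothetical */
	extern int do_setup(void);				/* hypothetical */

	static int newlink(struct net_device *lowerdev)
	{
		bool create = false;
		int err;

		err = port_create(lowerdev);
		if (err < 0)
			return err;		/* nothing to undo yet */
		create = true;

		err = do_setup();
		if (err)
			goto destroy_port;	/* no bare 'return err' past this point */
		return 0;

	destroy_port:
		if (create)			/* only undo what this call created */
			port_destroy(lowerdev);
		return err;
	}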
@@ -723,6 +723,7 @@ struct phy_device *phy_connect(struct net_device *dev, const char *bus_id,
 	phydev = to_phy_device(d);
 
 	rc = phy_connect_direct(dev, phydev, handler, interface);
+	put_device(d);
 	if (rc)
 		return ERR_PTR(rc);
 
@@ -953,6 +954,7 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
 	phydev = to_phy_device(d);
 
 	rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
+	put_device(d);
 	if (rc)
 		return ERR_PTR(rc);
 

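Both phy_device.c hunks plug the same class of leak: bus_find_device() returns its result with a device reference held, so every successful lookup must eventually be balanced by a put_device(), including when a later step fails. A sketch of the corrected shape around the first hunk:

	struct device *d;
	int rc;

	d = bus_find_device(&mdio_bus_type, NULL, data, match);	/* takes a reference */
	if (!d)
		return ERR_PTR(-ENODEV);

	rc = phy_connect_direct(dev, to_phy_device(d), handler, interface);
	put_device(d);	/* drop the lookup reference; the callee holds its own */
	if (rc)
		return ERR_PTR(rc);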
@@ -1656,6 +1656,19 @@ static const struct driver_info ax88178a_info = {
 	.tx_fixup = ax88179_tx_fixup,
 };
 
+static const struct driver_info cypress_GX3_info = {
+	.description = "Cypress GX3 SuperSpeed to Gigabit Ethernet Controller",
+	.bind = ax88179_bind,
+	.unbind = ax88179_unbind,
+	.status = ax88179_status,
+	.link_reset = ax88179_link_reset,
+	.reset = ax88179_reset,
+	.stop = ax88179_stop,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.rx_fixup = ax88179_rx_fixup,
+	.tx_fixup = ax88179_tx_fixup,
+};
+
 static const struct driver_info dlink_dub1312_info = {
 	.description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter",
 	.bind = ax88179_bind,
@@ -1717,6 +1730,10 @@ static const struct usb_device_id products[] = {
 	/* ASIX AX88178A 10/100/1000 */
 	USB_DEVICE(0x0b95, 0x178a),
 	.driver_info = (unsigned long)&ax88178a_info,
+}, {
+	/* Cypress GX3 SuperSpeed to Gigabit Ethernet Bridge Controller */
+	USB_DEVICE(0x04b4, 0x3610),
+	.driver_info = (unsigned long)&cypress_GX3_info,
 }, {
 	/* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */
 	USB_DEVICE(0x2001, 0x4a00),

@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
 	u8 checksum = CHECKSUM_NONE;
 	u32 opts2, opts3;
 
-	if (tp->version == RTL_VER_01)
+	if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02)
 		goto return_result;
 
 	opts2 = le32_to_cpu(rx_desc->opts2);
@@ -1745,7 +1745,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
 			checksum = CHECKSUM_NONE;
 		else
 			checksum = CHECKSUM_UNNECESSARY;
-	} else if (RD_IPV6_CS) {
+	} else if (opts2 & RD_IPV6_CS) {
 		if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF))
 			checksum = CHECKSUM_UNNECESSARY;
 		else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF))
@@ -3266,10 +3266,8 @@ static int rtl8152_open(struct net_device *netdev)
 		goto out;
 
 	res = usb_autopm_get_interface(tp->intf);
-	if (res < 0) {
-		free_all_mem(tp);
-		goto out;
-	}
+	if (res < 0)
+		goto out_free;
 
 	mutex_lock(&tp->control);
 
@@ -3285,10 +3283,9 @@ static int rtl8152_open(struct net_device *netdev)
 		netif_device_detach(tp->netdev);
 		netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
 			   res);
-		free_all_mem(tp);
-	} else {
-		napi_enable(&tp->napi);
+		goto out_unlock;
 	}
+	napi_enable(&tp->napi);
 
 	mutex_unlock(&tp->control);
 
@@ -3297,7 +3294,13 @@ static int rtl8152_open(struct net_device *netdev)
 	tp->pm_notifier.notifier_call = rtl_notifier;
 	register_pm_notifier(&tp->pm_notifier);
 #endif
+	return 0;
 
+out_unlock:
+	mutex_unlock(&tp->control);
+	usb_autopm_put_interface(tp->intf);
+out_free:
+	free_all_mem(tp);
 out:
 	return res;
 }

@@ -2038,23 +2038,33 @@ static struct virtio_device_id id_table[] = {
 	{ 0 },
 };
 
+#define VIRTNET_FEATURES \
+	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
+	VIRTIO_NET_F_MAC, \
+	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
+	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
+	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
+	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
+	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
+	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
+	VIRTIO_NET_F_CTRL_MAC_ADDR, \
+	VIRTIO_NET_F_MTU
+
 static unsigned int features[] = {
-	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
-	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
-	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
-	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
-	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
-	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
-	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
-	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
-	VIRTIO_NET_F_CTRL_MAC_ADDR,
-	VIRTIO_F_ANY_LAYOUT,
-	VIRTIO_NET_F_MTU,
+	VIRTNET_FEATURES,
+};
+
+static unsigned int features_legacy[] = {
+	VIRTNET_FEATURES,
+	VIRTIO_NET_F_GSO,
+	VIRTIO_F_ANY_LAYOUT,
 };
 
 static struct virtio_driver virtio_net_driver = {
 	.feature_table = features,
 	.feature_table_size = ARRAY_SIZE(features),
+	.feature_table_legacy = features_legacy,
+	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
 	.driver.name =	KBUILD_MODNAME,
 	.driver.owner =	THIS_MODULE,
 	.id_table =	id_table,

@@ -944,7 +944,9 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
 {
 	struct vxlan_dev *vxlan;
 	struct vxlan_sock *sock4;
-	struct vxlan_sock *sock6 = NULL;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct vxlan_sock *sock6;
+#endif
 	unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
 
 	sock4 = rtnl_dereference(dev->vn4_sock);

@@ -4516,7 +4516,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
 	/* store current 11d setting */
 	if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY,
 				  &ifp->vif->is_11d)) {
-		supports_11d = false;
+		is_11d = supports_11d = false;
 	} else {
 		country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
 					      settings->beacon.tail_len,

@@ -1087,6 +1087,15 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
 		ret = iwl_mvm_switch_to_d3(mvm);
 		if (ret)
 			return ret;
-	}
+	} else {
+		/* In theory, we wouldn't have to stop a running sched
+		 * scan in order to start another one (for
+		 * net-detect). But in practice this doesn't seem to
+		 * work properly, so stop any running sched_scan now.
+		 */
+		ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+		if (ret)
+			return ret;
+	}
 
 	/* rfkill release can be either for wowlan or netdetect */

@@ -1254,7 +1263,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 out:
 	if (ret < 0) {
 		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
-		ieee80211_restart_hw(mvm->hw);
+		if (mvm->restart_fw > 0) {
+			mvm->restart_fw--;
+			ieee80211_restart_hw(mvm->hw);
+		}
 		iwl_mvm_free_nd(mvm);
 	}
 out_noreset:

@@ -2088,6 +2100,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 	iwl_mvm_update_changed_regdom(mvm);
 
 	if (mvm->net_detect) {
+		/* If this is a non-unified image, we restart the FW,
+		 * so no need to stop the netdetect scan. If that
+		 * fails, continue and try to get the wake-up reasons,
+		 * but trigger a HW restart by keeping a failure code
+		 * in ret.
+		 */
+		if (unified_image)
+			ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
+						false);
+
 		iwl_mvm_query_netdetect_reasons(mvm, vif);
 		/* has unlocked the mutex, so skip that */
 		goto out;

@@ -2271,7 +2293,8 @@ static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
 static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
 {
 	struct iwl_mvm *mvm = inode->i_private;
-	int remaining_time = 10;
+	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
 
 	mvm->d3_test_active = false;

@@ -2282,18 +2305,22 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
 	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
 
 	iwl_abort_notification_waits(&mvm->notif_wait);
-	ieee80211_restart_hw(mvm->hw);
+	if (!unified_image) {
+		int remaining_time = 10;
 
-	/* wait for restart and disconnect all interfaces */
-	while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
-	       remaining_time > 0) {
-		remaining_time--;
-		msleep(1000);
+		ieee80211_restart_hw(mvm->hw);
+
+		/* wait for restart and disconnect all interfaces */
+		while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+		       remaining_time > 0) {
+			remaining_time--;
+			msleep(1000);
+		}
+
+		if (remaining_time == 0)
+			IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
 	}
 
-	if (remaining_time == 0)
-		IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");
-
 	ieee80211_iterate_active_interfaces_atomic(
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 		iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);

@@ -1529,8 +1529,8 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
 		.data = { &cmd, },
 		.len = { sizeof(cmd) },
 	};
-	size_t delta, len;
-	ssize_t ret;
+	size_t delta;
+	ssize_t ret, len;
 
 	hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
 			     DEBUG_GROUP, 0);

@@ -4121,7 +4121,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 				     struct iwl_mvm_internal_rxq_notif *notif,
 				     u32 size)
 {
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
 	u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
 	int ret;
 
@@ -4143,7 +4142,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 	}
 
 	if (notif->sync)
-		ret = wait_event_timeout(notif_waitq,
+		ret = wait_event_timeout(mvm->rx_sync_waitq,
 					 atomic_read(&mvm->queue_sync_counter) == 0,
 					 HZ);
 	WARN_ON_ONCE(!ret);

@@ -937,6 +937,7 @@ struct iwl_mvm {
 	/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
 	spinlock_t d0i3_tx_lock;
 	wait_queue_head_t d0i3_exit_waitq;
+	wait_queue_head_t rx_sync_waitq;
 
 	/* BT-Coex */
 	struct iwl_bt_coex_profile_notif last_bt_notif;

@@ -619,6 +619,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	spin_lock_init(&mvm->refs_lock);
 	skb_queue_head_init(&mvm->d0i3_tx);
 	init_waitqueue_head(&mvm->d0i3_exit_waitq);
+	init_waitqueue_head(&mvm->rx_sync_waitq);
 
 	atomic_set(&mvm->queue_sync_counter, 0);
 

@@ -547,7 +547,8 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 				  "Received expired RX queue sync message\n");
 			return;
 		}
-		atomic_dec(&mvm->queue_sync_counter);
+		if (!atomic_dec_return(&mvm->queue_sync_counter))
+			wake_up(&mvm->rx_sync_waitq);
 	}
 
 	switch (internal_notif->type) {

@@ -1199,6 +1199,9 @@ static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
 
 static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
 {
+	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
 	/* This looks a bit arbitrary, but the idea is that if we run
 	 * out of possible simultaneous scans and the userspace is
 	 * trying to run a scan type that is already running, we

@@ -1225,12 +1228,30 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
 			return -EBUSY;
 		return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
 	case IWL_MVM_SCAN_NETDETECT:
-		/* No need to stop anything for net-detect since the
-		 * firmware is restarted anyway. This way, any sched
-		 * scans that were running will be restarted when we
-		 * resume.
-		 */
-		return 0;
+		/* For non-unified images, there's no need to stop
+		 * anything for net-detect since the firmware is
+		 * restarted anyway. This way, any sched scans that
+		 * were running will be restarted when we resume.
+		 */
+		if (!unified_image)
+			return 0;
+
+		/* If this is a unified image and we ran out of scans,
+		 * we need to stop something. Prefer stopping regular
+		 * scans, because the results are useless at this
+		 * point, and we should be able to keep running
+		 * another scheduled scan while suspended.
+		 */
+		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
+			return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
+						 true);
+		if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
+			return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
+						 true);
+
+		/* fall through, something is wrong if no scan was
+		 * running but we ran out of scans.
+		 */
 	default:
 		WARN_ON(1);
 		break;

@@ -541,48 +541,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
 
 #ifdef CONFIG_ACPI
-#define SPL_METHOD		"SPLC"
-#define SPL_DOMAINTYPE_MODULE	BIT(0)
-#define SPL_DOMAINTYPE_WIFI	BIT(1)
-#define SPL_DOMAINTYPE_WIGIG	BIT(2)
-#define SPL_DOMAINTYPE_RFEM	BIT(3)
+#define ACPI_SPLC_METHOD	"SPLC"
+#define ACPI_SPLC_DOMAIN_WIFI	(0x07)
 
-static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
+static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc)
 {
-	union acpi_object *limits, *domain_type, *power_limit;
+	union acpi_object *data_pkg, *dflt_pwr_limit;
+	int i;
 
-	if (splx->type != ACPI_TYPE_PACKAGE ||
-	    splx->package.count != 2 ||
-	    splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
-	    splx->package.elements[0].integer.value != 0) {
-		IWL_ERR(trans, "Unsupported splx structure\n");
+	/* We need at least two elements, one for the revision and one
+	 * for the data itself. Also check that the revision is
+	 * supported (currently only revision 0).
+	 */
+	if (splc->type != ACPI_TYPE_PACKAGE ||
+	    splc->package.count < 2 ||
+	    splc->package.elements[0].type != ACPI_TYPE_INTEGER ||
+	    splc->package.elements[0].integer.value != 0) {
+		IWL_DEBUG_INFO(trans,
+			       "Unsupported structure returned by the SPLC method. Ignoring.\n");
 		return 0;
 	}
 
-	limits = &splx->package.elements[1];
-	if (limits->type != ACPI_TYPE_PACKAGE ||
-	    limits->package.count < 2 ||
-	    limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
-	    limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
-		IWL_ERR(trans, "Invalid limits element\n");
-		return 0;
-	}
+	/* loop through all the packages to find the one for WiFi */
+	for (i = 1; i < splc->package.count; i++) {
+		union acpi_object *domain;
+
+		data_pkg = &splc->package.elements[i];
+
+		/* Skip anything that is not a package with the right
+		 * amount of elements (i.e. at least 2 integers).
+		 */
+		if (data_pkg->type != ACPI_TYPE_PACKAGE ||
+		    data_pkg->package.count < 2 ||
+		    data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+		    data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
+			continue;
+
+		domain = &data_pkg->package.elements[0];
+		if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI)
+			break;
+
+		data_pkg = NULL;
+	}
 
-	domain_type = &limits->package.elements[0];
-	power_limit = &limits->package.elements[1];
-	if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
-		IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
+	if (!data_pkg) {
+		IWL_DEBUG_INFO(trans,
+			       "No element for the WiFi domain returned by the SPLC method.\n");
 		return 0;
 	}
 
-	return power_limit->integer.value;
+	dflt_pwr_limit = &data_pkg->package.elements[1];
+	return dflt_pwr_limit->integer.value;
 }
 
 static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
 {
 	acpi_handle pxsx_handle;
 	acpi_handle handle;
-	struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
+	struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL};
 	acpi_status status;
 
 	pxsx_handle = ACPI_HANDLE(&pdev->dev);

@@ -593,23 +609,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
 	}
 
 	/* Get the method's handle */
-	status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
+	status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD,
+				 &handle);
 	if (ACPI_FAILURE(status)) {
-		IWL_DEBUG_INFO(trans, "SPL method not found\n");
+		IWL_DEBUG_INFO(trans, "SPLC method not found\n");
 		return;
 	}
 
 	/* Call SPLC with no arguments */
-	status = acpi_evaluate_object(handle, NULL, NULL, &splx);
+	status = acpi_evaluate_object(handle, NULL, NULL, &splc);
 	if (ACPI_FAILURE(status)) {
 		IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
 		return;
 	}
 
-	trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
+	trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer);
 	IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
 		       trans->dflt_pwr_limit);
-	kfree(splx.pointer);
+	kfree(splc.pointer);
 }
 
 #else /* CONFIG_ACPI */

@@ -592,6 +592,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 			      int slots_num, u32 txq_id)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int ret;
 
 	txq->need_update = false;

@@ -606,6 +607,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 		return ret;
 
 	spin_lock_init(&txq->lock);
+
+	if (txq_id == trans_pcie->cmd_queue) {
+		static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
+
+		lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
+	}
+
 	__skb_queue_head_init(&txq->overflow_q);
 
 	/*

@@ -304,7 +304,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 		queue->rx_skbs[id] = skb;
 
 		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
-		BUG_ON((signed short)ref < 0);
+		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 		queue->grant_rx_ref[id] = ref;
 
 		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

@@ -428,7 +428,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
 	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-	BUG_ON((signed short)ref < 0);
+	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 
 	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 					gfn, GNTMAP_readonly);

@@ -149,7 +149,7 @@ static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
 {
 #if defined(CONFIG_NET_L3_MASTER_DEV)
 	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
-	    ipv6_l3mdev_skb(IP6CB(skb)->flags))
+	    skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
 		return true;
 #endif
 	return false;

@@ -3354,6 +3354,21 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 bool is_skb_forwardable(const struct net_device *dev,
 			const struct sk_buff *skb);
 
+static __always_inline int ____dev_forward_skb(struct net_device *dev,
+					       struct sk_buff *skb)
+{
+	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
+	    unlikely(!is_skb_forwardable(dev, skb))) {
+		atomic_long_inc(&dev->rx_dropped);
+		kfree_skb(skb);
+		return NET_RX_DROP;
+	}
+
+	skb_scrub_packet(skb, true);
+	skb->priority = 0;
+	return 0;
+}
+
 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
 
 extern int netdev_budget;

@@ -47,8 +47,7 @@ struct inet_skb_parm {
 #define IPSKB_REROUTED		BIT(4)
 #define IPSKB_DOREDIRECT	BIT(5)
 #define IPSKB_FRAG_PMTU		BIT(6)
-#define IPSKB_FRAG_SEGS		BIT(7)
-#define IPSKB_L3SLAVE		BIT(8)
+#define IPSKB_L3SLAVE		BIT(7)
 
 	u16 frag_max_size;
 };

@@ -146,6 +146,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
 {
 	int pkt_len, err;
 
+	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
 	pkt_len = skb->len - skb_inner_network_offset(skb);
 	err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
 	if (unlikely(net_xmit_eval(err)))

@@ -30,8 +30,7 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct)
 	if (net->ct.labels_used == 0)
 		return NULL;
 
-	return nf_ct_ext_add_length(ct, NF_CT_EXT_LABELS,
-				    sizeof(struct nf_conn_labels), GFP_ATOMIC);
+	return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC);
 #else
 	return NULL;
 #endif

@@ -145,7 +145,7 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
 	return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
 }
 
-unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
+int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
 unsigned int nft_parse_register(const struct nlattr *attr);
 int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);

@@ -542,7 +542,8 @@ void *nft_set_elem_init(const struct nft_set *set,
 			const struct nft_set_ext_tmpl *tmpl,
 			const u32 *key, const u32 *data,
 			u64 timeout, gfp_t gfp);
-void nft_set_elem_destroy(const struct nft_set *set, void *elem);
+void nft_set_elem_destroy(const struct nft_set *set, void *elem,
+			  bool destroy_expr);
 
 /**
  *	struct nft_set_gc_batch_head - nf_tables set garbage collection batch

@@ -693,7 +694,6 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
 {
 	int err;
 
-	__module_get(src->ops->type->owner);
 	if (src->ops->clone) {
 		dst->ops = src->ops;
 		err = src->ops->clone(dst, src);

@@ -702,6 +702,8 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
 	} else {
 		memcpy(dst, src, src->ops->size);
 	}
+
+	__module_get(src->ops->type->owner);
 	return 0;
 }

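Editorial note on the nft_expr_clone() hunk above: the leak fix is purely an ordering change — take the module reference only once the clone can no longer fail, so the error path has nothing to unwind. A minimal userspace sketch of that acquire-last pattern (illustrative only; the counter and function names are made up, not nf_tables API):

    #include <assert.h>

    static int refs;	/* stand-in for a module refcount */

    static int clone_op(int src_ok)
    {
    	return src_ok ? 0 : -1;	/* the step that may fail */
    }

    static int expr_clone(int src_ok)
    {
    	int err = clone_op(src_ok);

    	if (err)
    		return err;	/* nothing acquired, nothing to release */

    	refs++;			/* acquire only on the success path */
    	return 0;
    }

    int main(void)
    {
    	assert(expr_clone(0) == -1 && refs == 0);	/* failure leaks nothing */
    	assert(expr_clone(1) == 0 && refs == 1);
    	return 0;
    }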
@@ -152,7 +152,7 @@ void sctp_unhash_endpoint(struct sctp_endpoint *);
 struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
 			     struct sctphdr *, struct sctp_association **,
 			     struct sctp_transport **);
-void sctp_err_finish(struct sock *, struct sctp_association *);
+void sctp_err_finish(struct sock *, struct sctp_transport *);
 void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
 			   struct sctp_transport *t, __u32 pmtu);
 void sctp_icmp_redirect(struct sock *, struct sctp_transport *,

@@ -1596,11 +1596,11 @@ static inline void sock_put(struct sock *sk)
 void sock_gen_put(struct sock *sk);
 
 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
-		     unsigned int trim_cap);
+		     unsigned int trim_cap, bool refcounted);
 static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
 				 const int nested)
 {
-	return __sk_receive_skb(sk, skb, nested, 1);
+	return __sk_receive_skb(sk, skb, nested, 1, true);
 }
 
 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)

@@ -805,7 +805,7 @@ static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
 	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
-	    ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
+	    skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
 		return true;
 #endif
 	return false;

@@ -1220,6 +1220,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
 
 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
+int tcp_filter(struct sock *sk, struct sk_buff *skb);
 
 #undef STATE_TRACE

@@ -14,7 +14,6 @@
 
 #include <linux/atmapi.h>
 #include <linux/atmioc.h>
-#include <linux/time.h>
 
 #define ZATM_GETPOOL	_IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc)
 						/* get pool statistics */

@@ -5,9 +5,7 @@
  *	Defines for the BPQETHER pseudo device driver
  */
 
-#ifndef __LINUX_IF_ETHER_H
 #include <linux/if_ether.h>
-#endif
 
 #define SIOCSBPQETHOPT		(SIOCDEVPRIVATE+0)	/* reserved */
 #define SIOCSBPQETHADDR		(SIOCDEVPRIVATE+1)

@@ -687,7 +687,8 @@ static void delete_all_elements(struct bpf_htab *htab)
 
 		hlist_for_each_entry_safe(l, n, head, hash_node) {
 			hlist_del_rcu(&l->hash_node);
-			htab_elem_free(htab, l);
+			if (l->state != HTAB_EXTRA_ELEM_USED)
+				htab_elem_free(htab, l);
 		}
 	}
 }

@@ -194,7 +194,7 @@ static int map_create(union bpf_attr *attr)
 
 	err = bpf_map_charge_memlock(map);
 	if (err)
-		goto free_map;
+		goto free_map_nouncharge;
 
 	err = bpf_map_new_fd(map);
 	if (err < 0)

@@ -204,6 +204,8 @@ static int map_create(union bpf_attr *attr)
 	return err;
 
 free_map:
+	bpf_map_uncharge_memlock(map);
+free_map_nouncharge:
 	map->ops->map_free(map);
 	return err;
 }

@@ -54,7 +54,11 @@ static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1
 	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
 	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};
 
-static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
+/*
+ * We have to use TASKSTATS_CMD_ATTR_MAX here, it is the maxattr in the family.
+ * Make sure they are always aligned.
+ */
+static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
 	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
 };
 

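Background on the cgroupstats hunk above: genetlink validates messages by indexing the policy table with every attribute type up to the family's maxattr, so a policy array sized to a smaller enum than maxattr gets read out of bounds. A standalone sketch of the sizing rule (the enums here are hypothetical, not the taskstats ones):

    #include <stdio.h>

    /* Illustrative only: a validator that, like genetlink, walks attribute
     * types 0..maxattr and consults policy[type]. If the policy array is
     * sized to a smaller enum than maxattr, the loop reads past its end.
     */
    enum { ATTR_A, ATTR_B, ATTR_C, ATTR_MAX = ATTR_C };     /* family maxattr */
    enum { OTHER_ATTR_FD, OTHER_ATTR_MAX = OTHER_ATTR_FD }; /* smaller enum */

    struct policy { int type; };

    static const struct policy good[ATTR_MAX + 1];      /* sized to maxattr */
    static const struct policy bad[OTHER_ATTR_MAX + 1]; /* too small: 1 entry */

    int main(void)
    {
    	for (int type = 0; type <= ATTR_MAX; type++) {
    		(void)good[type].type;   /* always in bounds */
    		/* (void)bad[type].type;    out of bounds for type > 0 */
    	}
    	printf("good has %zu entries, bad has %zu\n",
    	       sizeof(good) / sizeof(good[0]), sizeof(bad) / sizeof(bad[0]));
    	return 0;
    }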
@@ -1549,24 +1549,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
 	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
 	struct sock *sk = sock->sk;
 	struct bcm_sock *bo = bcm_sk(sk);
+	int ret = 0;
 
 	if (len < sizeof(*addr))
 		return -EINVAL;
 
-	if (bo->bound)
-		return -EISCONN;
+	lock_sock(sk);
+
+	if (bo->bound) {
+		ret = -EISCONN;
+		goto fail;
+	}
 
 	/* bind a device to this socket */
 	if (addr->can_ifindex) {
 		struct net_device *dev;
 
 		dev = dev_get_by_index(&init_net, addr->can_ifindex);
-		if (!dev)
-			return -ENODEV;
-
+		if (!dev) {
+			ret = -ENODEV;
+			goto fail;
+		}
 		if (dev->type != ARPHRD_CAN) {
 			dev_put(dev);
-			return -ENODEV;
+			ret = -ENODEV;
+			goto fail;
 		}
 
 		bo->ifindex = dev->ifindex;

@@ -1577,17 +1584,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
 		bo->ifindex = 0;
 	}
 
-	bo->bound = 1;
-
 	if (proc_dir) {
 		/* unique socket address as filename */
 		sprintf(bo->procname, "%lu", sock_i_ino(sk));
 		bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
 						     proc_dir,
 						     &bcm_proc_fops, sk);
+		if (!bo->bcm_proc_read) {
+			ret = -ENOMEM;
+			goto fail;
+		}
 	}
 
-	return 0;
+	bo->bound = 1;
+
+fail:
+	release_sock(sk);
+
+	return ret;
 }
 
 static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,

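The bcm_connect() rework above is the usual single-exit locking pattern: take the lock once, route every failure through one label that releases it, and commit state (bo->bound) only after nothing can fail. A generic sketch with a pthread mutex standing in for lock_sock()/release_sock() (toy code, not the CAN socket layer):

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int bound;

    static int do_connect(int ifindex)
    {
    	int ret = 0;

    	pthread_mutex_lock(&lock);

    	if (bound) {		/* every failure takes the same exit */
    		ret = -EISCONN;
    		goto fail;
    	}
    	if (ifindex < 0) {
    		ret = -ENODEV;
    		goto fail;
    	}

    	bound = 1;		/* commit state only once nothing can fail */
    fail:
    	pthread_mutex_unlock(&lock);
    	return ret;
    }

    int main(void)
    {
    	return do_connect(1) || do_connect(1) != -EISCONN;
    }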
@@ -1766,19 +1766,14 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
 
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
-	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
-	    unlikely(!is_skb_forwardable(dev, skb))) {
-		atomic_long_inc(&dev->rx_dropped);
-		kfree_skb(skb);
-		return NET_RX_DROP;
+	int ret = ____dev_forward_skb(dev, skb);
+
+	if (likely(!ret)) {
+		skb->protocol = eth_type_trans(skb, dev);
+		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 	}
 
-	skb_scrub_packet(skb, true);
-	skb->priority = 0;
-	skb->protocol = eth_type_trans(skb, dev);
-	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(__dev_forward_skb);

@@ -2484,7 +2479,7 @@ int skb_checksum_help(struct sk_buff *skb)
 		goto out;
 	}
 
-	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
+	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
 out_set_summed:
 	skb->ip_summed = CHECKSUM_NONE;
 out:

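For context on the skb_checksum_help() change above: in the ones-complement arithmetic used by Internet checksums, 0x0000 and 0xFFFF encode the same value, but a transmitted checksum field of zero conventionally means "no checksum computed" (notably for UDP). CSUM_MANGLED_0 is 0xFFFF in the kernel, and the `x ?: y` form is a GCC/Clang extension. A minimal userspace sketch of the idiom:

    #include <assert.h>
    #include <stdint.h>

    #define CSUM_MANGLED_0 ((uint16_t)0xffff)

    /* Fold a 32-bit ones-complement accumulator into 16 bits. */
    static uint16_t csum_fold(uint32_t csum)
    {
    	csum = (csum & 0xffff) + (csum >> 16);
    	csum = (csum & 0xffff) + (csum >> 16);
    	return (uint16_t)~csum;
    }

    int main(void)
    {
    	/* An accumulator of 0xffff folds to 0x0000, which a receiver
    	 * would misread as "checksum absent"; substitute the equivalent
    	 * encoding 0xffff instead (GNU ?: extension).
    	 */
    	uint16_t sum = csum_fold(0xffff) ?: CSUM_MANGLED_0;

    	assert(sum == CSUM_MANGLED_0);
    	return 0;
    }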
@@ -1628,6 +1628,19 @@ static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
 	return dev_forward_skb(dev, skb);
 }
 
+static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
+				      struct sk_buff *skb)
+{
+	int ret = ____dev_forward_skb(dev, skb);
+
+	if (likely(!ret)) {
+		skb->dev = dev;
+		ret = netif_rx(skb);
+	}
+
+	return ret;
+}
+
 static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 {
 	int ret;

@@ -1647,6 +1660,51 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 	return ret;
 }
 
+static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
+				 u32 flags)
+{
+	/* skb->mac_len is not set on normal egress */
+	unsigned int mlen = skb->network_header - skb->mac_header;
+
+	__skb_pull(skb, mlen);
+
+	/* At ingress, the mac header has already been pulled once.
+	 * At egress, skb_postpull_rcsum has to be done in case that
+	 * the skb is originated from ingress (i.e. a forwarded skb)
+	 * to ensure that rcsum starts at net header.
+	 */
+	if (!skb_at_tc_ingress(skb))
+		skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+	skb_pop_mac_header(skb);
+	skb_reset_mac_len(skb);
+	return flags & BPF_F_INGRESS ?
+	       __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
+}
+
+static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
+				 u32 flags)
+{
+	bpf_push_mac_rcsum(skb);
+	return flags & BPF_F_INGRESS ?
+	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
+}
+
+static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
+			  u32 flags)
+{
+	switch (dev->type) {
+	case ARPHRD_TUNNEL:
+	case ARPHRD_TUNNEL6:
+	case ARPHRD_SIT:
+	case ARPHRD_IPGRE:
+	case ARPHRD_VOID:
+	case ARPHRD_NONE:
+		return __bpf_redirect_no_mac(skb, dev, flags);
+	default:
+		return __bpf_redirect_common(skb, dev, flags);
+	}
+}
+
 BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
 {
 	struct net_device *dev;

@@ -1675,10 +1733,7 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
 		return -ENOMEM;
 	}
 
-	bpf_push_mac_rcsum(clone);
-
-	return flags & BPF_F_INGRESS ?
-	       __bpf_rx_skb(dev, clone) : __bpf_tx_skb(dev, clone);
+	return __bpf_redirect(clone, dev, flags);
 }
 
 static const struct bpf_func_proto bpf_clone_redirect_proto = {

@@ -1722,10 +1777,7 @@ int skb_do_redirect(struct sk_buff *skb)
 		return -EINVAL;
 	}
 
-	bpf_push_mac_rcsum(skb);
-
-	return ri->flags & BPF_F_INGRESS ?
-	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
+	return __bpf_redirect(skb, dev, ri->flags);
 }
 
 static const struct bpf_func_proto bpf_redirect_proto = {

@@ -122,7 +122,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 	struct flow_dissector_key_keyid *key_keyid;
 	bool skip_vlan = false;
 	u8 ip_proto = 0;
-	bool ret = false;
+	bool ret;
 
 	if (!data) {
 		data = skb->data;

@@ -549,12 +549,17 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 out_good:
 	ret = true;
 
-out_bad:
+	key_control->thoff = (u16)nhoff;
+out:
 	key_basic->n_proto = proto;
 	key_basic->ip_proto = ip_proto;
-	key_control->thoff = (u16)nhoff;
 
 	return ret;
+
+out_bad:
+	ret = false;
+	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
+	goto out;
 }
 EXPORT_SYMBOL(__skb_flow_dissect);

@@ -275,6 +275,7 @@ int rtnl_unregister(int protocol, int msgtype)
 
 	rtnl_msg_handlers[protocol][msgindex].doit = NULL;
 	rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
+	rtnl_msg_handlers[protocol][msgindex].calcit = NULL;
 
 	return 0;
 }

@@ -453,7 +453,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 EXPORT_SYMBOL(sock_queue_rcv_skb);
 
 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
-		     const int nested, unsigned int trim_cap)
+		     const int nested, unsigned int trim_cap, bool refcounted)
 {
 	int rc = NET_RX_SUCCESS;
 

@@ -487,7 +487,8 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
 
 	bh_unlock_sock(sk);
 out:
-	sock_put(sk);
+	if (refcounted)
+		sock_put(sk);
 	return rc;
 discard_and_relse:
 	kfree_skb(skb);

@@ -1543,6 +1544,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
 
 		newsk->sk_err	   = 0;
+		newsk->sk_err_soft = 0;
 		newsk->sk_priority = 0;
 		newsk->sk_incoming_cpu = raw_smp_processor_id();
 		atomic64_set(&newsk->sk_cookie, 0);

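On the refcounted flag threaded through __sk_receive_skb() above: some lookups find the socket under RCU without taking a reference, so the common exit must only sock_put() when a reference was actually held. In miniature (toy types, not the kernel's):

    #include <assert.h>

    struct sock { int refcnt; };

    static void sock_put(struct sock *sk)
    {
    	sk->refcnt--;
    }

    static int receive(struct sock *sk, int refcounted)
    {
    	/* ... queue the packet ... */
    	if (refcounted)		/* only drop a reference we actually hold */
    		sock_put(sk);
    	return 0;
    }

    int main(void)
    {
    	struct sock sk = { .refcnt = 1 };

    	receive(&sk, 0);	/* RCU-style lookup: no reference taken */
    	assert(sk.refcnt == 1);
    	receive(&sk, 1);	/* refcounted lookup pairs with sock_put() */
    	assert(sk.refcnt == 0);
    	return 0;
    }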
@@ -235,7 +235,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 {
 	const struct iphdr *iph = (struct iphdr *)skb->data;
 	const u8 offset = iph->ihl << 2;
-	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
+	const struct dccp_hdr *dh;
 	struct dccp_sock *dp;
 	struct inet_sock *inet;
 	const int type = icmp_hdr(skb)->type;

@@ -245,11 +245,13 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 	int err;
 	struct net *net = dev_net(skb->dev);
 
-	if (skb->len < offset + sizeof(*dh) ||
-	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
-		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
-		return;
-	}
+	/* Only need dccph_dport & dccph_sport which are the first
+	 * 4 bytes in dccp header.
+	 * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
+	 */
+	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
+	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
+	dh = (struct dccp_hdr *)(skb->data + offset);
 
 	sk = __inet_lookup_established(net, &dccp_hashinfo,
 				       iph->daddr, dh->dccph_dport,

@@ -868,7 +870,7 @@ static int dccp_v4_rcv(struct sk_buff *skb)
 		goto discard_and_relse;
 	nf_reset(skb);
 
-	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4);
+	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted);
 
 no_dccp_socket:
 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))

@@ -70,7 +70,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			u8 type, u8 code, int offset, __be32 info)
 {
 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
-	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
+	const struct dccp_hdr *dh;
 	struct dccp_sock *dp;
 	struct ipv6_pinfo *np;
 	struct sock *sk;

@@ -78,12 +78,13 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	__u64 seq;
 	struct net *net = dev_net(skb->dev);
 
-	if (skb->len < offset + sizeof(*dh) ||
-	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
-		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
-				  ICMP6_MIB_INERRORS);
-		return;
-	}
+	/* Only need dccph_dport & dccph_sport which are the first
+	 * 4 bytes in dccp header.
+	 * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
+	 */
+	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
+	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
+	dh = (struct dccp_hdr *)(skb->data + offset);
 
 	sk = __inet6_lookup_established(net, &dccp_hashinfo,
 					&hdr->daddr, dh->dccph_dport,

@@ -738,7 +739,8 @@ static int dccp_v6_rcv(struct sk_buff *skb)
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
 
-	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0;
+	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4,
+				refcounted) ? -1 : 0;
 
 no_dccp_socket:
 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))

@@ -956,6 +958,7 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
 	.getsockopt	   = ipv6_getsockopt,
 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
+	.bind_conflict	   = inet6_csk_bind_conflict,
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_ipv6_setsockopt,
 	.compat_getsockopt = compat_ipv6_getsockopt,

@@ -1009,6 +1009,10 @@ void dccp_close(struct sock *sk, long timeout)
 		__kfree_skb(skb);
 	}
 
+	/* If socket has been already reset kill it. */
+	if (sk->sk_state == DCCP_CLOSED)
+		goto adjudge_to_death;
+
 	if (data_was_unread) {
 		/* Unread data was tossed, send an appropriate Reset Code */
 		DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);

@@ -533,9 +533,9 @@ EXPORT_SYMBOL(inet_dgram_connect);
 
 static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
 {
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
-	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+	add_wait_queue(sk_sleep(sk), &wait);
 	sk->sk_write_pending += writebias;
 
 	/* Basic assumption: if someone sets sk->sk_err, he _must_

@@ -545,13 +545,12 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
 	 */
 	while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		release_sock(sk);
-		timeo = schedule_timeout(timeo);
+		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
 		lock_sock(sk);
 		if (signal_pending(current) || !timeo)
 			break;
-		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	}
-	finish_wait(sk_sleep(sk), &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 	sk->sk_write_pending -= writebias;
 	return timeo;
 }

@@ -2413,22 +2413,19 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
 	struct key_vector *l, **tp = &iter->tnode;
 	t_key key;
 
-	/* use cache location of next-to-find key */
+	/* use cached location of previously found key */
 	if (iter->pos > 0 && pos >= iter->pos) {
-		pos -= iter->pos;
 		key = iter->key;
 	} else {
-		iter->pos = 0;
+		iter->pos = 1;
 		key = 0;
 	}
 
-	while ((l = leaf_walk_rcu(tp, key)) != NULL) {
+	pos -= iter->pos;
+
+	while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) {
 		key = l->key + 1;
 		iter->pos++;
-
-		if (--pos <= 0)
-			break;
-
 		l = NULL;
 
 		/* handle unlikely case of a key wrap */

@@ -2437,7 +2434,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
 	}
 
 	if (l)
-		iter->key = key;	/* remember it */
+		iter->key = l->key;	/* remember it */
 	else
 		iter->pos = 0;		/* forget it */
 

@@ -2465,7 +2462,7 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
 		return fib_route_get_idx(iter, *pos);
 
 	iter->pos = 0;
-	iter->key = 0;
+	iter->key = KEY_MAX;
 
 	return SEQ_START_TOKEN;
 }

@@ -2474,7 +2471,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct fib_route_iter *iter = seq->private;
 	struct key_vector *l = NULL;
-	t_key key = iter->key;
+	t_key key = iter->key + 1;
 
 	++*pos;
 

@@ -2483,7 +2480,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	l = leaf_walk_rcu(&iter->tnode, key);
 
 	if (l) {
-		iter->key = l->key + 1;
+		iter->key = l->key;
 		iter->pos++;
 	} else {
 		iter->pos = 0;

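The /proc/net/route off-by-one fix above switches the cached iterator state from "next key to find" to "key actually returned", resuming each walk at key + 1; starting at KEY_MAX makes the first key + 1 wrap to zero. A toy model of that convention (a sorted array stands in for leaf_walk_rcu(); names are illustrative, not the fib_trie API):

    #include <assert.h>
    #include <limits.h>
    #include <stddef.h>

    /* Toy stand-in for leaf_walk_rcu(): first entry with key >= from. */
    static const unsigned int keys[] = { 3, 7, 9, 12 };

    static const unsigned int *walk(unsigned int from)
    {
    	for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
    		if (keys[i] >= from)
    			return &keys[i];
    	return NULL;
    }

    struct iter { unsigned int key; unsigned int pos; };

    /* Fixed convention: it->key is the key of the entry last returned,
     * so the next lookup resumes at it->key + 1 — no entry is skipped
     * and none is reported twice.
     */
    static const unsigned int *next(struct iter *it)
    {
    	const unsigned int *l = walk(it->key + 1);

    	if (l) {
    		it->key = *l;
    		it->pos++;
    	} else {
    		it->pos = 0;
    	}
    	return l;
    }

    int main(void)
    {
    	/* UINT_MAX plays the role of KEY_MAX: key + 1 wraps to 0. */
    	struct iter it = { .key = UINT_MAX, .pos = 0 };

    	assert(*next(&it) == 3 && it.pos == 1);
    	assert(*next(&it) == 7 && it.pos == 2);
    	assert(*next(&it) == 9 && it.pos == 3);
    	return 0;
    }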
@@ -477,7 +477,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
 	fl4->flowi4_proto = IPPROTO_ICMP;
 	fl4->fl4_icmp_type = type;
 	fl4->fl4_icmp_code = code;
-	fl4->flowi4_oif = l3mdev_master_ifindex(skb_in->dev);
+	fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev);
 
 	security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
 	rt = __ip_route_output_key_hash(net, fl4,

@@ -502,7 +502,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
 	if (err)
 		goto relookup_failed;
 
-	if (inet_addr_type_dev_table(net, skb_in->dev,
+	if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev,
 				     fl4_dec.saddr) == RTN_LOCAL) {
 		rt2 = __ip_route_output_key(net, &fl4_dec);
 		if (IS_ERR(rt2))

@@ -117,7 +117,7 @@ int ip_forward(struct sk_buff *skb)
 	if (opt->is_strictroute && rt->rt_uses_gateway)
 		goto sr_failed;
 
-	IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS;
+	IPCB(skb)->flags |= IPSKB_FORWARDED;
 	mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
 	if (ip_exceeds_mtu(skb, mtu)) {
 		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);

@@ -239,19 +239,23 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
 	struct sk_buff *segs;
 	int ret = 0;
 
-	/* common case: fragmentation of segments is not allowed,
-	 * or seglen is <= mtu
-	 */
-	if (((IPCB(skb)->flags & IPSKB_FRAG_SEGS) == 0) ||
-	    skb_gso_validate_mtu(skb, mtu))
+	/* common case: seglen is <= mtu
+	 */
+	if (skb_gso_validate_mtu(skb, mtu))
 		return ip_finish_output2(net, sk, skb);
 
-	/* Slowpath - GSO segment length is exceeding the dst MTU.
+	/* Slowpath - GSO segment length exceeds the egress MTU.
 	 *
-	 * This can happen in two cases:
-	 * 1) TCP GRO packet, DF bit not set
-	 * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly
-	 *    from host network stack.
+	 * This can happen in several cases:
+	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
+	 *  - Forwarding of an skb that arrived on a virtualization interface
+	 *    (virtio-net/vhost/tap) with TSO/GSO size set by other network
+	 *    stack.
+	 *  - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
+	 *    interface with a smaller MTU.
+	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
+	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
+	 *    insufficient MTU.
 	 */
 	features = netif_skb_features(skb);
 	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);

@@ -1579,7 +1583,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 	}
 
 	oif = arg->bound_dev_if;
-	oif = oif ? : skb->skb_iif;
+	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
+		oif = skb->skb_iif;
 
 	flowi4_init_output(&fl4, oif,
 			   IP4_REPLY_MARK(net, skb->mark),

@@ -63,7 +63,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
 	int pkt_len = skb->len - skb_inner_network_offset(skb);
 	struct net *net = dev_net(rt->dst.dev);
 	struct net_device *dev = skb->dev;
-	int skb_iif = skb->skb_iif;
 	struct iphdr *iph;
 	int err;
 

@@ -73,16 +72,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
 	skb_dst_set(skb, &rt->dst);
 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 
-	if (skb_iif && !(df & htons(IP_DF))) {
-		/* Arrived from an ingress interface, got encapsulated, with
-		 * fragmentation of encapsulating frames allowed.
-		 * If skb is gso, the resulting encapsulated network segments
-		 * may exceed dst mtu.
-		 * Allow IP Fragmentation of segments.
-		 */
-		IPCB(skb)->flags |= IPSKB_FRAG_SEGS;
-	}
-
 	/* Push down and install the IP header. */
 	skb_push(skb, sizeof(struct iphdr));
 	skb_reset_network_header(skb);

@@ -1749,7 +1749,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
 		vif->dev->stats.tx_bytes += skb->len;
 	}
 
-	IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS;
+	IPCB(skb)->flags |= IPSKB_FORWARDED;
 
 	/* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
 	 * not only before forwarding, but after forwarding on all output

@@ -28,7 +28,7 @@ static void nft_dup_ipv4_eval(const struct nft_expr *expr,
 	struct in_addr gw = {
 		.s_addr = (__force __be32)regs->data[priv->sreg_addr],
 	};
-	int oif = regs->data[priv->sreg_dev];
+	int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1;
 
 	nf_dup_ipv4(pkt->net, pkt->skb, pkt->hook, &gw, oif);
 }

@@ -59,7 +59,9 @@ static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
 	struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
 
-	if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
-	    nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
+	if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr))
+		goto nla_put_failure;
+	if (priv->sreg_dev &&
+	    nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
 		goto nla_put_failure;
 

@@ -753,7 +753,9 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
 		goto reject_redirect;
 	}
 
-	n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
+	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
+	if (!n)
+		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
 	if (!IS_ERR(n)) {
 		if (!(n->nud_state & NUD_VALID)) {
 			neigh_event_send(n, NULL);

@@ -1164,7 +1164,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
 	err = -EPIPE;
 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
-		goto out_err;
+		goto do_error;
 
 	sg = !!(sk->sk_route_caps & NETIF_F_SG);
 

@@ -1241,7 +1241,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
 			if (!skb_can_coalesce(skb, i, pfrag->page,
 					      pfrag->offset)) {
-				if (i == sysctl_max_skb_frags || !sg) {
+				if (i >= sysctl_max_skb_frags || !sg) {
 					tcp_mark_push(tp, skb);
 					goto new_segment;
 				}

@@ -56,6 +56,7 @@ struct dctcp {
 	u32 next_seq;
 	u32 ce_state;
 	u32 delayed_ack_reserved;
+	u32 loss_cwnd;
 };
 
 static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */

@@ -96,6 +97,7 @@ static void dctcp_init(struct sock *sk)
 		ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
 
 		ca->delayed_ack_reserved = 0;
+		ca->loss_cwnd = 0;
 		ca->ce_state = 0;
 
 		dctcp_reset(tp, ca);

@@ -111,9 +113,10 @@ static void dctcp_init(struct sock *sk)
 
 static u32 dctcp_ssthresh(struct sock *sk)
 {
-	const struct dctcp *ca = inet_csk_ca(sk);
+	struct dctcp *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
+	ca->loss_cwnd = tp->snd_cwnd;
 	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
 }
 

@@ -308,12 +311,20 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
 	return 0;
 }
 
+static u32 dctcp_cwnd_undo(struct sock *sk)
+{
+	const struct dctcp *ca = inet_csk_ca(sk);
+
+	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
+}
+
 static struct tcp_congestion_ops dctcp __read_mostly = {
 	.init		= dctcp_init,
 	.in_ack_event   = dctcp_update_alpha,
 	.cwnd_event	= dctcp_cwnd_event,
 	.ssthresh	= dctcp_ssthresh,
 	.cong_avoid	= tcp_reno_cong_avoid,
+	.undo_cwnd	= dctcp_cwnd_undo,
 	.set_state	= dctcp_state,
 	.get_info	= dctcp_get_info,
 	.flags		= TCP_CONG_NEEDS_ECN,

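For the dctcp undo above: without its own .undo_cwnd hook, the generic undo path scales cwnd up from the already-reduced value (roughly doubling it for small alpha) when a loss turns out to be spurious; the fix simply restores the cwnd remembered in ssthresh. The arithmetic in isolation (plain C sketch, not the kernel structs):

    #include <assert.h>
    #include <stdint.h>

    struct conn {
    	uint32_t snd_cwnd;
    	uint32_t loss_cwnd;	/* cwnd remembered when loss was signalled */
    	uint32_t alpha;		/* DCTCP alpha, scaled by 2^10 */
    };

    /* On entering loss recovery: remember cwnd, then reduce it by alpha/2. */
    static uint32_t ssthresh(struct conn *c)
    {
    	uint32_t reduced = c->snd_cwnd - ((c->snd_cwnd * c->alpha) >> 11);

    	c->loss_cwnd = c->snd_cwnd;
    	return reduced > 2 ? reduced : 2;
    }

    /* On undo (the loss was spurious): restore the saved value, never scale
     * up the reduced one.
     */
    static uint32_t cwnd_undo(const struct conn *c)
    {
    	return c->snd_cwnd > c->loss_cwnd ? c->snd_cwnd : c->loss_cwnd;
    }

    int main(void)
    {
    	struct conn c = { .snd_cwnd = 100, .loss_cwnd = 0, .alpha = 1024 };

    	c.snd_cwnd = ssthresh(&c);	/* alpha = 2^10 halves cwnd: 50 */
    	assert(c.snd_cwnd == 50 && c.loss_cwnd == 100);
    	assert(cwnd_undo(&c) == 100);	/* restored, not inflated */
    	return 0;
    }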
@@ -1564,6 +1564,21 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_add_backlog);
 
+int tcp_filter(struct sock *sk, struct sk_buff *skb)
+{
+	struct tcphdr *th = (struct tcphdr *)skb->data;
+	unsigned int eaten = skb->len;
+	int err;
+
+	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
+	if (!err) {
+		eaten -= skb->len;
+		TCP_SKB_CB(skb)->end_seq -= eaten;
+	}
+	return err;
+}
+EXPORT_SYMBOL(tcp_filter);
+
 /*
  * From tcp_input.c
  */

@@ -1676,8 +1691,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
 
 	nf_reset(skb);
 
-	if (sk_filter(sk, skb))
+	if (tcp_filter(sk, skb))
 		goto discard_and_relse;
+	th = (const struct tcphdr *)skb->data;
+	iph = ip_hdr(skb);
 
 	skb->dev = NULL;
 

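On tcp_filter() above: a socket filter may legally trim the skb down to the cap (the TCP header length), so TCP must shrink end_seq by the number of bytes removed to keep sequence accounting consistent with the shorter skb instead of exploding later. A self-contained sketch of the compensation (toy struct, not struct sk_buff):

    #include <assert.h>
    #include <stdint.h>

    struct pkt {
    	uint32_t len;		/* bytes currently in the packet */
    	uint32_t end_seq;	/* sequence number just past the data */
    };

    /* Mimic sk_filter_trim_cap(): the filter may shorten the packet,
     * but never below the protocol header (the cap).
     */
    static void filter_trim(struct pkt *p, uint32_t verdict_len, uint32_t cap)
    {
    	if (verdict_len < p->len)
    		p->len = verdict_len > cap ? verdict_len : cap;
    }

    int main(void)
    {
    	struct pkt p = { .len = 140, .end_seq = 1140 }; /* 20B hdr + 120B data */
    	uint32_t before = p.len;

    	filter_trim(&p, 0, 20);		/* filter says "drop payload" */
    	p.end_seq -= before - p.len;	/* what tcp_filter() compensates */

    	assert(p.len == 20 && p.end_seq == 1020);
    	return 0;
    }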
@@ -448,7 +448,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
 	if (__ipv6_addr_needs_scope_id(addr_type))
 		iif = skb->dev->ifindex;
 	else
-		iif = l3mdev_master_ifindex(skb->dev);
+		iif = l3mdev_master_ifindex(skb_dst(skb)->dev);
 
 	/*
 	 *	Must not send error if the source does not uniquely

@@ -1366,7 +1366,7 @@ static int __ip6_append_data(struct sock *sk,
 	if (((length > mtu) ||
 	     (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->dst.dev->features & NETIF_F_UFO) &&
+	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
 	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
 					  hh_len, fragheaderlen, exthdrlen,

@@ -88,9 +88,6 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
 
 	uh->len = htons(skb->len);
 
-	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
-			    | IPSKB_REROUTED);
 	skb_dst_set(skb, dst);
 
 	udp6_set_csum(nocheck, skb, saddr, daddr, skb->len);

@@ -26,7 +26,7 @@ static void nft_dup_ipv6_eval(const struct nft_expr *expr,
 {
 	struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
 	struct in6_addr *gw = (struct in6_addr *)&regs->data[priv->sreg_addr];
-	int oif = regs->data[priv->sreg_dev];
+	int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1;
 
 	nf_dup_ipv6(pkt->net, pkt->skb, pkt->hook, gw, oif);
 }

@@ -57,7 +57,9 @@ static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
 	struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
 
-	if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
-	    nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
+	if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr))
+		goto nla_put_failure;
+	if (priv->sreg_dev &&
+	    nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
 		goto nla_put_failure;
 

@@ -1364,6 +1364,9 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
 	if (rt6->rt6i_flags & RTF_LOCAL)
 		return;
 
+	if (dst_metric_locked(dst, RTAX_MTU))
+		return;
+
 	dst_confirm(dst);
 	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
 	if (mtu >= dst_mtu(dst))

@@ -2758,6 +2761,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
 	   PMTU discovery.
 	 */
 	if (rt->dst.dev == arg->dev &&
+	    dst_metric_raw(&rt->dst, RTAX_MTU) &&
 	    !dst_metric_locked(&rt->dst, RTAX_MTU)) {
 		if (rt->rt6i_flags & RTF_CACHE) {
 			/* For RTF_CACHE with rt6i_pmtu == 0

@@ -818,8 +818,12 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
 	fl6.flowi6_proto = IPPROTO_TCP;
 	if (rt6_need_strict(&fl6.daddr) && !oif)
 		fl6.flowi6_oif = tcp_v6_iif(skb);
-	else
-		fl6.flowi6_oif = oif ? : skb->skb_iif;
+	else {
+		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
+			oif = skb->skb_iif;
+
+		fl6.flowi6_oif = oif;
+	}
 
 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
 	fl6.fl6_dport = t1->dest;

@@ -1225,7 +1229,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_do_rcv(sk, skb);
 
-	if (sk_filter(sk, skb))
+	if (tcp_filter(sk, skb))
 		goto discard;
 
 	/*

@@ -1453,8 +1457,10 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 	if (tcp_v6_inbound_md5_hash(sk, skb))
 		goto discard_and_relse;
 
-	if (sk_filter(sk, skb))
+	if (tcp_filter(sk, skb))
 		goto discard_and_relse;
+	th = (const struct tcphdr *)skb->data;
+	hdr = ipv6_hdr(skb);
 
 	skb->dev = NULL;
 

@@ -2845,7 +2845,7 @@ static struct genl_family ip_vs_genl_family = {
 	.hdrsize	= 0,
 	.name		= IPVS_GENL_NAME,
 	.version	= IPVS_GENL_VERSION,
-	.maxattr	= IPVS_CMD_MAX,
+	.maxattr	= IPVS_CMD_ATTR_MAX,
 	.netnsok        = true,         /* Make ipvsadm to work on netns */
 };

Some files were not shown because too many files have changed in this diff.