Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix memory leak in iwlwifi, from Matti Gottlieb.

 2) Add missing registration of netfilter arp_tables into initial
    namespace, from Florian Westphal.

 3) Fix potential NULL deref in DECnet routing code.

 4) Restrict NETLINK_URELEASE to truly bound sockets only, from Dmitry
    Ivanov.

 5) Fix dst ref counting in VRF, from David Ahern.

 6) Fix TSO segmenting limits in i40e driver, from Alexander Duyck.

 7) Fix heap leak in PACKET_DIAG_MCLIST, from Mathias Krause.

 8) Revalidate IPV6 datagram socket cached routes properly, particularly
    with UDP, from Martin KaFai Lau.

 9) Fix endian bug in RDS dp_ack_seq handling, from Qing Huang.

10) Fix stats typing in bcmgenet driver, from Eric Dumazet.

11) Openvswitch needs to orphan SKBs before ipv6 fragmentation handling,
    from Joe Stringer.

12) SPI device reference leak in spi_ks8895 PHY driver, from Mark Brown.

13) atl2 doesn't actually support scatter-gather, so don't advertise the
    feature. From Ben Hutchings.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (72 commits)
  openvswitch: use flow protocol when recalculating ipv6 checksums
  Driver: Vmxnet3: set CHECKSUM_UNNECESSARY for IPv6 packets
  atl2: Disable unimplemented scatter/gather feature
  net/mlx4_en: Split SW RX dropped counter per RX ring
  net/mlx4_core: Don't allow to VF change global pause settings
  net/mlx4_core: Avoid repeated calls to pci enable/disable
  net/mlx4_core: Implement pci_resume callback
  net: phy: spi_ks8895: Don't leak references to SPI devices
  net: ethernet: davinci_emac: Fix platform_data overwrite
  net: ethernet: davinci_emac: Fix Unbalanced pm_runtime_enable
  qede: Fix single MTU sized packet from firmware GRO flow
  qede: Fix setting Skb network header
  qede: Fix various memory allocation error flows for fastpath
  tcp: Merge tx_flags and tskey in tcp_shifted_skb
  tcp: Merge tx_flags and tskey in tcp_collapse_retrans
  drivers: net: cpsw: fix wrong regs access in cpsw_ndo_open
  tcp: Fix SOF_TIMESTAMPING_TX_ACK when handling dup acks
  openvswitch: Orphan skbs before IPv6 defrag
  Revert "Prevent NUll pointer dereference with two PHYs on cpsw"
  VSOCK: Only check error on skb_recv_datagram when skb is NULL
  ...
commit c5edde3a81
@@ -9,7 +9,8 @@ have dual GMAC each represented by a child node..
 Required properties:
 - compatible: Should be "mediatek,mt7623-eth"
 - reg: Address and length of the register set for the device
-- interrupts: Should contain the frame engines interrupt
+- interrupts: Should contain the three frame engines interrupts in numeric
+  order. These are fe_int0, fe_int1 and fe_int2.
 - clocks: the clock used by the core
 - clock-names: the names of the clock listed in the clocks property. These are
   "ethif", "esw", "gp2", "gp1"
@@ -42,7 +43,9 @@ eth: ethernet@1b100000 {
		 <&ethsys CLK_ETHSYS_GP2>,
		 <&ethsys CLK_ETHSYS_GP1>;
	clock-names = "ethif", "esw", "gp2", "gp1";
-	interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW>;
+	interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW
+		      GIC_SPI 199 IRQ_TYPE_LEVEL_LOW
+		      GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>;
	power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
	resets = <&ethsys MT2701_ETHSYS_ETH_RST>;
	reset-names = "eth";

@@ -136,7 +136,6 @@ static bool bcma_is_core_needed_early(u16 core_id)
	return false;
 }
 
-#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
 static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
						     struct bcma_device *core)
 {
@@ -184,7 +183,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
	struct of_phandle_args out_irq;
	int ret;
 
-	if (!parent || !parent->dev.of_node)
+	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
		return 0;
 
	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -202,23 +201,15 @@ static void bcma_of_fill_device(struct platform_device *parent,
 {
	struct device_node *node;
 
+	if (!IS_ENABLED(CONFIG_OF_IRQ))
+		return;
+
	node = bcma_of_find_child_device(parent, core);
	if (node)
		core->dev.of_node = node;
 
	core->irq = bcma_of_get_irq(parent, core, 0);
 }
-#else
-static void bcma_of_fill_device(struct platform_device *parent,
-				struct bcma_device *core)
-{
-}
-static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
-					   struct bcma_device *core, int num)
-{
-	return 0;
-}
-#endif /* CONFIG_OF */
 
 unsigned int bcma_core_irq(struct bcma_device *core, int num)
 {

@@ -715,6 +715,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
	if (!maddr || maddr->family != AF_ISDN)
		return -EINVAL;
 
+	if (addr_len < sizeof(struct sockaddr_mISDN))
+		return -EINVAL;
+
	lock_sock(sk);
 
	if (_pms(sk)->dev) {

@@ -195,6 +195,7 @@ config GENEVE
 
 config MACSEC
	tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
+	select CRYPTO
	select CRYPTO_AES
	select CRYPTO_GCM
	---help---

@@ -2181,27 +2181,10 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
			       struct net_device *bridge)
 {
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	u16 fid;
	int i, err;
 
	mutex_lock(&ps->smi_mutex);
 
-	/* Get or create the bridge FID and assign it to the port */
-	for (i = 0; i < ps->num_ports; ++i)
-		if (ps->ports[i].bridge_dev == bridge)
-			break;
-
-	if (i < ps->num_ports)
-		err = _mv88e6xxx_port_fid_get(ds, i, &fid);
-	else
-		err = _mv88e6xxx_fid_new(ds, &fid);
-	if (err)
-		goto unlock;
-
-	err = _mv88e6xxx_port_fid_set(ds, port, fid);
-	if (err)
-		goto unlock;
-
	/* Assign the bridge and remap each port's VLANTable */
	ps->ports[port].bridge_dev = bridge;
 
@@ -2213,7 +2196,6 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
		}
	}
 
-unlock:
	mutex_unlock(&ps->smi_mutex);
 
	return err;
@@ -2223,16 +2205,10 @@ void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
 {
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct net_device *bridge = ps->ports[port].bridge_dev;
-	u16 fid;
	int i;
 
	mutex_lock(&ps->smi_mutex);
 
-	/* Give the port a fresh Filtering Information Database */
-	if (_mv88e6xxx_fid_new(ds, &fid) ||
-	    _mv88e6xxx_port_fid_set(ds, port, fid))
-		netdev_warn(ds->ports[port], "failed to assign a new FID\n");
-
	/* Unassign the bridge and remap each port's VLANTable */
	ps->ports[port].bridge_dev = NULL;
 
@@ -2476,9 +2452,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
	 * the other bits clear.
	 */
	reg = 1 << port;
-	/* Disable learning for DSA and CPU ports */
-	if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
-		reg = PORT_ASSOC_VECTOR_LOCKED_PORT;
+	/* Disable learning for CPU port */
+	if (dsa_is_cpu_port(ds, port))
+		reg = 0;
 
	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
	if (ret)
@@ -2558,11 +2534,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
	if (ret)
		goto abort;
 
-	/* Port based VLAN map: give each port its own address
+	/* Port based VLAN map: give each port the same default address
	 * database, and allow bidirectional communication between the
	 * CPU and DSA port(s), and the other ports.
	 */
-	ret = _mv88e6xxx_port_fid_set(ds, port, port + 1);
+	ret = _mv88e6xxx_port_fid_set(ds, port, 0);
	if (ret)
		goto abort;
 

@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
	err = -EIO;
 
-	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
+	netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
	netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
 
	/* Init PHY as early as possible due to power saving issue  */

@@ -1572,6 +1572,11 @@ static int bgmac_probe(struct bcma_device *core)
		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
	}
 
+	/* This (reset &) enable is not preset in specs or reference driver but
+	 * Broadcom does it in arch PCI code when enabling fake PCI device.
+	 */
+	bcma_core_enable(core, 0);
+
	/* Allocation and references */
	net_dev = alloc_etherdev(sizeof(*bgmac));
	if (!net_dev)

@@ -199,9 +199,9 @@
 #define BGMAC_CMDCFG_TAI			0x00000200
 #define BGMAC_CMDCFG_HD				0x00000400	/* Set if in half duplex mode */
 #define BGMAC_CMDCFG_HD_SHIFT			10
-#define BGMAC_CMDCFG_SR_REV0			0x00000800	/* Set to reset mode, for other revs */
-#define BGMAC_CMDCFG_SR_REV4			0x00002000	/* Set to reset mode, only for core rev 4 */
-#define BGMAC_CMDCFG_SR(rev)  ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
+#define BGMAC_CMDCFG_SR_REV0			0x00000800	/* Set to reset mode, for core rev 0-3 */
+#define BGMAC_CMDCFG_SR_REV4			0x00002000	/* Set to reset mode, for core rev >= 4 */
+#define BGMAC_CMDCFG_SR(rev)  ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
 #define BGMAC_CMDCFG_ML				0x00008000	/* Set to activate mac loopback mode */
 #define BGMAC_CMDCFG_AE				0x00400000
 #define BGMAC_CMDCFG_CFE			0x00800000

@@ -878,7 +878,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
		else
			p = (char *)priv;
		p += s->stat_offset;
-		data[i] = *(u32 *)p;
+		if (sizeof(unsigned long) != sizeof(u32) &&
+		    s->stat_sizeof == sizeof(unsigned long))
+			data[i] = *(unsigned long *)p;
+		else
+			data[i] = *(u32 *)p;
	}
 }
 

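Why the bcmgenet change matters: on LP64 kernels some of these counters are declared unsigned long (8 bytes), and the old code read every stat through a u32 pointer regardless of its declared size. A standalone userspace sketch of the failure mode follows (illustrative names, little-endian LP64 assumed; this is not driver code):

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the driver's stats block: some counters are u32,
 * others are unsigned long (8 bytes on LP64 targets). */
struct stats {
	unsigned int  rx_crc_errs;	/* u32-sized counter */
	unsigned long rx_packets;	/* unsigned-long-sized counter */
};

int main(void)
{
	struct stats s = { .rx_crc_errs = 7, .rx_packets = 0x100000001UL };
	char *p = (char *)&s + offsetof(struct stats, rx_packets);

	/* Reading every field as u32 (the old behaviour) truncates a
	 * 64-bit counter: on little-endian LP64 this prints 1. */
	printf("as u32:           %u\n", *(unsigned int *)p);
	/* Reading with the field's declared width prints 4294967297. */
	printf("as unsigned long: %lu\n", *(unsigned long *)p);
	return 0;
}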
@@ -1011,10 +1011,11 @@ static int bgx_init_of_phy(struct bgx *bgx)
		}
 
		lmac++;
-		if (lmac == MAX_LMAC_PER_BGX)
+		if (lmac == MAX_LMAC_PER_BGX) {
+			of_node_put(node);
			break;
+		}
	}
-	of_node_put(node);
	return 0;
 
 defer:

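The thunderx hunk illustrates a general device-tree iteration rule: the for_each_* child iterators drop the previous node's reference on each step, so a loop that breaks out early is still holding a reference to the current node and must put it at the break site (while after normal termination there is nothing left to put). A rough model of that rule with a hypothetical refcounted iterator, purely for illustration:

struct node { int refs; struct node *next; };

/* Hypothetical iterator: drops the reference on cur, takes one on next. */
static struct node *iter_next(struct node *cur)
{
	struct node *next = cur->next;

	if (next)
		next->refs++;
	cur->refs--;
	return next;
}

static void visit_first_n(struct node *head, int n)
{
	struct node *pos = head;

	head->refs++;
	while (pos) {
		if (n-- == 0) {
			pos->refs--;	/* early exit: release before leaving */
			break;
		}
		pos = iter_next(pos);
	}
	/* normal termination: iter_next() already dropped the last ref */
}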
@@ -1451,6 +1451,9 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 *valp);
 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 val);
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	       unsigned int fl0id, unsigned int fl1id);
 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id);

@@ -2981,14 +2981,28 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
 void t4_free_sge_resources(struct adapter *adap)
 {
	int i;
-	struct sge_eth_rxq *eq = adap->sge.ethrxq;
-	struct sge_eth_txq *etq = adap->sge.ethtxq;
+	struct sge_eth_rxq *eq;
+	struct sge_eth_txq *etq;
+
+	/* stop all Rx queues in order to start them draining */
+	for (i = 0; i < adap->sge.ethqsets; i++) {
+		eq = &adap->sge.ethrxq[i];
+		if (eq->rspq.desc)
+			t4_iq_stop(adap, adap->mbox, adap->pf, 0,
+				   FW_IQ_TYPE_FL_INT_CAP,
+				   eq->rspq.cntxt_id,
+				   eq->fl.size ? eq->fl.cntxt_id : 0xffff,
+				   0xffff);
+	}
 
	/* clean up Ethernet Tx/Rx queues */
-	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+	for (i = 0; i < adap->sge.ethqsets; i++) {
+		eq = &adap->sge.ethrxq[i];
		if (eq->rspq.desc)
			free_rspq_fl(adap, &eq->rspq,
				     eq->fl.size ? &eq->fl : NULL);
+
+		etq = &adap->sge.ethtxq[i];
		if (etq->q.desc) {
			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
				       etq->q.cntxt_id);

@@ -6949,6 +6949,39 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
+/**
+ *	t4_iq_stop - stop an ingress queue and its FLs
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the queues
+ *	@vf: the VF owning the queues
+ *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ *	@iqid: ingress queue id
+ *	@fl0id: FL0 queue id or 0xffff if no attached FL0
+ *	@fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ *	Stops an ingress queue and its associated FLs, if any.  This causes
+ *	any current or future data/messages destined for these queues to be
+ *	tossed.
+ */
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	       unsigned int fl0id, unsigned int fl1id)
+{
+	struct fw_iq_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+				  FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+				  FW_IQ_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
+	c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+	c.iqid = cpu_to_be16(iqid);
+	c.fl0id = cpu_to_be16(fl0id);
+	c.fl1id = cpu_to_be16(fl1id);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
 /**
  *	t4_iq_free - free an ingress queue and its FLs
  *	@adap: the adapter

@@ -1223,18 +1223,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
		if (err)
			return err;
 
-		/* verify upper 16 bits are zero */
-		if (vid >> 16)
-			return FM10K_ERR_PARAM;
-
		set = !(vid & FM10K_VLAN_CLEAR);
		vid &= ~FM10K_VLAN_CLEAR;
 
-		err = fm10k_iov_select_vid(vf_info, (u16)vid);
-		if (err < 0)
-			return err;
+		/* if the length field has been set, this is a multi-bit
+		 * update request. For multi-bit requests, simply disallow
+		 * them when the pf_vid has been set. In this case, the PF
+		 * should have already cleared the VLAN_TABLE, and if we
+		 * allowed them, it could allow a rogue VF to receive traffic
+		 * on a VLAN it was not assigned. In the single-bit case, we
+		 * need to modify requests for VLAN 0 to use the default PF or
+		 * SW vid when assigned.
+		 */
 
-		vid = err;
+		if (vid >> 16) {
+			/* prevent multi-bit requests when PF has
+			 * administratively set the VLAN for this VF
+			 */
+			if (vf_info->pf_vid)
+				return FM10K_ERR_PARAM;
+		} else {
+			err = fm10k_iov_select_vid(vf_info, (u16)vid);
+			if (err < 0)
+				return err;
+
+			vid = err;
+		}
 
		/* update VSI info for VF in regards to VLAN table */
		err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);

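For context on the fm10k comment above, the 32-bit VLAN word in the mailbox message carries the vid in its low half, while a nonzero upper half marks a multi-bit (ranged) update request. A minimal decode sketch under that assumption (field and function names here are invented for illustration, not the driver's):

#include <stdint.h>
#include <stdbool.h>

struct vlan_req {
	uint32_t word;	/* low 16 bits: vid; high 16 bits: length, if any */
};

/* A nonzero length field means a multi-bit update request. */
static bool is_multi_bit(const struct vlan_req *req)
{
	return (req->word >> 16) != 0;
}

static uint16_t req_vid(const struct vlan_req *req)
{
	return (uint16_t)(req->word & 0xffff);
}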
@@ -2594,35 +2594,34 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 }
 
 /**
- * __i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
  * @skb:      send buffer
  *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
 **/
 bool __i40e_chk_linearize(struct sk_buff *skb)
 {
	const struct skb_frag_struct *frag, *stale;
-	int gso_size, nr_frags, sum;
+	int nr_frags, sum;
 
-	/* check to see if TSO is enabled, if so we may get a repreive */
-	gso_size = skb_shinfo(skb)->gso_size;
-	if (unlikely(!gso_size))
-		return true;
-
-	/* no need to check if number of frags is less than 8 */
+	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
-	if (nr_frags < I40E_MAX_BUFFER_TXD)
+	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
		return false;
 
	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.  However we don't need
-	 * to perform such validation on the first or last 6 since the first
-	 * 6 cannot inherit any data from a descriptor before them, and the
-	 * last 6 cannot inherit any data from a descriptor after them.
+	 * to perform such validation on the last 6 since the last 6 cannot
+	 * inherit any data from a descriptor after them.
	 */
-	nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];
 
	/* Initialize size to the negative value of gso_size minus 1.  We
@@ -2631,21 +2630,21 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
-	sum = 1 - gso_size;
+	sum = 1 - skb_shinfo(skb)->gso_size;
 
-	/* Add size of frags 1 through 5 to create our initial sum */
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
+	/* Add size of frags 0 through 4 to create our initial sum */
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
 
	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	stale = &skb_shinfo(skb)->frags[0];
	for (;;) {
-		sum += skb_frag_size(++frag);
+		sum += skb_frag_size(frag++);
 
		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
@@ -2655,7 +2654,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
		if (!--nr_frags)
			break;
 
-		sum -= skb_frag_size(++stale);
+		sum -= skb_frag_size(stale++);
	}
 
	return false;

@@ -413,10 +413,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 **/
 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 {
-	/* we can only support up to 8 data buffers for a single send */
-	if (likely(count <= I40E_MAX_BUFFER_TXD))
+	/* Both TSO and single send will work if count is less than 8 */
+	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;
 
-	return __i40e_chk_linearize(skb);
+	if (skb_is_gso(skb))
+		return __i40e_chk_linearize(skb);
+
+	/* we can support up to 8 data buffers for a single send */
+	return count != I40E_MAX_BUFFER_TXD;
 }
 #endif /* _I40E_TXRX_H_ */

@@ -1796,35 +1796,34 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
 }
 
 /**
- * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
  * @skb:      send buffer
  *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
 **/
 bool __i40evf_chk_linearize(struct sk_buff *skb)
 {
	const struct skb_frag_struct *frag, *stale;
-	int gso_size, nr_frags, sum;
+	int nr_frags, sum;
 
-	/* check to see if TSO is enabled, if so we may get a repreive */
-	gso_size = skb_shinfo(skb)->gso_size;
-	if (unlikely(!gso_size))
-		return true;
-
-	/* no need to check if number of frags is less than 8 */
+	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
-	if (nr_frags < I40E_MAX_BUFFER_TXD)
+	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
		return false;
 
	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.  However we don't need
-	 * to perform such validation on the first or last 6 since the first
-	 * 6 cannot inherit any data from a descriptor before them, and the
-	 * last 6 cannot inherit any data from a descriptor after them.
+	 * to perform such validation on the last 6 since the last 6 cannot
+	 * inherit any data from a descriptor after them.
	 */
-	nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];
 
	/* Initialize size to the negative value of gso_size minus 1.  We
@@ -1833,21 +1832,21 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
-	sum = 1 - gso_size;
+	sum = 1 - skb_shinfo(skb)->gso_size;
 
-	/* Add size of frags 1 through 5 to create our initial sum */
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
+	/* Add size of frags 0 through 4 to create our initial sum */
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
 
	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	stale = &skb_shinfo(skb)->frags[0];
	for (;;) {
-		sum += skb_frag_size(++frag);
+		sum += skb_frag_size(frag++);
 
		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
@@ -1857,7 +1856,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
		if (!--nr_frags)
			break;
 
-		sum -= skb_frag_size(++stale);
+		sum -= skb_frag_size(stale++);
	}
 
	return false;

@@ -395,10 +395,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 **/
 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 {
-	/* we can only support up to 8 data buffers for a single send */
-	if (likely(count <= I40E_MAX_BUFFER_TXD))
+	/* Both TSO and single send will work if count is less than 8 */
+	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;
 
-	return __i40evf_chk_linearize(skb);
+	if (skb_is_gso(skb))
+		return __i40evf_chk_linearize(skb);
+
+	/* we can support up to 8 data buffers for a single send */
+	return count != I40E_MAX_BUFFER_TXD;
 }
 #endif /* _I40E_TXRX_H_ */

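The i40e/i40evf hunks above implement a sliding-window check: for a GSO skb, any run of 6 consecutive fragments that sums to less than gso_size could force one TSO segment to span more than the 8 buffers the hardware can chain. A standalone sketch of the same window walk (plain C with invented names; the limit of 8 is taken from the comments above, and this is not the driver code):

#include <stdbool.h>

#define MAX_BUFFER_TXD 8	/* hardware descriptor limit assumed above */

static bool needs_linearize(const int *frag_size, int nr_frags, int gso_size)
{
	const int *frag = frag_size, *stale = frag_size;
	int sum, i;

	/* fewer than 7 fragments can never exceed 8 buffers per segment */
	if (nr_frags < MAX_BUFFER_TXD - 1)
		return false;

	nr_frags -= MAX_BUFFER_TXD - 2;

	/* seed the 6-fragment window, offset by -(gso_size - 1) */
	sum = 1 - gso_size;
	for (i = 0; i < 5; i++)
		sum += *frag++;

	/* slide the window: add the newest fragment, then drop the stalest */
	for (;;) {
		sum += *frag++;
		if (sum < 0)		/* window fell short of gso_size */
			return true;
		if (!--nr_frags)
			break;
		sum -= *stale++;
	}
	return false;
}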
@@ -337,7 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
	case ETH_SS_STATS:
		return bitmap_iterator_count(&it) +
			(priv->tx_ring_num * 2) +
-			(priv->rx_ring_num * 2);
+			(priv->rx_ring_num * 3);
	case ETH_SS_TEST:
		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -404,6 +404,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
	for (i = 0; i < priv->rx_ring_num; i++) {
		data[index++] = priv->rx_ring[i]->packets;
		data[index++] = priv->rx_ring[i]->bytes;
+		data[index++] = priv->rx_ring[i]->dropped;
	}
	spin_unlock_bh(&priv->stats_lock);
 
@@ -477,6 +478,8 @@ static void mlx4_en_get_strings(struct net_device *dev,
				"rx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_bytes", i);
+			sprintf(data + (index++) * ETH_GSTRING_LEN,
+				"rx%d_dropped", i);
		}
		break;
	case ETH_SS_PRIV_FLAGS:

@@ -158,6 +158,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
	u64 in_mod = reset << 8 | port;
	int err;
	int i, counter_index;
+	unsigned long sw_rx_dropped = 0;
 
	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
@@ -180,6 +181,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
	for (i = 0; i < priv->rx_ring_num; i++) {
		stats->rx_packets += priv->rx_ring[i]->packets;
		stats->rx_bytes += priv->rx_ring[i]->bytes;
+		sw_rx_dropped += priv->rx_ring[i]->dropped;
		priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
		priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
		priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
@@ -236,7 +238,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
					  &mlx4_en_stats->MCAST_prio_1,
					  NUM_PRIORITIES);
	stats->collisions = 0;
-	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
+	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
+			    sw_rx_dropped;
	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
	stats->rx_over_errors = 0;
	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);

@@ -61,7 +61,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
		gfp_t gfp = _gfp;
 
		if (order)
-			gfp |= __GFP_COMP | __GFP_NOWARN;
+			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
		page = alloc_pages(gfp, order);
		if (likely(page))
			break;
@@ -126,7 +126,9 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			dma_unmap_page(priv->ddev, page_alloc[i].dma,
				page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
			page = page_alloc[i].page;
-			set_page_count(page, 1);
+			/* Revert changes done by mlx4_alloc_pages */
+			page_ref_sub(page, page_alloc[i].page_size /
+					   priv->frag_info[i].frag_stride - 1);
			put_page(page);
		}
	}
@@ -176,7 +178,9 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
		page = page_alloc->page;
-		set_page_count(page, 1);
+		/* Revert changes done by mlx4_alloc_pages */
+		page_ref_sub(page, page_alloc->page_size /
+				   priv->frag_info[i].frag_stride - 1);
		put_page(page);
		page_alloc->page = NULL;
	}
@@ -939,7 +943,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
		/* GRO not possible, complete processing here */
		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
		if (!skb) {
-			priv->stats.rx_dropped++;
+			ring->dropped++;
			goto next;
		}
 

@@ -3172,6 +3172,34 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
	return 0;
 }
 
+static int mlx4_pci_enable_device(struct mlx4_dev *dev)
+{
+	struct pci_dev *pdev = dev->persist->pdev;
+	int err = 0;
+
+	mutex_lock(&dev->persist->pci_status_mutex);
+	if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
+		err = pci_enable_device(pdev);
+		if (!err)
+			dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
+	}
+	mutex_unlock(&dev->persist->pci_status_mutex);
+
+	return err;
+}
+
+static void mlx4_pci_disable_device(struct mlx4_dev *dev)
+{
+	struct pci_dev *pdev = dev->persist->pdev;
+
+	mutex_lock(&dev->persist->pci_status_mutex);
+	if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
+		pci_disable_device(pdev);
+		dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
+	}
+	mutex_unlock(&dev->persist->pci_status_mutex);
+}
+
 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
			 int total_vfs, int *nvfs, struct mlx4_priv *priv,
			 int reset_flow)
@@ -3582,7 +3610,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
 
	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
 
-	err = pci_enable_device(pdev);
+	err = mlx4_pci_enable_device(&priv->dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
@@ -3715,7 +3743,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
	pci_release_regions(pdev);
 
 err_disable_pdev:
-	pci_disable_device(pdev);
+	mlx4_pci_disable_device(&priv->dev);
	pci_set_drvdata(pdev, NULL);
	return err;
 }
@@ -3775,6 +3803,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
	priv->pci_dev_data = id->driver_data;
	mutex_init(&dev->persist->device_state_mutex);
	mutex_init(&dev->persist->interface_state_mutex);
+	mutex_init(&dev->persist->pci_status_mutex);
 
	ret = devlink_register(devlink, &pdev->dev);
	if (ret)
@@ -3923,7 +3952,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
	}
 
	pci_release_regions(pdev);
-	pci_disable_device(pdev);
+	mlx4_pci_disable_device(dev);
	devlink_unregister(devlink);
	kfree(dev->persist);
	devlink_free(devlink);
@@ -4042,7 +4071,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;
 
-	pci_disable_device(pdev);
+	mlx4_pci_disable_device(persist->dev);
	return PCI_ERS_RESULT_NEED_RESET;
 }
 
@@ -4050,45 +4079,53 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 {
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev	 *dev = persist->dev;
-	struct mlx4_priv *priv = mlx4_priv(dev);
-	int ret;
-	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
-	int total_vfs;
+	int err;
 
	mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
-	ret = pci_enable_device(pdev);
-	if (ret) {
-		mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
+	err = mlx4_pci_enable_device(dev);
+	if (err) {
+		mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
		return PCI_ERS_RESULT_DISCONNECT;
	}
 
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
+	return PCI_ERS_RESULT_RECOVERED;
+}
 
+static void mlx4_pci_resume(struct pci_dev *pdev)
+{
+	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+	struct mlx4_dev	 *dev = persist->dev;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+	int total_vfs;
+	int err;
+
+	mlx4_err(dev, "%s was called\n", __func__);
	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
 
	mutex_lock(&persist->interface_state_mutex);
	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
-		ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
+		err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
				    priv, 1);
-		if (ret) {
-			mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
-				 __func__, ret);
+		if (err) {
+			mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
+				 __func__, err);
			goto end;
		}
 
-		ret = restore_current_port_types(dev, dev->persist->
+		err = restore_current_port_types(dev, dev->persist->
						 curr_port_type, dev->persist->
						 curr_port_poss_type);
-		if (ret)
-			mlx4_err(dev, "could not restore original port types (%d)\n", ret);
+		if (err)
+			mlx4_err(dev, "could not restore original port types (%d)\n", err);
	}
 end:
	mutex_unlock(&persist->interface_state_mutex);
-
-	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }
 
 static void mlx4_shutdown(struct pci_dev *pdev)
@@ -4105,6 +4142,7 @@ static void mlx4_shutdown(struct pci_dev *pdev)
 static const struct pci_error_handlers mlx4_err_handler = {
	.error_detected = mlx4_pci_err_detected,
	.slot_reset     = mlx4_pci_slot_reset,
+	.resume		= mlx4_pci_resume,
 };
 
 static struct pci_driver mlx4_driver = {

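The mlx4 change above exists because both the error-recovery path and the teardown path can call pci_disable_device(), unbalancing the PCI enable refcount; the fix makes enable/disable idempotent behind a status mutex. A generic sketch of the same guard pattern in plain C (pthread-based, invented names; not kernel code):

#include <pthread.h>
#include <stdbool.h>

struct guarded_dev {
	pthread_mutex_t lock;
	bool enabled;	/* tracks whether the raw enable has been done */
};

/* Enable only if currently disabled, so repeated calls are harmless. */
static int guarded_enable(struct guarded_dev *d, int (*raw_enable)(void))
{
	int err = 0;

	pthread_mutex_lock(&d->lock);
	if (!d->enabled) {
		err = raw_enable();
		if (!err)
			d->enabled = true;
	}
	pthread_mutex_unlock(&d->lock);
	return err;
}

/* Disable only if currently enabled; a second caller becomes a no-op. */
static void guarded_disable(struct guarded_dev *d, void (*raw_disable)(void))
{
	pthread_mutex_lock(&d->lock);
	if (d->enabled) {
		raw_disable();
		d->enabled = false;
	}
	pthread_mutex_unlock(&d->lock);
}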
@@ -586,6 +586,8 @@ struct mlx4_mfunc_master_ctx {
	struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
	int			init_port_ref[MLX4_MAX_PORTS + 1];
	u16			max_mtu[MLX4_MAX_PORTS + 1];
+	u8			pptx;
+	u8			pprx;
	int			disable_mcast_ref[MLX4_MAX_PORTS + 1];
	struct mlx4_resource_tracker res_tracker;
	struct workqueue_struct *comm_wq;

@@ -323,6 +323,7 @@ struct mlx4_en_rx_ring {
	unsigned long csum_ok;
	unsigned long csum_none;
	unsigned long csum_complete;
+	unsigned long dropped;
	int hwtstamp_rx_filter;
	cpumask_var_t affinity_mask;
 };

@@ -1317,6 +1317,19 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
		}
 
		gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+		/* Slave cannot change Global Pause configuration */
+		if (slave != mlx4_master_func_num(dev) &&
+		    ((gen_context->pptx != master->pptx) ||
+		     (gen_context->pprx != master->pprx))) {
+			gen_context->pptx = master->pptx;
+			gen_context->pprx = master->pprx;
+			mlx4_warn(dev,
+				  "denying Global Pause change for slave:%d\n",
+				  slave);
+		} else {
+			master->pptx = gen_context->pptx;
+			master->pprx = gen_context->pprx;
+		}
		break;
	case MLX4_SET_PORT_GID_TABLE:
		/* change to MULTIPLE entries: number of guest's gids

@@ -750,6 +750,12 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
	return false;
 }
 
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+	qed_chain_consume(&rxq->rx_bd_ring);
+	rxq->sw_rx_cons++;
+}
+
 /* This function reuses the buffer(from an offset) from
  * consumer index to producer index in the bd ring
  */
@@ -773,6 +779,21 @@ static inline void qede_reuse_page(struct qede_dev *edev,
	curr_cons->data = NULL;
 }
 
+/* In case of allocation failures reuse buffers
+ * from consumer index to produce buffers for firmware
+ */
+static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+				    struct qede_dev *edev, u8 count)
+{
+	struct sw_rx_data *curr_cons;
+
+	for (; count > 0; count--) {
+		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+		qede_reuse_page(edev, rxq, curr_cons);
+		qede_rx_bd_ring_consume(rxq);
+	}
+}
+
 static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
					 struct qede_rx_queue *rxq,
					 struct sw_rx_data *curr_cons)
@@ -781,8 +802,14 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
	curr_cons->page_offset += rxq->rx_buf_seg_size;
 
	if (curr_cons->page_offset == PAGE_SIZE) {
-		if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+		if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+			/* Since we failed to allocate new buffer
+			 * current buffer can be used again.
+			 */
+			curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
			return -ENOMEM;
+		}
 
		dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
			       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -901,7 +928,10 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
			   len_on_bd);
 
	if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
-		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+		/* Incr page ref count to reuse on allocation failure
+		 * so that it doesn't get freed while freeing SKB.
+		 */
+		atomic_inc(&current_bd->data->_count);
		goto out;
	}
 
@@ -915,6 +945,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
	return 0;
 
 out:
+	tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+	qede_recycle_rx_bd_ring(rxq, edev, 1);
	return -ENOMEM;
 }
 
@@ -966,8 +998,9 @@ static void qede_tpa_start(struct qede_dev *edev,
	tpa_info->skb = netdev_alloc_skb(edev->ndev,
					 le16_to_cpu(cqe->len_on_first_bd));
	if (unlikely(!tpa_info->skb)) {
		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
-		return;
+		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+		goto cons_buf;
	}
 
	skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
@@ -990,6 +1023,7 @@ static void qede_tpa_start(struct qede_dev *edev,
	/* This is needed in order to enable forwarding support */
	qede_set_gro_params(edev, tpa_info->skb, cqe);
 
+cons_buf: /* We still need to handle bd_len_list to consume buffers */
	if (likely(cqe->ext_bd_len_list[0]))
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->ext_bd_len_list[0]));
@@ -1007,7 +1041,6 @@ static void qede_gro_ip_csum(struct sk_buff *skb)
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;
 
-	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);
 
@@ -1022,7 +1055,6 @@ static void qede_gro_ipv6_csum(struct sk_buff *skb)
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;
 
-	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);
 
@@ -1037,8 +1069,21 @@ static void qede_gro_receive(struct qede_dev *edev,
			     struct sk_buff *skb,
			     u16 vlan_tag)
 {
+	/* FW can send a single MTU sized packet from gro flow
+	 * due to aggregation timeout/last segment etc. which
+	 * is not expected to be a gro packet. If a skb has zero
+	 * frags then simply push it in the stack as non gso skb.
+	 */
+	if (unlikely(!skb->data_len)) {
+		skb_shinfo(skb)->gso_type = 0;
+		skb_shinfo(skb)->gso_size = 0;
+		goto send_skb;
+	}
+
 #ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
+		skb_set_network_header(skb, 0);
+
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			qede_gro_ip_csum(skb);
@@ -1053,6 +1098,8 @@ static void qede_gro_receive(struct qede_dev *edev,
		}
	}
 #endif
+
+send_skb:
	skb_record_rx_queue(skb, fp->rss_id);
	qede_skb_receive(edev, fp, skb, vlan_tag);
 }
@@ -1244,17 +1291,17 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
				  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
				  sw_comp_cons, parse_flag);
			rxq->rx_hw_errors++;
-			qede_reuse_page(edev, rxq, sw_rx_data);
-			goto next_rx;
+			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+			goto next_cqe;
		}
 
		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			DP_NOTICE(edev,
				  "Build_skb failed, dropping incoming packet\n");
-			qede_reuse_page(edev, rxq, sw_rx_data);
+			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
			rxq->rx_alloc_errors++;
-			goto next_rx;
+			goto next_cqe;
		}
 
		/* Copy data into SKB */
@@ -1288,11 +1335,22 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
			if (unlikely(qede_realloc_rx_buffer(edev, rxq,
							    sw_rx_data))) {
				DP_ERR(edev, "Failed to allocate rx buffer\n");
+				/* Incr page ref count to reuse on allocation
+				 * failure so that it doesn't get freed while
+				 * freeing SKB.
+				 */
+
+				atomic_inc(&sw_rx_data->data->_count);
				rxq->rx_alloc_errors++;
+				qede_recycle_rx_bd_ring(rxq, edev,
+							fp_cqe->bd_num);
+				dev_kfree_skb_any(skb);
				goto next_cqe;
			}
		}
 
+		qede_rx_bd_ring_consume(rxq);
+
		if (fp_cqe->bd_num != 1) {
			u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
			u8 num_frags;
 
@@ -1303,18 +1361,27 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
			     num_frags--) {
				u16 cur_size = pkt_len > rxq->rx_buf_size ?
						rxq->rx_buf_size : pkt_len;
 
-				WARN_ONCE(!cur_size,
-					  "Still got %d BDs for mapping jumbo, but length became 0\n",
-					  num_frags);
+				if (unlikely(!cur_size)) {
+					DP_ERR(edev,
+					       "Still got %d BDs for mapping jumbo, but length became 0\n",
+					       num_frags);
+					qede_recycle_rx_bd_ring(rxq, edev,
+								num_frags);
+					dev_kfree_skb_any(skb);
+					goto next_cqe;
+				}
 
-				if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+				if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+					qede_recycle_rx_bd_ring(rxq, edev,
+								num_frags);
+					dev_kfree_skb_any(skb);
					goto next_cqe;
+				}
 
-				rxq->sw_rx_cons++;
				sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
				sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
-				qed_chain_consume(&rxq->rx_bd_ring);
+				qede_rx_bd_ring_consume(rxq);
 
				dma_unmap_page(&edev->pdev->dev,
					       sw_rx_data->mapping,
					       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -1330,7 +1397,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
				pkt_len -= cur_size;
			}
 
-			if (pkt_len)
+			if (unlikely(pkt_len))
				DP_ERR(edev,
				       "Mapped all BDs of jumbo, but still have %d bytes\n",
				       pkt_len);
@@ -1349,10 +1416,6 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
		skb_record_rx_queue(skb, fp->rss_id);
 
		qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
 
-		qed_chain_consume(&rxq->rx_bd_ring);
-next_rx:
-		rxq->sw_rx_cons++;
-next_rx_only:
		rx_pkt++;
 
@@ -2257,7 +2320,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
		struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
 
-		if (replace_buf) {
+		if (replace_buf->data) {
			dma_unmap_page(&edev->pdev->dev,
				       dma_unmap_addr(replace_buf, mapping),
				       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -2377,7 +2440,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev,
 static int qede_alloc_mem_rxq(struct qede_dev *edev,
			      struct qede_rx_queue *rxq)
 {
-	int i, rc, size, num_allocated;
+	int i, rc, size;
 
	rxq->num_rx_buffers = edev->q_num_rx_buffers;
 
@@ -2394,6 +2457,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
	if (!rxq->sw_rx_ring) {
		DP_ERR(edev, "Rx buffers ring allocation failed\n");
+		rc = -ENOMEM;
		goto err;
	}
 
@@ -2421,26 +2485,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
	/* Allocate buffers for the Rx ring */
	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qede_alloc_rx_buffer(edev, rxq);
-		if (rc)
-			break;
-	}
-	num_allocated = i;
-	if (!num_allocated) {
-		DP_ERR(edev, "Rx buffers allocation failed\n");
-		goto err;
-	} else if (num_allocated < rxq->num_rx_buffers) {
-		DP_NOTICE(edev,
-			  "Allocated less buffers than desired (%d allocated)\n",
-			  num_allocated);
+		if (rc) {
+			DP_ERR(edev,
+			       "Rx buffers allocation failed at index %d\n", i);
+			goto err;
+		}
	}
 
-	qede_alloc_sge_mem(edev, rxq);
-
-	return 0;
-
+	rc = qede_alloc_sge_mem(edev, rxq);
 err:
-	qede_free_mem_rxq(edev, rxq);
-	return -ENOMEM;
+	return rc;
 }
 
 static void qede_free_mem_txq(struct qede_dev *edev,
@@ -2523,10 +2577,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev,
	}
 
	return 0;
-
 err:
-	qede_free_mem_fp(edev, fp);
-	return -ENOMEM;
+	return rc;
 }
 
 static void qede_free_mem_load(struct qede_dev *edev)
@@ -2549,22 +2601,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
		struct qede_fastpath *fp = &edev->fp_array[rss_id];
 
		rc = qede_alloc_mem_fp(edev, fp);
-		if (rc)
-			break;
-	}
-
-	if (rss_id != QEDE_RSS_CNT(edev)) {
-		/* Failed allocating memory for all the queues */
-		if (!rss_id) {
-			DP_ERR(edev,
-			       "Failed to allocate memory for the leading queue\n");
-			rc = -ENOMEM;
-		} else {
-			DP_NOTICE(edev,
-				  "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
-				  QEDE_RSS_CNT(edev), rss_id);
-			edev->num_rss = rss_id;
+		if (rc) {
+			DP_ERR(edev,
+			       "Failed to allocate memory for fastpath - rss id = %d\n",
+			       rss_id);
+			qede_free_mem_load(edev);
+			return rc;
		}
	}
 
	return 0;

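The recurring pattern in the qede hunks is recycle-on-failure: when a replacement page cannot be allocated, the RX buffers the failed packet occupied are reposted to the producer side instead of being consumed and lost, so the ring never shrinks under memory pressure. A toy model of that idea in plain C (ring layout and names are illustrative, not the driver's):

#include <stdint.h>

#define RING_SIZE 256
#define RING_MASK (RING_SIZE - 1)

struct rx_ring {
	void *buf[RING_SIZE];
	uint32_t cons;	/* next buffer to consume */
	uint32_t prod;	/* next free producer slot */
};

/* Hand `count` consumer-side buffers straight back to the producer
 * index, mirroring what qede_recycle_rx_bd_ring() does above. */
static void recycle_buffers(struct rx_ring *ring, unsigned int count)
{
	while (count--) {
		ring->buf[ring->prod & RING_MASK] =
			ring->buf[ring->cons & RING_MASK];
		ring->prod++;
		ring->cons++;
	}
}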
@@ -1691,6 +1691,9 @@ static int ravb_set_gti(struct net_device *ndev)
	rate = clk_get_rate(clk);
	clk_put(clk);
 
+	if (!rate)
+		return -EINVAL;
+
	inc = 1000000000ULL << 20;
	do_div(inc, rate);
 

@@ -2194,17 +2194,13 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
				   __func__);
			return ret;
		}
-		ret = sh_eth_dev_init(ndev, false);
+		ret = sh_eth_dev_init(ndev, true);
		if (ret < 0) {
			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
				   __func__);
			return ret;
		}
 
-		mdp->irq_enabled = true;
-		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
-		/* Setting the Rx mode will start the Rx process. */
-		sh_eth_write(ndev, EDRRR_R, EDRRR);
		netif_device_attach(ndev);
	}
 

@@ -34,6 +34,9 @@
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
 #define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
 
+#define SYSMGR_FPGAGRP_MODULE_REG  0x00000028
+#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
+
 #define EMAC_SPLITTER_CTRL_REG		0x0
 #define EMAC_SPLITTER_CTRL_SPEED_MASK	0x3
 #define EMAC_SPLITTER_CTRL_SPEED_10	0x2
@@ -148,7 +151,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
	int phymode = dwmac->interface;
	u32 reg_offset = dwmac->reg_offset;
	u32 reg_shift = dwmac->reg_shift;
-	u32 ctrl, val;
+	u32 ctrl, val, module;
 
	switch (phymode) {
	case PHY_INTERFACE_MODE_RGMII:
@@ -175,12 +178,19 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
	ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
	ctrl |= val << reg_shift;
 
-	if (dwmac->f2h_ptp_ref_clk)
+	if (dwmac->f2h_ptp_ref_clk) {
		ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
-	else
+		regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+			    &module);
+		module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
+		regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+			     module);
+	} else {
		ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
+	}
 
	regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
 
	return 0;
 }
 

@@ -1251,12 +1251,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
	int i, ret;
	u32 reg;
 
+	pm_runtime_get_sync(&priv->pdev->dev);
+
	if (!cpsw_common_res_usage_state(priv))
		cpsw_intr_disable(priv);
	netif_carrier_off(ndev);
 
-	pm_runtime_get_sync(&priv->pdev->dev);
-
	reg = priv->version;
 
	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",

@@ -1878,8 +1878,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
		pdata->hw_ram_addr = auxdata->hw_ram_addr;
	}
 
-	pdev->dev.platform_data = pdata;
-
	return  pdata;
 }
 
@@ -2101,6 +2099,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
	cpdma_ctlr_destroy(priv->dma);
 
	unregister_netdev(ndev);
+	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);
 
	return 0;

@@ -441,7 +441,7 @@ static int ks8995_probe(struct spi_device *spi)
		return -ENOMEM;
 
	mutex_init(&ks->lock);
-	ks->spi = spi_dev_get(spi);
+	ks->spi = spi;
	ks->chip = &ks8995_chip[variant];
 
	if (ks->spi->dev.of_node) {

@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
	{ USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
	  .driver_info = (unsigned long)&cdc_mbim_info,
	},
-	/* Huawei E3372 fails unless NDP comes after the IP packets */
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+
+	/* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
+	 * (12d1:157d), are known to fail unless the NDP is placed
+	 * after the IP packets.  Applying the quirk to all Huawei
+	 * devices is broader than necessary, but harmless.
+	 */
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
	  .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
	},
	/* default entry */

@@ -1152,12 +1152,16 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		union Vmxnet3_GenericDesc *gdesc)
 {
	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
-		/* typical case: TCP/UDP over IP and both csums are correct */
-		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
-							VMXNET3_RCD_CSUM_OK) {
+		if (gdesc->rcd.v4 &&
+		    (le32_to_cpu(gdesc->dword[3]) &
+		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
+			BUG_ON(gdesc->rcd.frg);
+		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
+					     (1 << VMXNET3_RCD_TUC_SHIFT))) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
-			BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {

@@ -69,10 +69,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.6.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.7.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040600
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040700
 
 #if defined(CONFIG_PCI_MSI)
	/* RSS only makes sense if MSI-X is supported. */

@@ -60,41 +60,6 @@ struct pcpu_dstats {
	struct u64_stats_sync	syncp;
 };
 
-static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
-{
-	return dst;
-}
-
-static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
-	return ip_local_out(net, sk, skb);
-}
-
-static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
-{
-	/* TO-DO: return max ethernet size? */
-	return dst->dev->mtu;
-}
-
-static void vrf_dst_destroy(struct dst_entry *dst)
-{
-	/* our dst lives forever - or until the device is closed */
-}
-
-static unsigned int vrf_default_advmss(const struct dst_entry *dst)
-{
-	return 65535 - 40;
-}
-
-static struct dst_ops vrf_dst_ops = {
-	.family		= AF_INET,
-	.local_out	= vrf_ip_local_out,
-	.check		= vrf_ip_check,
-	.mtu		= vrf_v4_mtu,
-	.destroy	= vrf_dst_destroy,
-	.default_advmss	= vrf_default_advmss,
-};
-
 /* neighbor handling is done with actual device; do not want
  * to flip skb->dev for those ndisc packets. This really fails
  * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
@@ -349,46 +314,6 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
-{
-	return dst;
-}
-
-static struct dst_ops vrf_dst_ops6 = {
-	.family		= AF_INET6,
-	.local_out	= ip6_local_out,
-	.check		= vrf_ip6_check,
-	.mtu		= vrf_v4_mtu,
-	.destroy	= vrf_dst_destroy,
-	.default_advmss	= vrf_default_advmss,
-};
-
-static int init_dst_ops6_kmem_cachep(void)
-{
-	vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
-						     sizeof(struct rt6_info),
-						     0,
-						     SLAB_HWCACHE_ALIGN,
-						     NULL);
-
-	if (!vrf_dst_ops6.kmem_cachep)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-	kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
-}
-
 static int vrf_input6(struct sk_buff *skb)
 {
	skb->dev->stats.rx_errors++;
	kfree_skb(skb);
	return 0;
 }
 
 /* modelled after ip6_finish_output2 */
 static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
@@ -429,67 +354,34 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
 
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
 {
-	dst_destroy(&vrf->rt6->dst);
-	free_percpu(vrf->rt6->rt6i_pcpu);
+	dst_release(&vrf->rt6->dst);
	vrf->rt6 = NULL;
 }
 
 static int vrf_rt6_create(struct net_device *dev)
 {
	struct net_vrf *vrf = netdev_priv(dev);
-	struct dst_entry *dst;
+	struct net *net = dev_net(dev);
	struct rt6_info *rt6;
-	int cpu;
	int rc = -ENOMEM;
 
-	rt6 = dst_alloc(&vrf_dst_ops6, dev, 0,
-			DST_OBSOLETE_NONE,
-			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+	rt6 = ip6_dst_alloc(net, dev,
+			    DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
	if (!rt6)
		goto out;
 
-	dst = &rt6->dst;
-
-	rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
-	if (!rt6->rt6i_pcpu) {
-		dst_destroy(dst);
-		goto out;
-	}
-	for_each_possible_cpu(cpu) {
-		struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
-		*p =  NULL;
-	}
-
-	memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
-
-	INIT_LIST_HEAD(&rt6->rt6i_siblings);
-	INIT_LIST_HEAD(&rt6->rt6i_uncached);
-
	rt6->dst.input	= vrf_input6;
	rt6->dst.output	= vrf_output6;
 
-	rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id);
-
-	atomic_set(&rt6->dst.__refcnt, 2);
-
+	rt6->rt6i_table = fib6_get_table(net, vrf->tb_id);
+	dst_hold(&rt6->dst);
	vrf->rt6 = rt6;
	rc = 0;
 out:
	return rc;
 }
 #else
-static int init_dst_ops6_kmem_cachep(void)
-{
-	return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-}
-
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
 {
 }
 
@@ -557,11 +449,11 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
-static void vrf_rtable_destroy(struct net_vrf *vrf)
+static void vrf_rtable_release(struct net_vrf *vrf)
 {
	struct dst_entry *dst = (struct dst_entry *)vrf->rth;
 
-	dst_destroy(dst);
+	dst_release(dst);
	vrf->rth = NULL;
 }
 
@@ -570,22 +462,10 @@ static struct rtable *vrf_rtable_create(struct net_device *dev)
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth;
 
-	rth = dst_alloc(&vrf_dst_ops, dev, 2,
-			DST_OBSOLETE_NONE,
-			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (rth) {
		rth->dst.output	= vrf_output;
-		rth->rt_genid	= rt_genid_ipv4(dev_net(dev));
-		rth->rt_flags	= 0;
-		rth->rt_type	= RTN_UNICAST;
-		rth->rt_is_input = 0;
-		rth->rt_iif	= 0;
-		rth->rt_pmtu	= 0;
-		rth->rt_gateway	= 0;
-		rth->rt_uses_gateway = 0;
		rth->rt_table_id = vrf->tb_id;
-		INIT_LIST_HEAD(&rth->rt_uncached);
-		rth->rt_uncached_list = NULL;
	}
 
	return rth;
@@ -673,8 +553,8 @@ static void vrf_dev_uninit(struct net_device *dev)
	struct net_device *port_dev;
	struct list_head *iter;
 
-	vrf_rtable_destroy(vrf);
-	vrf_rt6_destroy(vrf);
+	vrf_rtable_release(vrf);
+	vrf_rt6_release(vrf);
 
	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);
@@ -704,7 +584,7 @@ static int vrf_dev_init(struct net_device *dev)
	return 0;
 
 out_rth:
-	vrf_rtable_destroy(vrf);
+	vrf_rtable_release(vrf);
 out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
@@ -737,7 +617,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
		struct net_vrf *vrf = netdev_priv(dev);
 
		rth = vrf->rth;
-		atomic_inc(&rth->dst.__refcnt);
+		dst_hold(&rth->dst);
	}
 
	return rth;
@@ -788,7 +668,7 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
		struct net_vrf *vrf = netdev_priv(dev);
 
		rt = vrf->rt6;
-		atomic_inc(&rt->dst.__refcnt);
+		dst_hold(&rt->dst);
	}
 
	return (struct dst_entry *)rt;
@@ -946,19 +826,6 @@ static int __init vrf_init_module(void)
 {
	int rc;
|
||||
vrf_dst_ops.kmem_cachep =
|
||||
kmem_cache_create("vrf_ip_dst_cache",
|
||||
sizeof(struct rtable), 0,
|
||||
SLAB_HWCACHE_ALIGN,
|
||||
NULL);
|
||||
|
||||
if (!vrf_dst_ops.kmem_cachep)
|
||||
return -ENOMEM;
|
||||
|
||||
rc = init_dst_ops6_kmem_cachep();
|
||||
if (rc != 0)
|
||||
goto error2;
|
||||
|
||||
register_netdevice_notifier(&vrf_notifier_block);
|
||||
|
||||
rc = rtnl_link_register(&vrf_link_ops);
|
||||
|
@ -969,22 +836,10 @@ static int __init vrf_init_module(void)
|
|||
|
||||
error:
|
||||
unregister_netdevice_notifier(&vrf_notifier_block);
|
||||
free_dst_ops6_kmem_cachep();
|
||||
error2:
|
||||
kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void __exit vrf_cleanup_module(void)
|
||||
{
|
||||
rtnl_link_unregister(&vrf_link_ops);
|
||||
unregister_netdevice_notifier(&vrf_notifier_block);
|
||||
kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
|
||||
free_dst_ops6_kmem_cachep();
|
||||
}
|
||||
|
||||
module_init(vrf_init_module);
|
||||
module_exit(vrf_cleanup_module);
|
||||
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
|
||||
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5680,11 +5680,12 @@ static int b43_bcma_probe(struct bcma_device *core)
 	INIT_WORK(&wl->firmware_load, b43_request_firmware);
 	schedule_work(&wl->firmware_load);
 
-bcma_out:
 	return err;
 
 bcma_err_wireless_exit:
 	ieee80211_free_hw(wl->hw);
+bcma_out:
+	kfree(dev);
 	return err;
 }
 
@@ -5712,8 +5713,8 @@ static void b43_bcma_remove(struct bcma_device *core)
 	b43_rng_exit(wl);
 
 	b43_leds_unregister(wl);
-
 	ieee80211_free_hw(wl->hw);
+	kfree(wldev->dev);
 }
 
 static struct bcma_driver b43_bcma_driver = {
@@ -5796,6 +5797,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
 
 	b43_leds_unregister(wl);
 	b43_wireless_exit(dev, wl);
+	kfree(dev);
 }
 
 static struct ssb_driver b43_ssb_driver = {
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1147,6 +1147,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
 	/* the fw is stopped, the aux sta is dead: clean up driver state */
 	iwl_mvm_del_aux_sta(mvm);
 
+	iwl_free_fw_paging(mvm);
+
 	/*
 	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
 	 * won't be called in this case).
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -761,8 +761,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
 	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
 		kfree(mvm->nvm_sections[i].data);
 
-	iwl_free_fw_paging(mvm);
-
 	iwl_mvm_tof_clean(mvm);
 
 	ieee80211_free_hw(mvm->hw);
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -732,8 +732,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
 	 */
 	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
 	if (val & (BIT(1) | BIT(17))) {
-		IWL_INFO(trans,
-			 "can't access the RSA semaphore it is write protected\n");
+		IWL_DEBUG_INFO(trans,
+			       "can't access the RSA semaphore it is write protected\n");
 		return 0;
 	}
 
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -2488,9 +2488,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
 	for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
 		rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
 
-		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-			 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
-			 rtldm->thermalvalue, thermal_value);
+	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+		 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+		 rtldm->thermalvalue, thermal_value);
 	/*Record last Power Tracking Thermal Value*/
 	rtldm->thermalvalue = thermal_value;
 }
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -828,6 +828,11 @@ struct mlx4_vf_dev {
 	u8			n_ports;
 };
 
+enum mlx4_pci_status {
+	MLX4_PCI_STATUS_DISABLED,
+	MLX4_PCI_STATUS_ENABLED,
+};
+
 struct mlx4_dev_persistent {
 	struct pci_dev	       *pdev;
 	struct mlx4_dev	       *dev;
@@ -841,6 +846,8 @@ struct mlx4_dev_persistent {
 	u8		state;
 	struct mutex	interface_state_mutex; /* protect SW state */
 	u8	interface_state;
+	struct mutex		pci_status_mutex; /* sync pci state */
+	enum mlx4_pci_status	pci_status;
 };
 
 struct mlx4_dev {
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -98,6 +98,45 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
 	if (!is_a_nulls(first))
 		first->pprev = &n->next;
 }
 
+/**
+ * hlist_nulls_add_tail_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the end of the specified hlist_nulls,
+ * while permitting racing traversals.  NOTE: tail insertion requires
+ * list traversal.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.  Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
+					    struct hlist_nulls_head *h)
+{
+	struct hlist_nulls_node *i, *last = NULL;
+
+	for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
+	     i = hlist_nulls_next_rcu(i))
+		last = i;
+
+	if (last) {
+		n->next = last->next;
+		n->pprev = &last->next;
+		rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
+	} else {
+		hlist_nulls_add_head_rcu(n, h);
+	}
+}
+
 /**
  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
  * @tpos:	the type * to use as a loop cursor.
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -17,6 +17,7 @@
 #include <linux/hardirq.h>
 #include <linux/rcupdate.h>
 #include <net/sock.h>
+#include <net/inet_sock.h>
 
 #ifdef CONFIG_CGROUP_NET_CLASSID
 struct cgroup_cls_state {
@@ -63,11 +64,13 @@ static inline u32 task_get_classid(const struct sk_buff *skb)
 	 * softirqs always disables bh.
 	 */
 	if (in_serving_softirq()) {
+		struct sock *sk = skb_to_full_sk(skb);
+
 		/* If there is an sock_cgroup_classid we'll use that. */
-		if (!skb->sk)
+		if (!sk || !sk_fullsock(sk))
 			return 0;
 
-		classid = sock_cgroup_classid(&skb->sk->sk_cgrp_data);
+		classid = sock_cgroup_classid(&sk->sk_cgrp_data);
 	}
 
 	return classid;
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -101,6 +101,9 @@ void fib6_force_start_gc(struct net *net);
 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 				    const struct in6_addr *addr, bool anycast);
 
+struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
+			       int flags);
+
 /*
  *	support functions for ND
  *
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -959,6 +959,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
 int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
 int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
 				 int addr_len);
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
+void ip6_datagram_release_cb(struct sock *sk);
 
 int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
 		    int *addr_len);
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -209,6 +209,9 @@ unsigned int inet_addr_type_dev_table(struct net *net,
 void ip_rt_multicast_event(struct in_device *);
 int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
 void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
+struct rtable *rt_dst_alloc(struct net_device *dev,
+			    unsigned int flags, u16 type,
+			    bool nopolicy, bool noxfrm, bool will_cache);
 
 struct in_ifaddr;
 void fib_add_ifaddr(struct in_ifaddr *);
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -847,6 +847,11 @@ struct sctp_transport {
 	 */
 	ktime_t last_time_heard;
 
+	/* When was the last time that we sent a chunk using this
+	 * transport? We use this to check for idle transports
+	 */
+	unsigned long last_time_sent;
+
 	/* Last time(in jiffies) when cwnd is reduced due to the congestion
 	 * indication based on ECNE chunk.
 	 */
@@ -952,7 +957,8 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
 			  struct sctp_sock *);
 void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
 void sctp_transport_free(struct sctp_transport *);
-void sctp_transport_reset_timers(struct sctp_transport *);
+void sctp_transport_reset_t3_rtx(struct sctp_transport *);
+void sctp_transport_reset_hb_timer(struct sctp_transport *);
 int sctp_transport_hold(struct sctp_transport *);
 void sctp_transport_put(struct sctp_transport *);
 void sctp_transport_update_rto(struct sctp_transport *, __u32);
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -630,7 +630,11 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
 
 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
-	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+	    sk->sk_family == AF_INET6)
+		hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
+	else
+		hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -552,6 +552,8 @@ void tcp_send_ack(struct sock *sk);
 void tcp_send_delayed_ack(struct sock *sk);
 void tcp_send_loss_probe(struct sock *sk);
 bool tcp_schedule_loss_probe(struct sock *sk);
+void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+			     const struct sk_buff *next_skb);
 
 /* tcp_input.c */
 void tcp_resume_early_retransmit(struct sock *sk);
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -96,6 +96,7 @@ header-y += cyclades.h
 header-y += cycx_cfm.h
 header-y += dcbnl.h
 header-y += dccp.h
+header-y += devlink.h
 header-y += dlmconstants.h
 header-y += dlm_device.h
 header-y += dlm.h
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1374,6 +1374,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
 	}
 
 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
+	    BPF_SIZE(insn->code) == BPF_DW ||
 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
 		verbose("BPF_LD_ABS uses reserved fields\n");
 		return -EINVAL;
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -370,7 +370,11 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
 	    left - sizeof(struct ebt_entry_match) < m->match_size)
 		return -EINVAL;
 
-	match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+	match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+	if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
+		request_module("ebt_%s", m->u.name);
+		match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+	}
 	if (IS_ERR(match))
 		return PTR_ERR(match);
 	m->u.match = match;
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4502,13 +4502,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
 		__skb_push(skb, offset);
 		err = __vlan_insert_tag(skb, skb->vlan_proto,
 					skb_vlan_tag_get(skb));
-		if (err)
+		if (err) {
+			__skb_pull(skb, offset);
 			return err;
+		}
+
 		skb->protocol = skb->vlan_proto;
 		skb->mac_len += VLAN_HLEN;
-		__skb_pull(skb, offset);
 
 		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
+		__skb_pull(skb, offset);
 	}
 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
 	return 0;
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1034,10 +1034,13 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
 	if (!fld.daddr) {
 		fld.daddr = fld.saddr;
 
-		err = -EADDRNOTAVAIL;
 		if (dev_out)
 			dev_put(dev_out);
+		err = -EINVAL;
 		dev_out = init_net.loopback_dev;
+		if (!dev_out->dn_ptr)
+			goto out;
+		err = -EADDRNOTAVAIL;
 		dev_hold(dev_out);
 		if (!fld.daddr) {
 			fld.daddr =
@@ -1110,6 +1113,8 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
 	if (dev_out == NULL)
 		goto out;
 	dn_db = rcu_dereference_raw(dev_out->dn_ptr);
+	if (!dn_db)
+		goto e_inval;
 	/* Possible improvement - check all devices for local addr */
 	if (dn_dev_islocal(dev_out, fld.daddr)) {
 		dev_put(dev_out);
@@ -1151,6 +1156,8 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
 		dev_put(dev_out);
 		dev_out = init_net.loopback_dev;
 		dev_hold(dev_out);
+		if (!dev_out->dn_ptr)
+			goto e_inval;
 		fld.flowidn_oif = dev_out->ifindex;
 		if (res.fi)
 			dn_fib_info_put(res.fi);
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -81,6 +81,12 @@ static int __init arptable_filter_init(void)
 		return ret;
 	}
 
+	ret = arptable_filter_table_init(&init_net);
+	if (ret) {
+		unregister_pernet_subsys(&arptable_filter_net_ops);
+		kfree(arpfilter_ops);
+	}
+
 	return ret;
 }
 
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1438,9 +1438,9 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
 #endif
 }
 
-static struct rtable *rt_dst_alloc(struct net_device *dev,
-				   unsigned int flags, u16 type,
-				   bool nopolicy, bool noxfrm, bool will_cache)
+struct rtable *rt_dst_alloc(struct net_device *dev,
+			    unsigned int flags, u16 type,
+			    bool nopolicy, bool noxfrm, bool will_cache)
 {
 	struct rtable *rt;
 
@@ -1468,6 +1468,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev,
 
 	return rt;
 }
+EXPORT_SYMBOL(rt_dst_alloc);
 
 /* called in rcu_read_lock() section */
 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2045,6 +2046,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 		 */
 		if (fi && res->prefixlen < 4)
 			fi = NULL;
+	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
+		   (orig_oif != dev_out->ifindex)) {
+		/* For local routes that require a particular output interface
+		 * we do not want to cache the result.  Caching the result
+		 * causes incorrect behaviour when there are multiple source
+		 * addresses on the interface, the end result being that if the
+		 * intended recipient is waiting on that interface for the
+		 * packet he won't receive it because it will be delivered on
+		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
+		 * be set to the loopback interface as well.
+		 */
+		fi = NULL;
 	}
 
 	fnhe = NULL;
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1309,6 +1309,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	if (skb == tcp_highest_sack(sk))
 		tcp_advance_highest_sack(sk, skb);
 
+	tcp_skb_collapse_tstamp(prev, skb);
 	tcp_unlink_write_queue(skb, sk);
 	sk_wmem_free_skb(sk, skb);
 
@@ -3098,7 +3099,8 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
 
 	shinfo = skb_shinfo(skb);
 	if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
-	    between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1))
+	    !before(shinfo->tskey, prior_snd_una) &&
+	    before(shinfo->tskey, tcp_sk(sk)->snd_una))
 		__skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
 }
 
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2441,6 +2441,20 @@ u32 __tcp_select_window(struct sock *sk)
 	return window;
 }
 
+void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+			     const struct sk_buff *next_skb)
+{
+	const struct skb_shared_info *next_shinfo = skb_shinfo(next_skb);
+	u8 tsflags = next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
+
+	if (unlikely(tsflags)) {
+		struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+		shinfo->tx_flags |= tsflags;
+		shinfo->tskey = next_shinfo->tskey;
+	}
+}
+
 /* Collapses two adjacent SKB's during retransmission. */
 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 {
@@ -2484,6 +2498,8 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 
 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
 
+	tcp_skb_collapse_tstamp(skb, next_skb);
+
 	sk_wmem_free_skb(sk, next_skb);
 }
 
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -339,8 +339,13 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
 
 		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
 		spin_lock(&hslot2->lock);
-		hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
-					 &hslot2->head);
+		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+		    sk->sk_family == AF_INET6)
+			hlist_nulls_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
+						 &hslot2->head);
+		else
+			hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+						 &hslot2->head);
 		hslot2->count++;
 		spin_unlock(&hslot2->lock);
 	}
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3255,6 +3255,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			   void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct netdev_notifier_changeupper_info *info;
 	struct inet6_dev *idev = __in6_dev_get(dev);
 	int run_pending = 0;
 	int err;
@@ -3413,6 +3414,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 		if (idev)
 			addrconf_type_change(dev, event);
 		break;
+
+	case NETDEV_CHANGEUPPER:
+		info = ptr;
+
+		/* flush all routes if dev is linked to or unlinked from
+		 * an L3 master device (e.g., VRF)
+		 */
+		if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+			addrconf_ifdown(dev, 0);
 	}
 
 	return NOTIFY_OK;
@@ -3438,6 +3448,12 @@ static void addrconf_type_change(struct net_device *dev, unsigned long event)
 		ipv6_mc_unmap(idev);
 }
 
+static bool addr_is_local(const struct in6_addr *addr)
+{
+	return ipv6_addr_type(addr) &
+		(IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
+}
+
 static int addrconf_ifdown(struct net_device *dev, int how)
 {
 	struct net *net = dev_net(dev);
@@ -3495,7 +3511,8 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 		 * address is retained on a down event
 		 */
 		if (!keep_addr ||
-		    !(ifa->flags & IFA_F_PERMANENT)) {
+		    !(ifa->flags & IFA_F_PERMANENT) ||
+		    addr_is_local(&ifa->addr)) {
 			hlist_del_init_rcu(&ifa->addr_lst);
 			goto restart;
 		}
@@ -3544,7 +3561,8 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 		write_unlock_bh(&idev->lock);
 		spin_lock_bh(&ifa->lock);
 
-		if (keep_addr && (ifa->flags & IFA_F_PERMANENT)) {
+		if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+		    !addr_is_local(&ifa->addr)) {
 			/* set state to skip the notifier below */
 			state = INET6_IFADDR_STATE_DEAD;
 			ifa->state = 0;
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -40,18 +40,114 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
 	return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
 }
 
+static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
+{
+	struct inet_sock *inet = inet_sk(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+
+	memset(fl6, 0, sizeof(*fl6));
+	fl6->flowi6_proto = sk->sk_protocol;
+	fl6->daddr = sk->sk_v6_daddr;
+	fl6->saddr = np->saddr;
+	fl6->flowi6_oif = sk->sk_bound_dev_if;
+	fl6->flowi6_mark = sk->sk_mark;
+	fl6->fl6_dport = inet->inet_dport;
+	fl6->fl6_sport = inet->inet_sport;
+	fl6->flowlabel = np->flow_label;
+
+	if (!fl6->flowi6_oif)
+		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
+
+	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr))
+		fl6->flowi6_oif = np->mcast_oif;
+
+	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
+}
+
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
+{
+	struct ip6_flowlabel *flowlabel = NULL;
+	struct in6_addr *final_p, final;
+	struct ipv6_txoptions *opt;
+	struct dst_entry *dst;
+	struct inet_sock *inet = inet_sk(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct flowi6 fl6;
+	int err = 0;
+
+	if (np->sndflow && (np->flow_label & IPV6_FLOWLABEL_MASK)) {
+		flowlabel = fl6_sock_lookup(sk, np->flow_label);
+		if (!flowlabel)
+			return -EINVAL;
+	}
+	ip6_datagram_flow_key_init(&fl6, sk);
+
+	rcu_read_lock();
+	opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
+	final_p = fl6_update_dst(&fl6, opt, &final);
+	rcu_read_unlock();
+
+	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+	if (IS_ERR(dst)) {
+		err = PTR_ERR(dst);
+		goto out;
+	}
+
+	if (fix_sk_saddr) {
+		if (ipv6_addr_any(&np->saddr))
+			np->saddr = fl6.saddr;
+
+		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+			sk->sk_v6_rcv_saddr = fl6.saddr;
+			inet->inet_rcv_saddr = LOOPBACK4_IPV6;
+			if (sk->sk_prot->rehash)
+				sk->sk_prot->rehash(sk);
+		}
+	}
+
+	ip6_dst_store(sk, dst,
+		      ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+		      &sk->sk_v6_daddr : NULL,
+#ifdef CONFIG_IPV6_SUBTREES
+		      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
+		      &np->saddr :
+#endif
+		      NULL);
+
+out:
+	fl6_sock_release(flowlabel);
+	return err;
+}
+
+void ip6_datagram_release_cb(struct sock *sk)
+{
+	struct dst_entry *dst;
+
+	if (ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+		return;
+
+	rcu_read_lock();
+	dst = __sk_dst_get(sk);
+	if (!dst || !dst->obsolete ||
+	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) {
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+
+	ip6_datagram_dst_update(sk, false);
+}
+EXPORT_SYMBOL_GPL(ip6_datagram_release_cb);
+
 static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct in6_addr *daddr, *final_p, final;
-	struct dst_entry *dst;
-	struct flowi6 fl6;
-	struct ip6_flowlabel *flowlabel = NULL;
-	struct ipv6_txoptions *opt;
+	struct in6_addr *daddr;
 	int addr_type;
 	int err;
+	__be32 fl6_flowlabel = 0;
 
 	if (usin->sin6_family == AF_INET) {
 		if (__ipv6_only_sock(sk))
@@ -66,15 +162,8 @@ static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int a
 	if (usin->sin6_family != AF_INET6)
 		return -EAFNOSUPPORT;
 
-	memset(&fl6, 0, sizeof(fl6));
-	if (np->sndflow) {
-		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
-		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
-			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
-			if (!flowlabel)
-				return -EINVAL;
-		}
-	}
+	if (np->sndflow)
+		fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
 
 	addr_type = ipv6_addr_type(&usin->sin6_addr);
 
@@ -145,7 +234,7 @@ static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int a
 	}
 
 	sk->sk_v6_daddr = *daddr;
-	np->flow_label = fl6.flowlabel;
+	np->flow_label = fl6_flowlabel;
 
 	inet->inet_dport = usin->sin6_port;
 
@@ -154,59 +243,13 @@ static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int a
 	 *	destination cache for it.
 	 */
 
-	fl6.flowi6_proto = sk->sk_protocol;
-	fl6.daddr = sk->sk_v6_daddr;
-	fl6.saddr = np->saddr;
-	fl6.flowi6_oif = sk->sk_bound_dev_if;
-	fl6.flowi6_mark = sk->sk_mark;
-	fl6.fl6_dport = inet->inet_dport;
-	fl6.fl6_sport = inet->inet_sport;
-
-	if (!fl6.flowi6_oif)
-		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
-
-	if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
-		fl6.flowi6_oif = np->mcast_oif;
-
-	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
-
-	rcu_read_lock();
-	opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
-	final_p = fl6_update_dst(&fl6, opt, &final);
-	rcu_read_unlock();
-
-	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
-	err = 0;
-	if (IS_ERR(dst)) {
-		err = PTR_ERR(dst);
+	err = ip6_datagram_dst_update(sk, true);
+	if (err)
 		goto out;
-	}
 
-	/* source address lookup done in ip6_dst_lookup */
-
-	if (ipv6_addr_any(&np->saddr))
-		np->saddr = fl6.saddr;
-
-	if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
-		sk->sk_v6_rcv_saddr = fl6.saddr;
-		inet->inet_rcv_saddr = LOOPBACK4_IPV6;
-		if (sk->sk_prot->rehash)
-			sk->sk_prot->rehash(sk);
-	}
-
-	ip6_dst_store(sk, dst,
-		      ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
-		      &sk->sk_v6_daddr : NULL,
-#ifdef CONFIG_IPV6_SUBTREES
-		      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
-		      &np->saddr :
-#endif
-		      NULL);
-
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 out:
-	fl6_sock_release(flowlabel);
 	return err;
 }
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -338,9 +338,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
 	return rt;
 }
 
-static struct rt6_info *ip6_dst_alloc(struct net *net,
-				      struct net_device *dev,
-				      int flags)
+struct rt6_info *ip6_dst_alloc(struct net *net,
+			       struct net_device *dev,
+			       int flags)
 {
 	struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
 
@@ -364,6 +364,7 @@ static struct rt6_info *ip6_dst_alloc(struct net *net,
 
 	return rt;
 }
+EXPORT_SYMBOL(ip6_dst_alloc);
 
 static void ip6_dst_destroy(struct dst_entry *dst)
 {
@@ -1417,8 +1418,20 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
 
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
 {
+	struct dst_entry *dst;
+
 	ip6_update_pmtu(skb, sock_net(sk), mtu,
 			sk->sk_bound_dev_if, sk->sk_mark);
+
+	dst = __sk_dst_get(sk);
+	if (!dst || !dst->obsolete ||
+	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
+		return;
+
+	bh_lock_sock(sk);
+	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+		ip6_datagram_dst_update(sk, false);
+	bh_unlock_sock(sk);
 }
 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
 
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1539,6 +1539,7 @@ struct proto udpv6_prot = {
 	.sendmsg	   = udpv6_sendmsg,
 	.recvmsg	   = udpv6_recvmsg,
 	.backlog_rcv	   = __udpv6_queue_rcv_skb,
+	.release_cb	   = ip6_datagram_release_cb,
 	.hash		   = udp_lib_hash,
 	.unhash		   = udp_lib_unhash,
 	.rehash		   = udp_v6_rehash,
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -410,6 +410,8 @@ static void tcp_options(const struct sk_buff *skb,
 			length--;
 			continue;
 		default:
+			if (length < 2)
+				return;
 			opsize=*ptr++;
 			if (opsize < 2) /* "silly options" */
 				return;
@@ -470,6 +472,8 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
 			length--;
 			continue;
 		default:
+			if (length < 2)
+				return;
 			opsize = *ptr++;
 			if (opsize < 2) /* "silly options" */
 				return;
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -688,7 +688,7 @@ static int netlink_release(struct socket *sock)
 
 	skb_queue_purge(&sk->sk_write_queue);
 
-	if (nlk->portid) {
+	if (nlk->portid && nlk->bound) {
 		struct netlink_notify n = {
 						.net = sock_net(sk),
 						.protocol = sk->sk_protocol,
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -461,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
 		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
 
 		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
-			set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
+			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
 				      true);
 			memcpy(&flow_key->ipv6.addr.src, masked,
 			       sizeof(flow_key->ipv6.addr.src));
@@ -483,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
 					     NULL, &flags)
 			       != NEXTHDR_ROUTING);
 
-		set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
+		set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
 			      recalc_csum);
 		memcpy(&flow_key->ipv6.addr.dst, masked,
 		       sizeof(flow_key->ipv6.addr.dst));
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -367,6 +367,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
 	} else if (key->eth.type == htons(ETH_P_IPV6)) {
 		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
 
+		skb_orphan(skb);
 		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
 		err = nf_ct_frag6_gather(net, skb, user);
 		if (err)
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3521,6 +3521,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
 	i->ifindex = mreq->mr_ifindex;
 	i->alen = mreq->mr_alen;
 	memcpy(i->addr, mreq->mr_address, i->alen);
+	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
 	i->count = 1;
 	i->next = po->mclist;
 	po->mclist = i;
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -299,7 +299,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
 	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-	__set_bit_le(off, (void *)map->m_page_addrs[i]);
+	set_bit_le(off, (void *)map->m_page_addrs[i]);
 }
 
 void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
@@ -313,7 +313,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
 	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-	__clear_bit_le(off, (void *)map->m_page_addrs[i]);
+	clear_bit_le(off, (void *)map->m_page_addrs[i]);
 }
 
 static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -194,7 +194,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
 		dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
 		dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
 		dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
-		dp->dp_ack_seq = rds_ib_piggyb_ack(ic);
+		dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));
 
 		/* Advertise flow control */
 		if (ic->i_flowctl) {
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	if (validate)
 		skb = validate_xmit_skb_list(skb, dev);
 
-	if (skb) {
+	if (likely(skb)) {
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (!netif_xmit_frozen_or_stopped(txq))
 			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
 
 		HARD_TX_UNLOCK(dev, txq);
+	} else {
+		spin_lock(root_lock);
+		return qdisc_qlen(q);
 	}
 	spin_lock(root_lock);
 
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -866,8 +866,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			 * sender MUST assure that at least one T3-rtx
 			 * timer is running.
 			 */
-			if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
-				sctp_transport_reset_timers(transport);
+			if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+				sctp_transport_reset_t3_rtx(transport);
+				transport->last_time_sent = jiffies;
+			}
 		}
 		break;
 
@@ -924,8 +926,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			error = sctp_outq_flush_rtx(q, packet,
 						    rtx_timeout, &start_timer);
 
-			if (start_timer)
-				sctp_transport_reset_timers(transport);
+			if (start_timer) {
+				sctp_transport_reset_t3_rtx(transport);
+				transport->last_time_sent = jiffies;
+			}
 
 			/* This can happen on COOKIE-ECHO resend. Only
 			 * one chunk can get bundled with a COOKIE-ECHO.
@@ -1062,7 +1066,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			list_add_tail(&chunk->transmitted_list,
 				      &transport->transmitted);
 
-			sctp_transport_reset_timers(transport);
+			sctp_transport_reset_t3_rtx(transport);
+			transport->last_time_sent = jiffies;
 
 			/* Only let one DATA chunk get bundled with a
 			 * COOKIE-ECHO chunk.
|
@ -3080,8 +3080,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
|
|||
return SCTP_ERROR_RSRC_LOW;
|
||||
|
||||
/* Start the heartbeat timer. */
|
||||
if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer)))
|
||||
sctp_transport_hold(peer);
|
||||
sctp_transport_reset_hb_timer(peer);
|
||||
asoc->new_transport = peer;
|
||||
break;
|
||||
case SCTP_PARAM_DEL_IP:
|
||||
|
|
|
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -69,8 +69,6 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
 			     sctp_cmd_seq_t *commands,
 			     gfp_t gfp);
 
-static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
-				     struct sctp_transport *t);
-
 /********************************************************************
  * Helper functions
  ********************************************************************/
@@ -367,6 +365,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
 	struct sctp_association *asoc = transport->asoc;
 	struct sock *sk = asoc->base.sk;
 	struct net *net = sock_net(sk);
+	u32 elapsed, timeout;
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
@@ -378,6 +377,16 @@ void sctp_generate_heartbeat_event(unsigned long data)
 		goto out_unlock;
 	}
 
+	/* Check if we should still send the heartbeat or reschedule */
+	elapsed = jiffies - transport->last_time_sent;
+	timeout = sctp_transport_timeout(transport);
+	if (elapsed < timeout) {
+		elapsed = timeout - elapsed;
+		if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
+			sctp_transport_hold(transport);
+		goto out_unlock;
+	}
+
 	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
 			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
 			   asoc->state, asoc->ep, asoc,
@@ -507,7 +516,7 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
 					     0);
 
 		/* Update the hb timer to resend a heartbeat every rto */
-		sctp_cmd_hb_timer_update(commands, transport);
+		sctp_transport_reset_hb_timer(transport);
 	}
 
 	if (transport->state != SCTP_INACTIVE &&
@@ -634,11 +643,8 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
 	 * hold a reference on the transport to make sure none of
 	 * the needed data structures go away.
 	 */
-	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
-
-		if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-			sctp_transport_hold(t);
-	}
+	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+		sctp_transport_reset_hb_timer(t);
 }
 
 static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
@@ -669,15 +675,6 @@ static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
 }
 
 
-/* Helper function to update the heartbeat timer. */
-static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
-				     struct sctp_transport *t)
-{
-	/* Update the heartbeat timer. */
-	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-		sctp_transport_hold(t);
-}
-
 /* Helper function to handle the reception of an HEARTBEAT ACK. */
 static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
 				  struct sctp_association *asoc,
@@ -742,8 +739,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
 	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
 
 	/* Update the heartbeat timer. */
-	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-		sctp_transport_hold(t);
+	sctp_transport_reset_hb_timer(t);
 
 	if (was_unconfirmed && asoc->peer.transport_count == 1)
 		sctp_transport_immediate_rtx(t);
@@ -1614,7 +1610,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 
 		case SCTP_CMD_HB_TIMER_UPDATE:
 			t = cmd->obj.transport;
-			sctp_cmd_hb_timer_update(commands, t);
+			sctp_transport_reset_hb_timer(t);
 			break;
 
 		case SCTP_CMD_HB_TIMERS_STOP:
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -183,7 +183,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
 /* Start T3_rtx timer if it is not already running and update the heartbeat
  * timer.  This routine is called every time a DATA chunk is sent.
  */
-void sctp_transport_reset_timers(struct sctp_transport *transport)
+void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
 {
 	/* RFC 2960 6.3.2 Retransmission Timer Rules
 	 *
@@ -197,11 +197,18 @@ void sctp_transport_reset_timers(struct sctp_transport *transport)
 	if (!mod_timer(&transport->T3_rtx_timer,
 		       jiffies + transport->rto))
 		sctp_transport_hold(transport);
+}
+
+void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
+{
+	unsigned long expires;
 
 	/* When a data chunk is sent, reset the heartbeat interval. */
-	if (!mod_timer(&transport->hb_timer,
-		       sctp_transport_timeout(transport)))
-		sctp_transport_hold(transport);
+	expires = jiffies + sctp_transport_timeout(transport);
+	if (time_before(transport->hb_timer.expires, expires) &&
+	    !mod_timer(&transport->hb_timer,
+		       expires + prandom_u32_max(transport->rto)))
+		sctp_transport_hold(transport);
 }
 
 /* This transport has been assigned to an association.
@@ -595,13 +602,13 @@ void sctp_transport_burst_reset(struct sctp_transport *t)
 unsigned long sctp_transport_timeout(struct sctp_transport *trans)
 {
 	/* RTO + timer slack +/- 50% of RTO */
-	unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto);
+	unsigned long timeout = trans->rto >> 1;
 
 	if (trans->state != SCTP_UNCONFIRMED &&
 	    trans->state != SCTP_PF)
 		timeout += trans->hbinterval;
 
-	return timeout + jiffies;
+	return timeout;
 }
 
 /* Reset transport variables to their initial values */
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -69,6 +69,7 @@ static int __net_init tipc_init_net(struct net *net)
 	if (err)
 		goto out_nametbl;
 
+	INIT_LIST_HEAD(&tn->dist_queue);
 	err = tipc_topsrv_start(net);
 	if (err)
 		goto out_subscr;
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -103,6 +103,9 @@ struct tipc_net {
 	spinlock_t nametbl_lock;
 	struct name_table *nametbl;
 
+	/* Name dist queue */
+	struct list_head dist_queue;
+
 	/* Topology subscription server */
 	struct tipc_server *topsrv;
 	atomic_t subscription_count;
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -40,11 +40,6 @@
 
 int sysctl_tipc_named_timeout __read_mostly = 2000;
 
-/**
- * struct tipc_dist_queue - queue holding deferred name table updates
- */
-static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);
-
 struct distr_queue_item {
 	struct distr_item i;
 	u32 dtype;
@@ -229,12 +224,31 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
 	kfree_rcu(p, rcu);
 }
 
+/**
+ * tipc_dist_queue_purge - remove deferred updates from a node that went down
+ */
+static void tipc_dist_queue_purge(struct net *net, u32 addr)
+{
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+	struct distr_queue_item *e, *tmp;
+
+	spin_lock_bh(&tn->nametbl_lock);
+	list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
+		if (e->node != addr)
+			continue;
+		list_del(&e->next);
+		kfree(e);
+	}
+	spin_unlock_bh(&tn->nametbl_lock);
+}
+
 void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
 {
 	struct publication *publ, *tmp;
 
 	list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
 		tipc_publ_purge(net, publ, addr);
+	tipc_dist_queue_purge(net, addr);
 }
 
 /**
@@ -279,9 +293,11 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
 * tipc_named_add_backlog - add a failed name table update to the backlog
 *
 */
-static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
+static void tipc_named_add_backlog(struct net *net, struct distr_item *i,
+				   u32 type, u32 node)
 {
 	struct distr_queue_item *e;
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	unsigned long now = get_jiffies_64();
 
 	e = kzalloc(sizeof(*e), GFP_ATOMIC);
@@ -291,7 +307,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
 	e->node = node;
 	e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
 	memcpy(e, i, sizeof(*i));
-	list_add_tail(&e->next, &tipc_dist_queue);
+	list_add_tail(&e->next, &tn->dist_queue);
 }
 
 /**
@@ -301,10 +317,11 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
 void tipc_named_process_backlog(struct net *net)
 {
 	struct distr_queue_item *e, *tmp;
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	char addr[16];
 	unsigned long now = get_jiffies_64();
 
-	list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
+	list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
 		if (time_after(e->expires, now)) {
 			if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
 				continue;
@@ -344,7 +361,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
 		node = msg_orignode(msg);
 		while (count--) {
 			if (!tipc_update_nametbl(net, item, node, mtype))
-				tipc_named_add_backlog(item, mtype, node);
+				tipc_named_add_backlog(net, item, mtype, node);
 			item++;
 		}
 		kfree_skb(skb);
|
@ -1735,11 +1735,8 @@ static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
|
|||
/* Retrieve the head sk_buff from the socket's receive queue. */
|
||||
err = 0;
|
||||
skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (!skb)
|
||||
return -EAGAIN;
|
||||
return err;
|
||||
|
||||
dg = (struct vmci_datagram *)skb->data;
|
||||
if (!dg)
|
||||
|
@ -2154,7 +2151,7 @@ module_exit(vmci_transport_exit);
|
|||
|
||||
MODULE_AUTHOR("VMware, Inc.");
|
||||
MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
|
||||
MODULE_VERSION("1.0.3.0-k");
|
||||
MODULE_VERSION("1.0.4.0-k");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_ALIAS("vmware_vsock");
|
||||
MODULE_ALIAS_NETPROTO(PF_VSOCK);
|
||||
|
|
|
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -13216,7 +13216,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
 	struct wireless_dev *wdev;
 	struct cfg80211_beacon_registration *reg, *tmp;
 
-	if (state != NETLINK_URELEASE)
+	if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
 		return NOTIFY_DONE;
 
 	rcu_read_lock();
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -3,3 +3,4 @@ psock_fanout
 psock_tpacket
 reuseport_bpf
 reuseport_bpf_cpu
+reuseport_dualstack
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -4,7 +4,7 @@ CFLAGS = -Wall -O2 -g
 
 CFLAGS += -I../../../../usr/include/
 
-NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu
+NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu reuseport_dualstack
 
 all: $(NET_PROGS)
 %: %.c
--- /dev/null
+++ b/tools/testing/selftests/net/reuseport_dualstack.c
@@ -0,0 +1,208 @@
+/*
+ * It is possible to use SO_REUSEPORT to open multiple sockets bound to
+ * equivalent local addresses using AF_INET and AF_INET6 at the same time.  If
+ * the AF_INET6 socket has IPV6_V6ONLY set, it's clear which socket should
+ * receive a given incoming packet.  However, when it is not set, incoming v4
+ * packets should prefer the AF_INET socket(s).  This behavior was defined with
+ * the original SO_REUSEPORT implementation, but broke with
+ * e32ea7e74727 ("soreuseport: fast reuseport UDP socket selection")
+ * This test creates these mixed AF_INET/AF_INET6 sockets and asserts the
+ * AF_INET preference for v4 packets.
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <linux/in.h>
+#include <linux/unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/epoll.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+static const int PORT = 8888;
+
+static void build_rcv_fd(int family, int proto, int *rcv_fds, int count)
+{
+	struct sockaddr_storage addr;
+	struct sockaddr_in  *addr4;
+	struct sockaddr_in6 *addr6;
+	int opt, i;
+
+	switch (family) {
+	case AF_INET:
+		addr4 = (struct sockaddr_in *)&addr;
+		addr4->sin_family = AF_INET;
+		addr4->sin_addr.s_addr = htonl(INADDR_ANY);
+		addr4->sin_port = htons(PORT);
+		break;
+	case AF_INET6:
+		addr6 = (struct sockaddr_in6 *)&addr;
+		addr6->sin6_family = AF_INET6;
+		addr6->sin6_addr = in6addr_any;
+		addr6->sin6_port = htons(PORT);
+		break;
+	default:
+		error(1, 0, "Unsupported family %d", family);
+	}
+
+	for (i = 0; i < count; ++i) {
+		rcv_fds[i] = socket(family, proto, 0);
+		if (rcv_fds[i] < 0)
+			error(1, errno, "failed to create receive socket");
+
+		opt = 1;
+		if (setsockopt(rcv_fds[i], SOL_SOCKET, SO_REUSEPORT, &opt,
+			       sizeof(opt)))
+			error(1, errno, "failed to set SO_REUSEPORT");
+
+		if (bind(rcv_fds[i], (struct sockaddr *)&addr, sizeof(addr)))
+			error(1, errno, "failed to bind receive socket");
+
+		if (proto == SOCK_STREAM && listen(rcv_fds[i], 10))
+			error(1, errno, "failed to listen on receive port");
+	}
+}
+
+static void send_from_v4(int proto)
+{
+	struct sockaddr_in  saddr, daddr;
+	int fd;
+
+	saddr.sin_family = AF_INET;
+	saddr.sin_addr.s_addr = htonl(INADDR_ANY);
+	saddr.sin_port = 0;
+
+	daddr.sin_family = AF_INET;
+	daddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+	daddr.sin_port = htons(PORT);
+
+	fd = socket(AF_INET, proto, 0);
+	if (fd < 0)
+		error(1, errno, "failed to create send socket");
+
+	if (bind(fd, (struct sockaddr *)&saddr, sizeof(saddr)))
+		error(1, errno, "failed to bind send socket");
+
+	if (connect(fd, (struct sockaddr *)&daddr, sizeof(daddr)))
+		error(1, errno, "failed to connect send socket");
+
+	if (send(fd, "a", 1, 0) < 0)
+		error(1, errno, "failed to send message");
+
+	close(fd);
+}
+
+static int receive_once(int epfd, int proto)
+{
+	struct epoll_event ev;
+	int i, fd;
+	char buf[8];
+
+	i = epoll_wait(epfd, &ev, 1, -1);
+	if (i < 0)
+		error(1, errno, "epoll_wait failed");
+
+	if (proto == SOCK_STREAM) {
+		fd = accept(ev.data.fd, NULL, NULL);
+		if (fd < 0)
+			error(1, errno, "failed to accept");
+		i = recv(fd, buf, sizeof(buf), 0);
+		close(fd);
+	} else {
+		i = recv(ev.data.fd, buf, sizeof(buf), 0);
+	}
+
+	if (i < 0)
+		error(1, errno, "failed to recv");
+
+	return ev.data.fd;
+}
+
+static void test(int *rcv_fds, int count, int proto)
+{
+	struct epoll_event ev;
+	int epfd, i, test_fd;
+	uint16_t test_family;
+	socklen_t len;
+
+	epfd = epoll_create(1);
+	if (epfd < 0)
+		error(1, errno, "failed to create epoll");
+
+	ev.events = EPOLLIN;
+	for (i = 0; i < count; ++i) {
+		ev.data.fd = rcv_fds[i];
+		if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fds[i], &ev))
+			error(1, errno, "failed to register sock epoll");
+	}
+
+	send_from_v4(proto);
+
+	test_fd = receive_once(epfd, proto);
+	if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
+		error(1, errno, "failed to read socket domain");
+	if (test_family != AF_INET)
+		error(1, 0, "expected to receive on v4 socket but got v6 (%d)",
+		      test_family);
+
+	close(epfd);
+}
+
+int main(void)
+{
+	int rcv_fds[32], i;
+
+	fprintf(stderr, "---- UDP IPv4 created before IPv6 ----\n");
+	build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 5);
+	build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[5]), 5);
+	test(rcv_fds, 10, SOCK_DGRAM);
+	for (i = 0; i < 10; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "---- UDP IPv6 created before IPv4 ----\n");
+	build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 5);
+	build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[5]), 5);
+	test(rcv_fds, 10, SOCK_DGRAM);
+	for (i = 0; i < 10; ++i)
+		close(rcv_fds[i]);
+
+	/* NOTE: UDP socket lookups traverse a different code path when there
+	 * are > 10 sockets in a group.
+	 */
+	fprintf(stderr, "---- UDP IPv4 created before IPv6 (large) ----\n");
+	build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 16);
+	build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[16]), 16);
+	test(rcv_fds, 32, SOCK_DGRAM);
+	for (i = 0; i < 32; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "---- UDP IPv6 created before IPv4 (large) ----\n");
+	build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 16);
+	build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[16]), 16);
+	test(rcv_fds, 32, SOCK_DGRAM);
+	for (i = 0; i < 32; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "---- TCP IPv4 created before IPv6 ----\n");
+	build_rcv_fd(AF_INET, SOCK_STREAM, rcv_fds, 5);
+	build_rcv_fd(AF_INET6, SOCK_STREAM, &(rcv_fds[5]), 5);
+	test(rcv_fds, 10, SOCK_STREAM);
+	for (i = 0; i < 10; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "---- TCP IPv6 created before IPv4 ----\n");
+	build_rcv_fd(AF_INET6, SOCK_STREAM, rcv_fds, 5);
+	build_rcv_fd(AF_INET, SOCK_STREAM, &(rcv_fds[5]), 5);
+	test(rcv_fds, 10, SOCK_STREAM);
+	for (i = 0; i < 10; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "SUCCESS\n");
+	return 0;
+}