Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) stmmac_drv_probe() can race with stmmac_open() because we register
    the netdevice too early. Fix from Florian Fainelli.

 2) UFO handling in __ip6_append_data() and ip6_finish_output() use
    different tests for deciding whether a frame will be fragmented or
    not, put them in sync. Fix from Zheng Li.

 3) The rtnetlink getstats handlers need to validate that the netlink
    request is large enough, fix from Mathias Krause.

 4) Use after free in mlx4 driver, from Jack Morgenstein.

 5) Fix setting of garbage UID value in sockets during setattr() calls,
    from Eric Biggers.

 6) Packet drop_monitor doesn't format the netlink messages properly
    such that nlmsg_next fails to work, fix from Reiter Wolfgang.

 7) Fix handling of wildcard addresses in l2tp lookups, from Guillaume
    Nault.

 8) __skb_flow_dissect() can crash on pptp packets, from Ian Kumlien.

 9) IGMP code doesn't reset group query timers properly, from Michal
    Tesar.

10) Fix overzealous MAIN/LOCAL route table combining in ipv4, from
    Alexander Duyck.

11) vxlan offload check needs to be more strict in be2net driver, from
    Sabrina Dubroca.

12) Moving l3mdev to packet hooks lost RX stat counters unintentionally,
    fix from David Ahern.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (52 commits)
  sh_eth: enable RX descriptor word 0 shift on SH7734
  sfc: don't report RX hash keys to ethtool when RSS wasn't enabled
  dpaa_eth: Initialize CGR structure before init
  dpaa_eth: cleanup after init_phy() failure
  net: systemport: Pad packet before inserting TSB
  net: systemport: Utilize skb_put_padto()
  LiquidIO VF: s/select/imply/ for PTP_1588_CLOCK
  libcxgb: fix error check for ip6_route_output()
  net: usb: asix_devices: add .reset_resume for USB PM
  net: vrf: Add missing Rx counters
  drop_monitor: consider inserted data in genlmsg_end
  benet: stricter vxlan offloading check in be_features_check
  ipv4: Do not allow MAIN to be alias for new LOCAL w/ custom rules
  net: macb: Updated resource allocation function calls to new version of API.
  net: stmmac: dwmac-oxnas: use generic pm implementation
  net: stmmac: dwmac-oxnas: fix fixed-link-phydev leaks
  net: stmmac: dwmac-oxnas: fix of-node leak
  Documentation/networking: fix typo in mpls-sysctl
  igmp: Make igmp group member RFC 3376 compliant
  flow_dissector: Update pptp handling to avoid null pointer deref.
  ...
commit 4cf184638b
@@ -5,8 +5,8 @@ platform_labels - INTEGER
 	possible to configure forwarding for label values equal to or
 	greater than the number of platform labels.
 
-	A dense utliziation of the entries in the platform label table
-	is possible and expected aas the platform labels are locally
+	A dense utilization of the entries in the platform label table
+	is possible and expected as the platform labels are locally
 	allocated.
 
 	If the number of platform label table entries is set to 0 no
@@ -1682,9 +1682,19 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
 		size += ret;
 	}
 
+	if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
+	    flow_attr->num_of_specs == 1) {
+		struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
+		enum ib_flow_spec_type header_spec =
+				((union ib_flow_spec *)(flow_attr + 1))->type;
+
+		if (header_spec == IB_FLOW_SPEC_ETH)
+			mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
+	}
+
 	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
-			   MLX4_CMD_WRAPPED);
+			   MLX4_CMD_NATIVE);
 	if (ret == -ENOMEM)
 		pr_err("mcg table is full. Fail to register network rule.\n");
 	else if (ret == -ENXIO)

@@ -1701,7 +1711,7 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
 	int err;
 	err = mlx4_cmd(dev, reg_id, 0, 0,
 		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
-		       MLX4_CMD_WRAPPED);
+		       MLX4_CMD_NATIVE);
 	if (err)
 		pr_err("Fail to detach network rule. registration id = 0x%llx\n",
 		       reg_id);
@@ -1012,6 +1012,18 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
 		goto out;
 	}
 
+	/* The Ethernet switch we are interfaced with needs packets to be at
+	 * least 64 bytes (including FCS) otherwise they will be discarded when
+	 * they enter the switch port logic. When Broadcom tags are enabled, we
+	 * need to make sure that packets are at least 68 bytes
+	 * (including FCS and tag) because the length verification is done after
+	 * the Broadcom tag is stripped off the ingress packet.
+	 */
+	if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+		ret = NETDEV_TX_OK;
+		goto out;
+	}
+
 	/* Insert TSB and checksum infos */
 	if (priv->tsb_en) {
 		skb = bcm_sysport_insert_tsb(skb, dev);

@@ -1021,20 +1033,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
 		}
 	}
 
-	/* The Ethernet switch we are interfaced with needs packets to be at
-	 * least 64 bytes (including FCS) otherwise they will be discarded when
-	 * they enter the switch port logic. When Broadcom tags are enabled, we
-	 * need to make sure that packets are at least 68 bytes
-	 * (including FCS and tag) because the length verification is done after
-	 * the Broadcom tag is stripped off the ingress packet.
-	 */
-	if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
-		ret = NETDEV_TX_OK;
-		goto out;
-	}
-
-	skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
-			ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
+	skb_len = skb->len;
 
 	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
 	if (dma_mapping_error(kdev, mapping)) {
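Note on the two padding helpers above: skb_padto() zero-pads the data area but leaves skb->len unchanged (hence the old skb_len ternary), while skb_put_padto() pads and also extends skb->len; both free the skb on failure. Padding before bcm_sysport_insert_tsb() additionally guarantees the TSB describes the final, padded frame. A minimal sketch of the resulting xmit pattern:

    /* skb_put_padto() consumes the skb on failure, so only report TX_OK */
    if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN))
            return NETDEV_TX_OK;
    skb_len = skb->len;     /* already includes the padding */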
@@ -1,5 +1,5 @@
 /**
- * macb_pci.c - Cadence GEM PCI wrapper.
+ * Cadence GEM PCI wrapper.
  *
  * Copyright (C) 2016 Cadence Design Systems - http://www.cadence.com
  *

@@ -45,32 +45,27 @@ static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	struct macb_platform_data plat_data;
 	struct resource res[2];
 
-	/* sanity check */
-	if (!id)
-		return -EINVAL;
-
 	/* enable pci device */
-	err = pci_enable_device(pdev);
+	err = pcim_enable_device(pdev);
 	if (err < 0) {
-		dev_err(&pdev->dev, "Enabling PCI device has failed: 0x%04X",
-			err);
-		return -EACCES;
+		dev_err(&pdev->dev, "Enabling PCI device has failed: %d", err);
+		return err;
 	}
 
 	pci_set_master(pdev);
 
 	/* set up resources */
 	memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
-	res[0].start = pdev->resource[0].start;
-	res[0].end = pdev->resource[0].end;
+	res[0].start = pci_resource_start(pdev, 0);
+	res[0].end = pci_resource_end(pdev, 0);
 	res[0].name = PCI_DRIVER_NAME;
 	res[0].flags = IORESOURCE_MEM;
-	res[1].start = pdev->irq;
+	res[1].start = pci_irq_vector(pdev, 0);
 	res[1].name = PCI_DRIVER_NAME;
 	res[1].flags = IORESOURCE_IRQ;
 
-	dev_info(&pdev->dev, "EMAC physical base addr = 0x%p\n",
-		 (void *)(uintptr_t)pci_resource_start(pdev, 0));
+	dev_info(&pdev->dev, "EMAC physical base addr: %pa\n",
+		 &res[0].start);
 
 	/* set up macb platform data */
 	memset(&plat_data, 0, sizeof(plat_data));

@@ -100,7 +95,7 @@ static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	plat_info.num_res = ARRAY_SIZE(res);
 	plat_info.data = &plat_data;
 	plat_info.size_data = sizeof(plat_data);
-	plat_info.dma_mask = DMA_BIT_MASK(32);
+	plat_info.dma_mask = pdev->dma_mask;
 
 	/* register platform device */
 	plat_dev = platform_device_register_full(&plat_info);

@@ -120,7 +115,6 @@ static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	clk_unregister(plat_data.pclk);
 
 err_pclk_register:
-	pci_disable_device(pdev);
 	return err;
 }
 

@@ -130,7 +124,6 @@ static void macb_remove(struct pci_dev *pdev)
 	struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
 
 	platform_device_unregister(plat_dev);
-	pci_disable_device(pdev);
 	clk_unregister(plat_data->pclk);
 	clk_unregister(plat_data->hclk);
 }
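The probe conversion above moves to managed ("devres") PCI helpers: pcim_enable_device() registers an automatic disable for driver unbind, which is why the explicit pci_disable_device() calls disappear from the error path and from macb_remove(). A minimal sketch of the managed pattern (demo_probe is a hypothetical name):

    static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            int err = pcim_enable_device(pdev);     /* devres-managed enable */

            if (err < 0)
                    return err;     /* nothing to unwind by hand */
            pci_set_master(pdev);
            return 0;
    }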
@@ -77,7 +77,7 @@ config OCTEON_MGMT_ETHERNET
 config LIQUIDIO_VF
 	tristate "Cavium LiquidIO VF support"
 	depends on 64BIT && PCI_MSI
-	select PTP_1588_CLOCK
+	imply PTP_1588_CLOCK
 	---help---
 	  This driver supports Cavium LiquidIO Intelligent Server Adapter
 	  based on CN23XX chips.
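select vs. imply here: 'select PTP_1588_CLOCK' forces the symbol on even when its own dependencies are unmet, which can produce broken configurations, while 'imply' only defaults it on and lets the user (or missing dependencies) turn it off, so the driver must tolerate the PTP core being absent. A hedged sketch (FOO is a placeholder symbol):

    config FOO
    	tristate "example driver"
    	imply PTP_1588_CLOCK	# default on, but not forced

with the C side typically guarded by IS_REACHABLE(CONFIG_PTP_1588_CLOCK).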
@@ -133,17 +133,15 @@ cxgb_find_route6(struct cxgb4_lld_info *lldi,
 		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
 			fl6.flowi6_oif = sin6_scope_id;
 		dst = ip6_route_output(&init_net, NULL, &fl6);
-		if (!dst)
-			goto out;
-		if (!cxgb_our_interface(lldi, get_real_dev,
-					ip6_dst_idev(dst)->dev) &&
-		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+		if (dst->error ||
+		    (!cxgb_our_interface(lldi, get_real_dev,
+					 ip6_dst_idev(dst)->dev) &&
+		     !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK))) {
 			dst_release(dst);
-			dst = NULL;
+			return NULL;
 		}
 	}
 
-out:
 	return dst;
 }
 EXPORT_SYMBOL(cxgb_find_route6);
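The fix above reflects the ip6_route_output() calling convention: it never returns NULL; failures are reported through dst->error on the returned entry, so the old '!dst' test could never fire. The usual pattern, as a minimal sketch:

    struct dst_entry *dst = ip6_route_output(net, NULL, &fl6);

    if (dst->error) {           /* route lookup failed */
            dst_release(dst);   /* the reference must still be dropped */
            return NULL;
    }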
@@ -5155,7 +5155,9 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
 	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
 	    skb->inner_protocol != htons(ETH_P_TEB) ||
 	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
-	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+	    sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
+	    !adapter->vxlan_port ||
+	    udp_hdr(skb)->dest != adapter->vxlan_port)
 		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 
 	return features;
@@ -733,6 +733,7 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
 	priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
 
 	/* Enable Congestion State Change Notifications and CS taildrop */
+	memset(&initcgr, 0, sizeof(initcgr));
 	initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
 	initcgr.cgr.cscn_en = QM_CGR_EN;
 

@@ -2291,7 +2292,8 @@ static int dpaa_open(struct net_device *net_dev)
 	net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev);
 	if (!net_dev->phydev) {
 		netif_err(priv, ifup, net_dev, "init_phy() failed\n");
-		return -ENODEV;
+		err = -ENODEV;
+		goto phy_init_failed;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {

@@ -2314,6 +2316,7 @@ static int dpaa_open(struct net_device *net_dev)
 	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
 		fman_port_disable(mac_dev->port[i]);
 
+phy_init_failed:
 	dpaa_eth_napi_disable(priv);
 
 	return err;

@@ -2420,6 +2423,7 @@ static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
 	}
 
 	/* Enable CS TD, but disable Congestion State Change Notifications. */
+	memset(&initcgr, 0, sizeof(initcgr));
 	initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
 	initcgr.cgr.cscn_en = QM_CGR_EN;
 	cs_th = DPAA_INGRESS_CS_THRESHOLD;
@@ -245,13 +245,9 @@ static u32 freq_to_shift(u16 freq)
 {
 	u32 freq_khz = freq * 1000;
 	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
-	u64 tmp_rounded =
-		roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
-		roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
-	u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
-		max_val_cycles : tmp_rounded;
+	u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
 	/* calculate max possible multiplier in order to fit in 64bit */
-	u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
+	u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
 
 	/* This comes from the reverse of clocksource_khz2mult */
 	return ilog2(div_u64(max_mul * freq_khz, 1000000));
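The replacement expression rounds up to a power of two directly: fls64(x) returns the 1-based index of the highest set bit, so for v > 1, 1ULL << fls64(v - 1) is the smallest power of two >= v. For example v = 5 gives fls64(4) = 3 and 1ULL << 3 = 8, while v = 8 gives fls64(7) = 3 and stays 8. As a one-line sketch:

    u64 rounded = 1ULL << fls64(v - 1);  /* next power of two >= v, for v > 1 */

This avoids the old ternary, which mishandled the exact power-of-two case and clamped through UINT_MAX.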
@@ -445,8 +445,14 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
 
 		ring->stride = stride;
-		if (ring->stride <= TXBB_SIZE)
+		if (ring->stride <= TXBB_SIZE) {
+			/* Stamp first unused send wqe */
+			__be32 *ptr = (__be32 *)ring->buf;
+			__be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
+			*ptr = stamp;
+			/* Move pointer to start of rx section */
 			ring->buf += TXBB_SIZE;
+		}
 
 		ring->log_stride = ffs(ring->stride) - 1;
 		ring->buf_size = ring->size * ring->stride;
@@ -118,8 +118,13 @@ static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
 	if (!buf)
 		return -ENOMEM;
 
+	if (offset_in_page(buf)) {
+		dma_free_coherent(dev, PAGE_SIZE << order,
+				  buf, sg_dma_address(mem));
+		return -ENOMEM;
+	}
+
 	sg_set_buf(mem, buf, PAGE_SIZE << order);
 	BUG_ON(mem->offset);
 	sg_dma_len(mem) = PAGE_SIZE << order;
 	return 0;
 }
@@ -42,6 +42,7 @@
 #include <linux/io-mapping.h>
 #include <linux/delay.h>
 #include <linux/kmod.h>
+#include <linux/etherdevice.h>
 #include <net/devlink.h>
 
 #include <linux/mlx4/device.h>

@@ -782,6 +783,23 @@ int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
 }
 EXPORT_SYMBOL(mlx4_is_slave_active);
 
+void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+				       struct _rule_hw *eth_header)
+{
+	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
+	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
+		struct mlx4_net_trans_rule_hw_eth *eth =
+			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
+		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
+		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
+			next_rule->rsvd == 0;
+
+		if (last_rule)
+			ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
+	}
+}
+EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio);
+
 static void slave_adjust_steering_mode(struct mlx4_dev *dev,
 				       struct mlx4_dev_cap *dev_cap,
 				       struct mlx4_init_hca_param *hca_param)
@@ -4164,22 +4164,6 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
 	return 0;
 }
 
-static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
-					 struct _rule_hw *eth_header)
-{
-	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
-	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
-		struct mlx4_net_trans_rule_hw_eth *eth =
-			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
-		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
-		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
-			next_rule->rsvd == 0;
-
-		if (last_rule)
-			ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
-	}
-}
-
 /*
  * In case of missing eth header, append eth header with a MAC address
  * assigned to the VF.

@@ -4363,10 +4347,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
 
 	if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
-		handle_eth_header_mcast_prio(ctrl, rule_header);
-
-	if (slave == dev->caps.function)
-		goto execute;
+		mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
 
 	switch (header_id) {
 	case MLX4_NET_TRANS_RULE_ID_ETH:

@@ -4394,7 +4375,6 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 		goto err_put_qp;
 	}
 
-execute:
 	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
 			   vhcr->in_modifier, 0,
 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,

@@ -4473,6 +4453,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 	struct res_qp *rqp;
 	struct res_fs_rule *rrule;
 	u64 mirr_reg_id;
+	int qpn;
 
 	if (dev->caps.steering_mode !=
 	    MLX4_STEERING_MODE_DEVICE_MANAGED)

@@ -4489,10 +4470,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 	}
 	mirr_reg_id = rrule->mirr_rule_id;
 	kfree(rrule->mirr_mbox);
+	qpn = rrule->qpn;
 
 	/* Release the rule form busy state before removal */
 	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
-	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
+	err = get_res(dev, slave, qpn, RES_QP, &rqp);
 	if (err)
 		return err;
 

@@ -4517,7 +4499,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 	if (!err)
 		atomic_dec(&rqp->ref_count);
 out:
-	put_res(dev, slave, rrule->qpn, RES_QP);
+	put_res(dev, slave, qpn, RES_QP);
 	return err;
 }
 
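The qpn local above is the use-after-free fix from item #4: put_res() on the RES_FS_RULE drops the busy reference, after which rrule may be freed, so any field needed later has to be copied out first. The pattern, sketched:

    qpn = rrule->qpn;                       /* copy before dropping the ref */
    put_res(dev, slave, vhcr->in_param, RES_FS_RULE);   /* may free rrule */
    err = get_res(dev, slave, qpn, RES_QP, &rqp);       /* use the copy */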
@@ -723,6 +723,9 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
 	int i;
 	struct ieee_ets ets;
 
+	if (!MLX5_CAP_GEN(priv->mdev, ets))
+		return;
+
 	memset(&ets, 0, sizeof(ets));
 	ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
 	for (i = 0; i < ets.ets_cap; i++) {
@@ -171,7 +171,6 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
 		return NUM_SW_COUNTERS +
 		       MLX5E_NUM_Q_CNTRS(priv) +
 		       NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
-		       NUM_PCIE_COUNTERS +
 		       MLX5E_NUM_RQ_STATS(priv) +
 		       MLX5E_NUM_SQ_STATS(priv) +
 		       MLX5E_NUM_PFC_COUNTERS(priv) +

@@ -219,14 +218,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
 		       pport_2819_stats_desc[i].format);
 
-	for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pcie_perf_stats_desc[i].format);
-
-	for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pcie_tas_stats_desc[i].format);
-
 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
 			sprintf(data + (idx++) * ETH_GSTRING_LEN,

@@ -339,14 +330,6 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
 		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
 						  pport_2819_stats_desc, i);
 
-	for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
-						  pcie_perf_stats_desc, i);
-
-	for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_tas_counters,
-						  pcie_tas_stats_desc, i);
-
 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
 			data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
@@ -247,6 +247,7 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
 	}
 	if (fs->flow_type & FLOW_MAC_EXT &&
 	    !is_zero_ether_addr(fs->m_ext.h_dest)) {
+		mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
 					     outer_headers_c, dmac_47_16),
 				fs->m_ext.h_dest);
@@ -291,36 +291,12 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
 				      &qcnt->rx_out_of_buffer);
 }
 
-static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
-{
-	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
-	void *out;
-	u32 *in;
-
-	in = mlx5_vzalloc(sz);
-	if (!in)
-		return;
-
-	out = pcie_stats->pcie_perf_counters;
-	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
-	out = pcie_stats->pcie_tas_counters;
-	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
-	kvfree(in);
-}
-
 void mlx5e_update_stats(struct mlx5e_priv *priv)
 {
 	mlx5e_update_q_counter(priv);
 	mlx5e_update_vport_counters(priv);
 	mlx5e_update_pport_counters(priv);
 	mlx5e_update_sw_counters(priv);
-	mlx5e_update_pcie_counters(priv);
 }
 
 void mlx5e_update_stats_work(struct work_struct *work)

@@ -3805,14 +3781,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
 	mlx5_lag_add(mdev, netdev);
 
-	if (mlx5e_vxlan_allowed(mdev)) {
-		rtnl_lock();
-		udp_tunnel_get_rx_info(netdev);
-		rtnl_unlock();
-	}
-
 	mlx5e_enable_async_events(priv);
-	queue_work(priv->wq, &priv->set_rx_mode_work);
 
 	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
 		mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);

@@ -3822,6 +3791,18 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 		rep.netdev = netdev;
 		mlx5_eswitch_register_vport_rep(esw, 0, &rep);
 	}
+
+	if (netdev->reg_state != NETREG_REGISTERED)
+		return;
+
+	/* Device already registered: sync netdev system state */
+	if (mlx5e_vxlan_allowed(mdev)) {
+		rtnl_lock();
+		udp_tunnel_get_rx_info(netdev);
+		rtnl_unlock();
+	}
+
+	queue_work(priv->wq, &priv->set_rx_mode_work);
 }
 
 static void mlx5e_nic_disable(struct mlx5e_priv *priv)

@@ -3966,10 +3947,6 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
 	const struct mlx5e_profile *profile = priv->profile;
 
 	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
-	if (profile->disable)
-		profile->disable(priv);
-
-	flush_workqueue(priv->wq);
 
 	rtnl_lock();
 	if (netif_running(netdev))

@@ -3977,6 +3954,10 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
 		netif_device_detach(netdev);
 	rtnl_unlock();
 
+	if (profile->disable)
+		profile->disable(priv);
+	flush_workqueue(priv->wq);
+
 	mlx5e_destroy_q_counter(priv);
 	profile->cleanup_rx(priv);
 	mlx5e_close_drop_rq(priv);
@@ -39,7 +39,7 @@
 #define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
 	(*(u32 *)((char *)ptr + dsc[i].offset))
 #define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
-	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
+	be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
 
 #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
 #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)

@@ -276,32 +276,6 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
 	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
 };
 
-#define PCIE_PERF_OFF(c) \
-	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_PERF_GET(pcie_stats, c) \
-	MLX5_GET(mpcnt_reg, pcie_stats->pcie_perf_counters, \
-		 counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_TAS_OFF(c) \
-	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_tas_cntrs_grp_data_layout.c)
-#define PCIE_TAS_GET(pcie_stats, c) \
-	MLX5_GET(mpcnt_reg, pcie_stats->pcie_tas_counters, \
-		 counter_set.pcie_tas_cntrs_grp_data_layout.c)
-
-struct mlx5e_pcie_stats {
-	__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
-	__be64 pcie_tas_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
-};
-
-static const struct counter_desc pcie_perf_stats_desc[] = {
-	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
-	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
-};
-
-static const struct counter_desc pcie_tas_stats_desc[] = {
-	{ "tx_pci_transport_nonfatal_msg", PCIE_TAS_OFF(non_fatal_err_msg_sent) },
-	{ "tx_pci_transport_fatal_msg", PCIE_TAS_OFF(fatal_err_msg_sent) },
-};
-
 struct mlx5e_rq_stats {
 	u64 packets;
 	u64 bytes;

@@ -386,8 +360,6 @@ static const struct counter_desc sq_stats_desc[] = {
 #define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)
 #define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)
 #define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)
-#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
-#define NUM_PCIE_TAS_COUNTERS		ARRAY_SIZE(pcie_tas_stats_desc)
 #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
 	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
 #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \

@@ -397,7 +369,6 @@ static const struct counter_desc sq_stats_desc[] = {
 	 NUM_PPORT_2819_COUNTERS + \
 	 NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
 	 NUM_PPORT_PRIO)
-#define NUM_PCIE_COUNTERS	(NUM_PCIE_PERF_COUNTERS + NUM_PCIE_TAS_COUNTERS)
 #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
 #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
 

@@ -406,7 +377,6 @@ struct mlx5e_stats {
 	struct mlx5e_qcounter_stats qcnt;
 	struct mlx5e_vport_stats vport;
 	struct mlx5e_pport_stats pport;
-	struct mlx5e_pcie_stats pcie;
 	struct rtnl_link_stats64 vf_vport;
 };
 
@@ -1860,7 +1860,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 
 	if (!ESW_ALLOWED(esw))
 		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
+	if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
 		return -EINVAL;
 
 	mutex_lock(&esw->state_lock);
@@ -695,6 +695,12 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 		if (err)
 			goto err_reps;
 	}
+
+	/* disable PF RoCE so missed packets don't go through RoCE steering */
+	mlx5_dev_list_lock();
+	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
 
 	return 0;
 
 err_reps:

@@ -718,6 +724,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 {
 	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	mlx5_eswitch_disable_sriov(esw);
 	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
 	if (err) {
@@ -1263,6 +1263,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
 	nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
 	handle = add_rule_fte(fte, fg, dest, dest_num, false);
 	if (IS_ERR(handle)) {
+		unlock_ref_node(&fte->node);
 		kfree(fte);
 		goto unlock_fg;
 	}
@@ -503,6 +503,13 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
 		 to_fw_pkey_sz(dev, 128));
 
+	/* Check log_max_qp from HCA caps to set in current profile */
+	if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
+		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
+			       profile[prof_sel].log_max_qp,
+			       MLX5_CAP_GEN_MAX(dev, log_max_qp));
+		profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+	}
 	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
 		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
 			 prof->log_max_qp);

@@ -575,7 +582,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
 	struct mlx5_priv *priv = &mdev->priv;
 	struct msix_entry *msix = priv->msix_arr;
 	int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-	int numa_node = priv->numa_node;
 	int err;
 
 	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {

@@ -583,7 +589,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
 		return -ENOMEM;
 	}
 
-	cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
 			priv->irq_info[i].mask);
 
 	err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);

@@ -1189,6 +1195,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 {
 	int err = 0;
 
+	mlx5_drain_health_wq(dev);
+
 	mutex_lock(&dev->intf_state_mutex);
 	if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
 		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",

@@ -1351,10 +1359,9 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
 
 	mlx5_enter_error_state(dev);
 	mlx5_unload_one(dev, priv, false);
-	/* In case of kernel call save the pci state and drain health wq */
+	/* In case of kernel call save the pci state */
 	if (state) {
 		pci_save_state(pdev);
-		mlx5_drain_health_wq(dev);
 		mlx5_pci_disable_device(dev);
 	}
 
@@ -819,6 +819,7 @@ static struct sh_eth_cpu_data sh7734_data = {
 	.tsu = 1,
 	.hw_crc = 1,
 	.select_mii = 1,
+	.shift_rd0 = 1,
 };
 
 /* SH7763 */

@@ -1656,7 +1657,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 	else
 		goto out;
 
-	if (!likely(mdp->irq_enabled)) {
+	if (unlikely(!mdp->irq_enabled)) {
 		sh_eth_write(ndev, 0, EESIPR);
 		goto out;
 	}
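The interrupt-handler change swaps !likely(x) for unlikely(!x). With the standard kernel macros these differ only in the branch-prediction hint, but the hint matters: !likely(x) tells the compiler x is expected true and then negates the value, so the rare branch ends up predicted taken. A sketch using the stock definitions (slow_path() is a placeholder):

    #define likely(x)	__builtin_expect(!!(x), 1)
    #define unlikely(x)	__builtin_expect(!!(x), 0)

    if (unlikely(!flag))	/* hints that the whole condition is rare */
            slow_path();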
@@ -1323,7 +1323,8 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
 	}
 
 	/* don't fail init if RSS setup doesn't work */
-	efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+	rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+	efx->rss_active = (rc == 0);
 
 	return 0;
 }
@@ -975,6 +975,8 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
 
 	case ETHTOOL_GRXFH: {
 		info->data = 0;
+		if (!efx->rss_active) /* No RSS */
+			return 0;
 		switch (info->flow_type) {
 		case UDP_V4_FLOW:
 			if (efx->rx_hash_udp_4tuple)
@@ -860,6 +860,7 @@ struct vfdi_status;
  * @rx_hash_key: Toeplitz hash key for RSS
  * @rx_indir_table: Indirection table for RSS
  * @rx_scatter: Scatter mode enabled for receives
+ * @rss_active: RSS enabled on hardware
  * @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
  * @int_error_count: Number of internal errors seen recently
  * @int_error_expire: Time at which error count will be expired

@@ -998,6 +999,7 @@ struct efx_nic {
 	u8 rx_hash_key[40];
 	u32 rx_indir_table[128];
 	bool rx_scatter;
+	bool rss_active;
 	bool rx_hash_udp_4tuple;
 
 	unsigned int_error_count;
@@ -403,6 +403,7 @@ static int siena_init_nic(struct efx_nic *efx)
 	efx_writeo(efx, &temp, FR_AZ_RX_CFG);
 
 	siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
+	efx->rss_active = true;
 
 	/* Enable event logging */
 	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -60,8 +60,9 @@ struct oxnas_dwmac {
 	struct regmap	*regmap;
 };
 
-static int oxnas_dwmac_init(struct oxnas_dwmac *dwmac)
+static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
 {
+	struct oxnas_dwmac *dwmac = priv;
 	unsigned int value;
 	int ret;
 

@@ -105,20 +106,20 @@ static int oxnas_dwmac_init(struct oxnas_dwmac *dwmac)
 	return 0;
 }
 
+static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+	struct oxnas_dwmac *dwmac = priv;
+
+	clk_disable_unprepare(dwmac->clk);
+}
+
 static int oxnas_dwmac_probe(struct platform_device *pdev)
 {
 	struct plat_stmmacenet_data *plat_dat;
 	struct stmmac_resources stmmac_res;
-	struct device_node *sysctrl;
 	struct oxnas_dwmac *dwmac;
 	int ret;
 
-	sysctrl = of_parse_phandle(pdev->dev.of_node, "oxsemi,sys-ctrl", 0);
-	if (!sysctrl) {
-		dev_err(&pdev->dev, "failed to get sys-ctrl node\n");
-		return -EINVAL;
-	}
-
 	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
 	if (ret)
 		return ret;

@@ -128,73 +129,49 @@ static int oxnas_dwmac_probe(struct platform_device *pdev)
 		return PTR_ERR(plat_dat);
 
 	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
-	if (!dwmac)
-		return -ENOMEM;
+	if (!dwmac) {
+		ret = -ENOMEM;
+		goto err_remove_config_dt;
+	}
 
 	dwmac->dev = &pdev->dev;
 	plat_dat->bsp_priv = dwmac;
+	plat_dat->init = oxnas_dwmac_init;
+	plat_dat->exit = oxnas_dwmac_exit;
 
-	dwmac->regmap = syscon_node_to_regmap(sysctrl);
+	dwmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							"oxsemi,sys-ctrl");
 	if (IS_ERR(dwmac->regmap)) {
 		dev_err(&pdev->dev, "failed to have sysctrl regmap\n");
-		return PTR_ERR(dwmac->regmap);
+		ret = PTR_ERR(dwmac->regmap);
+		goto err_remove_config_dt;
 	}
 
 	dwmac->clk = devm_clk_get(&pdev->dev, "gmac");
-	if (IS_ERR(dwmac->clk))
-		return PTR_ERR(dwmac->clk);
+	if (IS_ERR(dwmac->clk)) {
+		ret = PTR_ERR(dwmac->clk);
+		goto err_remove_config_dt;
+	}
 
-	ret = oxnas_dwmac_init(dwmac);
+	ret = oxnas_dwmac_init(pdev, plat_dat->bsp_priv);
 	if (ret)
-		return ret;
+		goto err_remove_config_dt;
 
 	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 	if (ret)
-		clk_disable_unprepare(dwmac->clk);
+		goto err_dwmac_exit;
+
+	return 0;
 
+err_dwmac_exit:
+	oxnas_dwmac_exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+	stmmac_remove_config_dt(pdev, plat_dat);
+
 	return ret;
 }
 
-static int oxnas_dwmac_remove(struct platform_device *pdev)
-{
-	struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
-	int ret = stmmac_dvr_remove(&pdev->dev);
-
-	clk_disable_unprepare(dwmac->clk);
-
-	return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int oxnas_dwmac_suspend(struct device *dev)
-{
-	struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(dev);
-	int ret;
-
-	ret = stmmac_suspend(dev);
-	clk_disable_unprepare(dwmac->clk);
-
-	return ret;
-}
-
-static int oxnas_dwmac_resume(struct device *dev)
-{
-	struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(dev);
-	int ret;
-
-	ret = oxnas_dwmac_init(dwmac);
-	if (ret)
-		return ret;
-
-	ret = stmmac_resume(dev);
-
-	return ret;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(oxnas_dwmac_pm_ops,
-			 oxnas_dwmac_suspend, oxnas_dwmac_resume);
-
 static const struct of_device_id oxnas_dwmac_match[] = {
 	{ .compatible = "oxsemi,ox820-dwmac" },
 	{ }

@@ -203,10 +180,10 @@ MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
 
 static struct platform_driver oxnas_dwmac_driver = {
 	.probe = oxnas_dwmac_probe,
-	.remove = oxnas_dwmac_remove,
+	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name = "oxnas-dwmac",
-		.pm = &oxnas_dwmac_pm_ops,
+		.pm = &stmmac_pltfr_pm_ops,
 		.of_match_table = oxnas_dwmac_match,
 	},
 };
@@ -3339,13 +3339,6 @@ int stmmac_dvr_probe(struct device *device,
 
 	spin_lock_init(&priv->lock);
 
-	ret = register_netdev(ndev);
-	if (ret) {
-		netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
-			   __func__, ret);
-		goto error_netdev_register;
-	}
-
 	/* If a specific clk_csr value is passed from the platform
 	 * this means that the CSR Clock Range selection cannot be
 	 * changed at run-time and it is fixed. Viceversa the driver'll try to

@@ -3372,11 +3365,21 @@ int stmmac_dvr_probe(struct device *device,
 		}
 	}
 
-	return 0;
+	ret = register_netdev(ndev);
+	if (ret) {
+		netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
+			   __func__, ret);
+		goto error_netdev_register;
+	}
+
+	return ret;
 
-error_mdio_register:
-	unregister_netdev(ndev);
 error_netdev_register:
+	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+	    priv->hw->pcs != STMMAC_PCS_TBI &&
+	    priv->hw->pcs != STMMAC_PCS_RTBI)
+		stmmac_mdio_unregister(ndev);
+error_mdio_register:
 	netif_napi_del(&priv->napi);
 error_hw_init:
 	clk_disable_unprepare(priv->pclk);
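Moving register_netdev() to the end of stmmac_dvr_probe() closes the probe/open race from fix #1: the moment registration returns, userspace may invoke ndo_open concurrently, so everything open touches (locks, MDIO bus, clocks) must already exist. Sketched shape of a safe probe, assuming a hypothetical setup helper:

    spin_lock_init(&priv->lock);        /* everything ndo_open needs first */
    ret = setup_hw_and_mdio(priv);      /* hypothetical helper */
    if (ret)
            return ret;
    return register_netdev(ndev);       /* publish to userspace last */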
@@ -1367,6 +1367,7 @@ static struct usb_driver asix_driver = {
 	.probe = usbnet_probe,
 	.suspend = asix_suspend,
 	.resume = asix_resume,
+	.reset_resume = asix_resume,
 	.disconnect = usbnet_disconnect,
 	.supports_autosuspend = 1,
 	.disable_hub_initiated_lpm = 1,
@@ -967,6 +967,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
 	 */
 	need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
 	if (!ipv6_ndisc_frame(skb) && !need_strict) {
+		vrf_rx_stats(vrf_dev, skb->len);
 		skb->dev = vrf_dev;
 		skb->skb_iif = vrf_dev->ifindex;
 

@@ -1011,6 +1012,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
 		goto out;
 	}
 
+	vrf_rx_stats(vrf_dev, skb->len);
+
 	skb_push(skb, skb->mac_len);
 	dev_queue_xmit_nit(skb, vrf_dev);
 	skb_pull(skb, skb->mac_len);
@@ -218,7 +218,7 @@ static int slic_ds26522_probe(struct spi_device *spi)
 
 	ret = slic_ds26522_init_configure(spi);
 	if (ret == 0)
-		pr_info("DS26522 cs%d configurated\n", spi->chip_select);
+		pr_info("DS26522 cs%d configured\n", spi->chip_select);
 
 	return ret;
 }
@@ -1384,6 +1384,8 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
 int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
 int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
 				      bool *vlan_offload_disabled);
+void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+				       struct _rule_hw *eth_header);
 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
@@ -1071,11 +1071,6 @@ enum {
 	MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
 };
 
-enum {
-	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP       = 0x0,
-	MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
-};
-
 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
 {
 	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
@@ -123,7 +123,6 @@ enum {
 	MLX5_REG_HOST_ENDIANNESS = 0x7004,
 	MLX5_REG_MCIA		 = 0x9014,
 	MLX5_REG_MLCR		 = 0x902b,
-	MLX5_REG_MPCNT		 = 0x9051,
 };
 
 enum mlx5_dcbx_oper_mode {
@@ -1757,80 +1757,6 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
 	u8         reserved_at_4c0[0x300];
 };
 
-struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
-	u8         life_time_counter_high[0x20];
-
-	u8         life_time_counter_low[0x20];
-
-	u8         rx_errors[0x20];
-
-	u8         tx_errors[0x20];
-
-	u8         l0_to_recovery_eieos[0x20];
-
-	u8         l0_to_recovery_ts[0x20];
-
-	u8         l0_to_recovery_framing[0x20];
-
-	u8         l0_to_recovery_retrain[0x20];
-
-	u8         crc_error_dllp[0x20];
-
-	u8         crc_error_tlp[0x20];
-
-	u8         reserved_at_140[0x680];
-};
-
-struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits {
-	u8         life_time_counter_high[0x20];
-
-	u8         life_time_counter_low[0x20];
-
-	u8         time_to_boot_image_start[0x20];
-
-	u8         time_to_link_image[0x20];
-
-	u8         calibration_time[0x20];
-
-	u8         time_to_first_perst[0x20];
-
-	u8         time_to_detect_state[0x20];
-
-	u8         time_to_l0[0x20];
-
-	u8         time_to_crs_en[0x20];
-
-	u8         time_to_plastic_image_start[0x20];
-
-	u8         time_to_iron_image_start[0x20];
-
-	u8         perst_handler[0x20];
-
-	u8         times_in_l1[0x20];
-
-	u8         times_in_l23[0x20];
-
-	u8         dl_down[0x20];
-
-	u8         config_cycle1usec[0x20];
-
-	u8         config_cycle2to7usec[0x20];
-
-	u8         config_cycle_8to15usec[0x20];
-
-	u8         config_cycle_16_to_63usec[0x20];
-
-	u8         config_cycle_64usec[0x20];
-
-	u8         correctable_err_msg_sent[0x20];
-
-	u8         non_fatal_err_msg_sent[0x20];
-
-	u8         fatal_err_msg_sent[0x20];
-
-	u8         reserved_at_2e0[0x4e0];
-};
-
 struct mlx5_ifc_cmd_inter_comp_event_bits {
 	u8         command_completion_vector[0x20];
 

@@ -2995,12 +2921,6 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
 	u8         reserved_at_0[0x7c0];
 };
 
-union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
-	struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
-	struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits pcie_tas_cntrs_grp_data_layout;
-	u8         reserved_at_0[0x7c0];
-};
-
 union mlx5_ifc_event_auto_bits {
 	struct mlx5_ifc_comp_event_bits comp_event;
 	struct mlx5_ifc_dct_events_bits dct_events;

@@ -7320,18 +7240,6 @@ struct mlx5_ifc_ppcnt_reg_bits {
 	union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
 };
 
-struct mlx5_ifc_mpcnt_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         pcie_index[0x8];
-	u8         reserved_at_10[0xa];
-	u8         grp[0x6];
-
-	u8         clr[0x1];
-	u8         reserved_at_21[0x1f];
-
-	union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set;
-};
-
 struct mlx5_ifc_ppad_reg_bits {
 	u8         reserved_at_0[0x3];
 	u8         single_mac[0x1];

@@ -7937,7 +7845,6 @@ union mlx5_ifc_ports_control_registers_document_bits {
 	struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
 	struct mlx5_ifc_ppad_reg_bits ppad_reg;
 	struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
-	struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
 	struct mlx5_ifc_pplm_reg_bits pplm_reg;
 	struct mlx5_ifc_pplr_reg_bits pplr_reg;
 	struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
@@ -1059,7 +1059,9 @@ static void __exit lane_module_cleanup(void)
 {
 	int i;
 
+#ifdef CONFIG_PROC_FS
 	remove_proc_entry("lec", atm_proc_root);
+#endif
 
 	deregister_atm_ioctl(&lane_ioctl_ops);
 
@@ -75,6 +75,7 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
 	struct nlattr *nla;
 	struct sk_buff *skb;
 	unsigned long flags;
+	void *msg_header;
 
 	al = sizeof(struct net_dm_alert_msg);
 	al += dm_hit_limit * sizeof(struct net_dm_drop_point);

@@ -82,21 +83,41 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
 
 	skb = genlmsg_new(al, GFP_KERNEL);
 
-	if (skb) {
-		genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
-			    0, NET_DM_CMD_ALERT);
-		nla = nla_reserve(skb, NLA_UNSPEC,
-				  sizeof(struct net_dm_alert_msg));
-		msg = nla_data(nla);
-		memset(msg, 0, al);
-	} else {
-		mod_timer(&data->send_timer, jiffies + HZ / 10);
-	}
+	if (!skb)
+		goto err;
+
+	msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+				 0, NET_DM_CMD_ALERT);
+	if (!msg_header) {
+		nlmsg_free(skb);
+		skb = NULL;
+		goto err;
+	}
+	nla = nla_reserve(skb, NLA_UNSPEC,
+			  sizeof(struct net_dm_alert_msg));
+	if (!nla) {
+		nlmsg_free(skb);
+		skb = NULL;
+		goto err;
+	}
+	msg = nla_data(nla);
+	memset(msg, 0, al);
+	goto out;
 
+err:
+	mod_timer(&data->send_timer, jiffies + HZ / 10);
+out:
 	spin_lock_irqsave(&data->lock, flags);
 	swap(data->skb, skb);
 	spin_unlock_irqrestore(&data->lock, flags);
 
+	if (skb) {
+		struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+		struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);
+
+		genlmsg_end(skb, genlmsg_data(gnlh));
+	}
+
 	return skb;
 }
 
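The rework above restores the canonical genetlink construction sequence behind fix #6: genlmsg_put() returns a pointer to the user header, and genlmsg_end() needs that pointer to compute the final nlmsg_len; the old code never called genlmsg_end(), leaving a length that nlmsg_next() could not walk. A minimal sketch (MY_CMD/MY_ATTR are placeholders):

    void *hdr = genlmsg_put(skb, 0, 0, &family, 0, MY_CMD);
    if (!hdr)
            return -EMSGSIZE;
    if (nla_put_u32(skb, MY_ATTR, value))
            return -EMSGSIZE;
    genlmsg_end(skb, hdr);      /* fixes up nlmsg_len over everything added */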
@@ -468,8 +468,9 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 			if (hdr->flags & GRE_ACK)
 				offset += sizeof(((struct pptp_gre_header *)0)->ack);
 
-			ppp_hdr = skb_header_pointer(skb, nhoff + offset,
-						     sizeof(_ppp_hdr), _ppp_hdr);
+			ppp_hdr = __skb_header_pointer(skb, nhoff + offset,
+						       sizeof(_ppp_hdr),
+						       data, hlen, _ppp_hdr);
 			if (!ppp_hdr)
 				goto out_bad;
 
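This is fix #8: __skb_flow_dissect() can be called with skb == NULL and only a raw (data, hlen) buffer, and skb_header_pointer() dereferences skb unconditionally, so the pptp branch crashed. The two forms side by side, as a sketch (off and _ppp assumed declared by the caller):

    /* skb-based wrapper: dereferences skb, crashes when skb is NULL */
    ppp = skb_header_pointer(skb, off, sizeof(_ppp), &_ppp);
    /* raw variant: works from data/hlen, safe inside the dissector */
    ppp = __skb_header_pointer(skb, off, sizeof(_ppp), data, hlen, &_ppp);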
@@ -3898,6 +3898,9 @@ static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh)
 	u32 filter_mask;
 	int err;
 
+	if (nlmsg_len(nlh) < sizeof(*ifsm))
+		return -EINVAL;
+
 	ifsm = nlmsg_data(nlh);
 	if (ifsm->ifindex > 0)
 		dev = __dev_get_by_index(net, ifsm->ifindex);

@@ -3947,6 +3950,9 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
 	cb->seq = net->dev_base_seq;
 
+	if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
+		return -EINVAL;
+
 	ifsm = nlmsg_data(cb->nlh);
 	filter_mask = ifsm->filter_mask;
 	if (!filter_mask)
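Both getstats handlers gain the same guard (fix #3): nlmsg_data() is just a pointer into the message payload with no bounds attached, so nlmsg_len() must be checked against the expected header size before any field is read. The validation pattern, sketched:

    struct if_stats_msg *ifsm;

    if (nlmsg_len(nlh) < sizeof(*ifsm))
            return -EINVAL;         /* short/malformed request */
    ifsm = nlmsg_data(nlh);         /* now safe to dereference */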
@@ -85,7 +85,7 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
 	if (tb)
 		return tb;
 
-	if (id == RT_TABLE_LOCAL)
+	if (id == RT_TABLE_LOCAL && !net->ipv4.fib_has_custom_rules)
 		alias = fib_new_table(net, RT_TABLE_MAIN);
 
 	tb = fib_trie_table(id, alias);
@@ -219,9 +219,14 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
 static void igmp_gq_start_timer(struct in_device *in_dev)
 {
 	int tv = prandom_u32() % in_dev->mr_maxdelay;
+	unsigned long exp = jiffies + tv + 2;
+
+	if (in_dev->mr_gq_running &&
+	    time_after_eq(exp, (in_dev->mr_gq_timer).expires))
+		return;
 
 	in_dev->mr_gq_running = 1;
-	if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
+	if (!mod_timer(&in_dev->mr_gq_timer, exp))
 		in_dev_hold(in_dev);
 }
 
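The early return implements the RFC 3376 section 5.2 rule behind fix #9: when a new general query arrives, the response delay must be the lesser of the remaining time and the newly selected delay, so an already-armed earlier timer is left alone instead of being pushed out by mod_timer(). The generic shape, sketched (the igmp code tracks this with mr_gq_running rather than timer_pending()):

    if (timer_pending(&t) && time_before(t.expires, new_exp))
            return;         /* pending timer already fires sooner */
    mod_timer(&t, new_exp);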
@@ -1225,8 +1225,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
 		 * which has interface index (iif) as the first member of the
 		 * underlying inet{6}_skb_parm struct. This code then overlays
 		 * PKTINFO_SKB_CB and in_pktinfo also has iif as the first
-		 * element so the iif is picked up from the prior IPCB
+		 * element so the iif is picked up from the prior IPCB. If iif
+		 * is the loopback interface, then return the sending interface
+		 * (e.g., process binds socket to eth0 for Tx which is
+		 * redirected to loopback in the rtable/dst).
 		 */
+		if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
+			pktinfo->ipi_ifindex = inet_iif(skb);
+
 		pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
 	} else {
 		pktinfo->ipi_ifindex = 0;
@@ -1914,7 +1914,8 @@ out:	return err;
 		}
 	}
 
-	rth = rt_dst_alloc(net->loopback_dev, flags | RTCF_LOCAL, res.type,
+	rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
+			   flags | RTCF_LOCAL, res.type,
 			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
 	if (!rth)
 		goto e_nobufs;
@@ -1373,7 +1373,7 @@ static int __ip6_append_data(struct sock *sk,
 	 */
 
 	cork->length += length;
-	if (((length > mtu) ||
+	if ((((length + fragheaderlen) > mtu) ||
 	     (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
@@ -47,7 +47,8 @@ static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
 	return (struct l2tp_ip_sock *)sk;
 }
 
-static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
+static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
+					  __be32 raddr, int dif, u32 tunnel_id)
 {
 	struct sock *sk;
 

@@ -61,6 +62,7 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif
 		if ((l2tp->conn_id == tunnel_id) &&
 		    net_eq(sock_net(sk), net) &&
 		    !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
+		    (!inet->inet_daddr || !raddr || inet->inet_daddr == raddr) &&
 		    (!sk->sk_bound_dev_if || !dif ||
 		     sk->sk_bound_dev_if == dif))
 			goto found;

@@ -71,15 +73,6 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif
 	return sk;
 }
 
-static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
-{
-	struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
-	if (sk)
-		sock_hold(sk);
-
-	return sk;
-}
-
 /* When processing receive frames, there are two cases to
  * consider. Data frames consist of a non-zero session-id and an
  * optional cookie. Control frames consist of a regular L2TP header

@@ -183,8 +176,8 @@ static int l2tp_ip_recv(struct sk_buff *skb)
 	struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
 
 	read_lock_bh(&l2tp_ip_lock);
-	sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb),
-				   tunnel_id);
+	sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
+				   inet_iif(skb), tunnel_id);
 	if (!sk) {
 		read_unlock_bh(&l2tp_ip_lock);
 		goto discard;

@@ -280,7 +273,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 		inet->inet_saddr = 0;  /* Use device */
 
 	write_lock_bh(&l2tp_ip_lock);
-	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
+	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
 				  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
 		write_unlock_bh(&l2tp_ip_lock);
 		ret = -EADDRINUSE;
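The extra raddr argument is the wildcard fix from item #7: a field only constrains the match when both the socket side and the packet side are set, so connected sockets stop receiving from arbitrary peers while unbound sockets still match anything. The bind path passes 0 (or NULL in the IPv6 variant below) because the remote address is irrelevant to an address-in-use check. The match rule, distilled into a sketch:

    /* one address term of the lookup; zero means "wildcard" */
    static bool addr_match(__be32 sk_addr, __be32 pkt_addr)
    {
            return !sk_addr || !pkt_addr || sk_addr == pkt_addr;
    }
    /* used as: addr_match(rcv_saddr, laddr) && addr_match(daddr, raddr) */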
@@ -59,12 +59,14 @@ static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
 
 static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
 					   struct in6_addr *laddr,
+					   const struct in6_addr *raddr,
 					   int dif, u32 tunnel_id)
 {
 	struct sock *sk;
 
 	sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
-		const struct in6_addr *addr = inet6_rcv_saddr(sk);
+		const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
+		const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
 		struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
 
 		if (l2tp == NULL)

@@ -72,7 +74,8 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
 
 		if ((l2tp->conn_id == tunnel_id) &&
 		    net_eq(sock_net(sk), net) &&
-		    (!addr || ipv6_addr_equal(addr, laddr)) &&
+		    (!sk_laddr || ipv6_addr_any(sk_laddr) || ipv6_addr_equal(sk_laddr, laddr)) &&
+		    (!raddr || ipv6_addr_any(sk_raddr) || ipv6_addr_equal(sk_raddr, raddr)) &&
 		    (!sk->sk_bound_dev_if || !dif ||
 		     sk->sk_bound_dev_if == dif))
 			goto found;

@@ -83,17 +86,6 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
 	return sk;
 }
 
-static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
-						struct in6_addr *laddr,
-						int dif, u32 tunnel_id)
-{
-	struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id);
-	if (sk)
-		sock_hold(sk);
-
-	return sk;
-}
-
 /* When processing receive frames, there are two cases to
  * consider. Data frames consist of a non-zero session-id and an
  * optional cookie. Control frames consist of a regular L2TP header

@@ -197,8 +189,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 	struct ipv6hdr *iph = ipv6_hdr(skb);
 
 	read_lock_bh(&l2tp_ip6_lock);
-	sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb),
-				    tunnel_id);
+	sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
+				    inet6_iif(skb), tunnel_id);
 	if (!sk) {
 		read_unlock_bh(&l2tp_ip6_lock);
 		goto discard;

@@ -330,7 +322,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	rcu_read_unlock();
 
 	write_lock_bh(&l2tp_ip6_lock);
-	if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if,
+	if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if,
 				   addr->l2tp_conn_id)) {
 		write_unlock_bh(&l2tp_ip6_lock);
 		err = -EADDRINUSE;
@@ -3287,7 +3287,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
 	int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2);
 	int hw_headroom = sdata->local->hw.extra_tx_headroom;
 	struct ethhdr eth;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_info *info;
 	struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
 	struct ieee80211_tx_data tx;
 	ieee80211_tx_result r;

@@ -3351,6 +3351,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
 	memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN);
 	memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN);
 
+	info = IEEE80211_SKB_CB(skb);
 	memset(info, 0, sizeof(*info));
 	info->band = fast_tx->band;
 	info->control.vif = &sdata->vif;
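Deferring the IEEE80211_SKB_CB() read is the point of this change: the CB area aliases skb->cb, and between the old early assignment and its use the skb can be reallocated to gain headroom, leaving info pointing into a freed buffer. The general rule, as a sketch (expand_headroom() is a placeholder):

    skb = expand_headroom(skb);         /* may return a different skb */
    if (!skb)
            return false;
    info = IEEE80211_SKB_CB(skb);       /* recompute from the final skb */
    memset(info, 0, sizeof(*info));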
@@ -153,10 +153,14 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 
 		switch (ip_tunnel_info_af(info)) {
 		case AF_INET:
+			skb_key.enc_control.addr_type =
+				FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 			skb_key.enc_ipv4.src = key->u.ipv4.src;
 			skb_key.enc_ipv4.dst = key->u.ipv4.dst;
 			break;
 		case AF_INET6:
+			skb_key.enc_control.addr_type =
+				FLOW_DISSECTOR_KEY_IPV6_ADDRS;
 			skb_key.enc_ipv6.src = key->u.ipv6.src;
 			skb_key.enc_ipv6.dst = key->u.ipv6.dst;
 			break;
@@ -537,7 +537,7 @@ int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
 {
 	int err = simple_setattr(dentry, iattr);
 
-	if (!err) {
+	if (!err && (iattr->ia_valid & ATTR_UID)) {
 		struct socket *sock = SOCKET_I(d_inode(dentry));
 
 		sock->sk->sk_uid = iattr->ia_uid;
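The added ia_valid test is what stops the garbage UID from fix #5: setattr is invoked for many attribute changes (chmod, utimes, ...), and iattr->ia_uid only holds a meaningful value when ATTR_UID is set. Sketch of the guard:

    if (iattr->ia_valid & ATTR_UID)
            sock->sk->sk_uid = iattr->ia_uid;   /* a uid was actually supplied */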