Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 1) Fix leak in nl80211 AP start where we leak the ACL memory, from
    Johannes Berg.

 2) Fix double mutex unlock in mac80211, from Andrei Otcheretianski.

 3) Fix RCU stall in ipset, from Jozsef Kadlecsik.

 4) Fix devlink locking in devlink_dpipe_table_register, from Madhuparna
    Bhowmik.

 5) Fix race causing TX hang in ll_temac, from Esben Haabendal.

 6) Stale eth hdr pointer in br_dev_xmit(), from Nikolay Aleksandrov.

 7) Fix TX hash calculation bounds checking wrt. tc rules, from Amritha
    Nambiar.

 8) Size netlink responses properly in schedule action code to take into
    consideration TCA_ACT_FLAGS. From Jiri Pirko.

 9) Fix firmware paths for mscc PHY driver, from Antoine Tenart.

10) Don't register stmmac notifier multiple times, from Aaro Koskinen.

11) Various rmnet bug fixes, from Taehee Yoo.

12) Fix vsock deadlock in vsock transport release, from Stefano
    Garzarella.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (61 commits)
  net: dsa: mv88e6xxx: Fix masking of egress port
  mlxsw: pci: Wait longer before accessing the device after reset
  sfc: fix timestamp reconstruction at 16-bit rollover points
  vsock: fix potential deadlock in transport->release()
  unix: It's CONFIG_PROC_FS not CONFIG_PROCFS
  net: rmnet: fix packet forwarding in rmnet bridge mode
  net: rmnet: fix bridge mode bugs
  net: rmnet: use upper/lower device infrastructure
  net: rmnet: do not allow to change mux id if mux id is duplicated
  net: rmnet: remove rcu_read_lock in rmnet_force_unassociate_device()
  net: rmnet: fix suspicious RCU usage
  net: rmnet: fix NULL pointer dereference in rmnet_changelink()
  net: rmnet: fix NULL pointer dereference in rmnet_newlink()
  net: phy: marvell: don't interpret PHY status unless resolved
  mlx5: register lag notifier for init network namespace only
  unix: define and set show_fdinfo only if procfs is enabled
  hinic: fix a bug of rss configuration
  hinic: fix a bug of setting hw_ioctxt
  hinic: fix a irq affinity bug
  net/smc: check for valid ib_client_data
  ...
commit 7058b83789
Linus Torvalds, 2020-02-27 16:34:41 -08:00
70 changed files with 1059 additions and 533 deletions


@ -487,8 +487,9 @@ phy_register_fixup_for_id()::
The stubs set one of the two matching criteria, and set the other one to
match anything.
When phy_register_fixup() or \*_for_uid()/\*_for_id() is called at module,
unregister fixup and free allocate memory are required.
When phy_register_fixup() or \*_for_uid()/\*_for_id() is called at module load
time, the module needs to unregister the fixup and free allocated memory when
it's unloaded.
Call one of following function before unloading module::
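
As a rough illustration of that pairing, a loadable module could register the
fixup on load and drop it again on unload; this is only a sketch, and the PHY
ID below is invented for the example:

#include <linux/module.h>
#include <linux/phy.h>

/* Illustrative PHY ID only -- substitute the OUI/model the fixup must match */
#define EXAMPLE_PHY_UID		0x00112233
#define EXAMPLE_PHY_UID_MASK	0xffffffff

static int example_phy_fixup(struct phy_device *phydev)
{
	/* board-specific register tweaks would go here */
	return 0;
}

static int __init example_fixup_init(void)
{
	/* registered at module load time ... */
	return phy_register_fixup_for_uid(EXAMPLE_PHY_UID, EXAMPLE_PHY_UID_MASK,
					  example_phy_fixup);
}

static void __exit example_fixup_exit(void)
{
	/* ... and unregistered again before the module memory goes away */
	phy_unregister_fixup_for_uid(EXAMPLE_PHY_UID, EXAMPLE_PHY_UID_MASK);
}

module_init(example_fixup_init);
module_exit(example_fixup_exit);
MODULE_LICENSE("GPL");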


@ -69,8 +69,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
/* Force link status for IMP port */
reg = core_readl(priv, offset);
reg |= (MII_SW_OR | LINK_STS);
if (priv->type == BCM7278_DEVICE_ID)
reg |= GMII_SPEED_UP_2G;
reg &= ~GMII_SPEED_UP_2G;
core_writel(priv, reg, offset);
/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */


@ -278,13 +278,13 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip,
switch (direction) {
case MV88E6XXX_EGRESS_DIR_INGRESS:
dest_port_chip = &chip->ingress_dest_port;
reg &= MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK;
reg &= ~MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK;
reg |= port <<
__bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK);
break;
case MV88E6XXX_EGRESS_DIR_EGRESS:
dest_port_chip = &chip->egress_dest_port;
reg &= MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK;
reg &= ~MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK;
reg |= port <<
__bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK);
break;


@ -11252,7 +11252,7 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
}
}
if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
netdev_info(bp->dev, "Receive PF driver unload event!");
netdev_info(bp->dev, "Receive PF driver unload event!\n");
}
#else
@ -11759,7 +11759,7 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
u32 dw;
if (!pos) {
netdev_info(bp->dev, "Unable do read adapter's DSN");
netdev_info(bp->dev, "Unable do read adapter's DSN\n");
return -EOPNOTSUPP;
}


@ -641,14 +641,14 @@ static int bnxt_dl_params_register(struct bnxt *bp)
rc = devlink_params_register(bp->dl, bnxt_dl_params,
ARRAY_SIZE(bnxt_dl_params));
if (rc) {
netdev_warn(bp->dev, "devlink_params_register failed. rc=%d",
netdev_warn(bp->dev, "devlink_params_register failed. rc=%d\n",
rc);
return rc;
}
rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
ARRAY_SIZE(bnxt_dl_port_params));
if (rc) {
netdev_err(bp->dev, "devlink_port_params_register failed");
netdev_err(bp->dev, "devlink_port_params_register failed\n");
devlink_params_unregister(bp->dl, bnxt_dl_params,
ARRAY_SIZE(bnxt_dl_params));
return rc;
@ -679,7 +679,7 @@ int bnxt_dl_register(struct bnxt *bp)
else
dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl));
if (!dl) {
netdev_warn(bp->dev, "devlink_alloc failed");
netdev_warn(bp->dev, "devlink_alloc failed\n");
return -ENOMEM;
}
@ -692,7 +692,7 @@ int bnxt_dl_register(struct bnxt *bp)
rc = devlink_register(dl, &bp->pdev->dev);
if (rc) {
netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc);
netdev_warn(bp->dev, "devlink_register failed. rc=%d\n", rc);
goto err_dl_free;
}
@ -704,7 +704,7 @@ int bnxt_dl_register(struct bnxt *bp)
sizeof(bp->dsn));
rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
if (rc) {
netdev_err(bp->dev, "devlink_port_register failed");
netdev_err(bp->dev, "devlink_port_register failed\n");
goto err_dl_unreg;
}


@ -2028,7 +2028,7 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
}
if (fw->size > item_len) {
netdev_err(dev, "PKG insufficient update area in nvram: %lu",
netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
(unsigned long)fw->size);
rc = -EFBIG;
} else {
@ -3338,7 +3338,7 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
kfree(coredump.data);
*dump_len += sizeof(struct bnxt_coredump_record);
if (rc == -ENOBUFS)
netdev_err(bp->dev, "Firmware returned large coredump buffer");
netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
return rc;
}


@ -50,7 +50,7 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
/* check if dev belongs to the same switch */
if (!netdev_port_same_parent_id(pf_bp->dev, dev)) {
netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch\n",
dev->ifindex);
return BNXT_FID_INVALID;
}
@ -70,7 +70,7 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
struct net_device *dev = act->dev;
if (!dev) {
netdev_info(bp->dev, "no dev in mirred action");
netdev_info(bp->dev, "no dev in mirred action\n");
return -EINVAL;
}
@ -106,7 +106,7 @@ static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
const struct ip_tunnel_key *tun_key = &tun_info->key;
if (ip_tunnel_info_af(tun_info) != AF_INET) {
netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
netdev_info(bp->dev, "only IPv4 tunnel-encap is supported\n");
return -EOPNOTSUPP;
}
@ -295,7 +295,7 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
int i, rc;
if (!flow_action_has_entries(flow_action)) {
netdev_info(bp->dev, "no actions");
netdev_info(bp->dev, "no actions\n");
return -EINVAL;
}
@ -370,7 +370,7 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
(dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
@ -508,7 +508,7 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
return rc;
}
@ -841,7 +841,7 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
resp = bnxt_get_hwrm_resp_addr(bp, &req);
*decap_filter_handle = resp->decap_filter_id;
} else {
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
@ -859,7 +859,7 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
return rc;
}
@ -906,7 +906,7 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
resp = bnxt_get_hwrm_resp_addr(bp, &req);
*encap_record_handle = resp->encap_record_id;
} else {
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
@ -924,7 +924,7 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp,
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
return rc;
}
@ -943,7 +943,7 @@ static int bnxt_tc_put_l2_node(struct bnxt *bp,
tc_info->l2_ht_params);
if (rc)
netdev_err(bp->dev,
"Error: %s: rhashtable_remove_fast: %d",
"Error: %s: rhashtable_remove_fast: %d\n",
__func__, rc);
kfree_rcu(l2_node, rcu);
}
@ -972,7 +972,7 @@ bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
if (rc) {
kfree_rcu(l2_node, rcu);
netdev_err(bp->dev,
"Error: %s: rhashtable_insert_fast: %d",
"Error: %s: rhashtable_insert_fast: %d\n",
__func__, rc);
return NULL;
}
@ -1031,7 +1031,7 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
(flow->l4_key.ip_proto != IPPROTO_TCP &&
flow->l4_key.ip_proto != IPPROTO_UDP)) {
netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports\n",
flow->l4_key.ip_proto);
return false;
}
@ -1088,7 +1088,7 @@ static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
*ht_params);
if (rc) {
netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc);
rc = -1;
}
kfree_rcu(tunnel_node, rcu);
@ -1129,7 +1129,7 @@ bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
tunnel_node->refcount++;
return tunnel_node;
err:
netdev_info(bp->dev, "error rc=%d", rc);
netdev_info(bp->dev, "error rc=%d\n", rc);
return NULL;
}
@ -1187,7 +1187,7 @@ static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
&decap_l2_node->node,
tc_info->decap_l2_ht_params);
if (rc)
netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc);
kfree_rcu(decap_l2_node, rcu);
}
}
@ -1227,7 +1227,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
if (IS_ERR(rt)) {
netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
netdev_info(bp->dev, "no route to %pI4b\n", &flow.daddr);
return -EOPNOTSUPP;
}
@ -1241,7 +1241,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
if (vlan->real_dev != real_dst_dev) {
netdev_info(bp->dev,
"dst_dev(%s) doesn't use PF-if(%s)",
"dst_dev(%s) doesn't use PF-if(%s)\n",
netdev_name(dst_dev),
netdev_name(real_dst_dev));
rc = -EOPNOTSUPP;
@ -1253,7 +1253,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
#endif
} else if (dst_dev != real_dst_dev) {
netdev_info(bp->dev,
"dst_dev(%s) for %pI4b is not PF-if(%s)",
"dst_dev(%s) for %pI4b is not PF-if(%s)\n",
netdev_name(dst_dev), &flow.daddr,
netdev_name(real_dst_dev));
rc = -EOPNOTSUPP;
@ -1262,7 +1262,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
if (!nbr) {
netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
netdev_info(bp->dev, "can't lookup neighbor for %pI4b\n",
&flow.daddr);
rc = -EOPNOTSUPP;
goto put_rt;
@ -1472,7 +1472,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
tc_info->flow_ht_params);
if (rc)
netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d\n",
__func__, rc);
kfree_rcu(flow_node, rcu);
@ -1587,7 +1587,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
free_node:
kfree_rcu(new_node, rcu);
done:
netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d\n",
__func__, tc_flow_cmd->cookie, rc);
return rc;
}
@ -1700,7 +1700,7 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
le64_to_cpu(resp_bytes[i]);
}
} else {
netdev_info(bp->dev, "error rc=%d", rc);
netdev_info(bp->dev, "error rc=%d\n", rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
@ -1970,7 +1970,7 @@ static int bnxt_tc_indr_block_event(struct notifier_block *nb,
bp);
if (rc)
netdev_info(bp->dev,
"Failed to register indirect blk: dev: %s",
"Failed to register indirect blk: dev: %s\n",
netdev->name);
break;
case NETDEV_UNREGISTER:


@ -43,7 +43,7 @@ static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx,
netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x",
*tx_cfa_action, *rx_cfa_code);
} else {
netdev_info(bp->dev, "%s error rc=%d", __func__, rc);
netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
@ -60,7 +60,7 @@ static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
netdev_info(bp->dev, "%s error rc=%d", __func__, rc);
netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
return rc;
}
@ -465,7 +465,7 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
return 0;
err:
netdev_info(bp->dev, "%s error=%d", __func__, rc);
netdev_info(bp->dev, "%s error=%d\n", __func__, rc);
kfree(cfa_code_map);
__bnxt_vf_reps_destroy(bp);
return rc;
@ -488,7 +488,7 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
mutex_lock(&bp->sriov_lock);
if (bp->eswitch_mode == mode) {
netdev_info(bp->dev, "already in %s eswitch mode",
netdev_info(bp->dev, "already in %s eswitch mode\n",
mode == DEVLINK_ESWITCH_MODE_LEGACY ?
"legacy" : "switchdev");
rc = -EINVAL;
@ -508,7 +508,7 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
}
if (pci_num_vf(bp->pdev) == 0) {
netdev_info(bp->dev, "Enable VFs before setting switchdev mode");
netdev_info(bp->dev, "Enable VFs before setting switchdev mode\n");
rc = -EPERM;
goto done;
}


@ -294,6 +294,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
*/
if (priv->ext_phy) {
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
reg &= ~ID_MODE_DIS;
reg |= id_mode_dis;
if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
reg |= RGMII_MODE_EN_V123;


@ -297,6 +297,7 @@ static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,
}
hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT;
hw_ioctxt.cmdq_depth = 0;


@ -151,8 +151,8 @@ struct hinic_cmd_hw_ioctxt {
u8 lro_en;
u8 rsvd3;
u8 ppf_idx;
u8 rsvd4;
u8 rsvd5;
u16 rq_depth;
u16 rx_buf_sz_idx;


@ -137,6 +137,7 @@
#define HINIC_HWIF_FUNC_IDX(hwif) ((hwif)->attr.func_idx)
#define HINIC_HWIF_PCI_INTF(hwif) ((hwif)->attr.pci_intf_idx)
#define HINIC_HWIF_PF_IDX(hwif) ((hwif)->attr.pf_idx)
#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx)
#define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type)
#define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF)


@ -94,6 +94,7 @@ struct hinic_rq {
struct hinic_wq *wq;
struct cpumask affinity_mask;
u32 irq;
u16 msix_entry;


@ -356,7 +356,8 @@ static void hinic_enable_rss(struct hinic_dev *nic_dev)
if (!num_cpus)
num_cpus = num_online_cpus();
nic_dev->num_qps = min_t(u16, nic_dev->max_qps, num_cpus);
nic_dev->num_qps = hinic_hwdev_num_qps(hwdev);
nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus);
nic_dev->rss_limit = nic_dev->num_qps;
nic_dev->num_rss = nic_dev->num_qps;


@ -475,7 +475,6 @@ static int rx_request_irq(struct hinic_rxq *rxq)
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_rq *rq = rxq->rq;
struct hinic_qp *qp;
struct cpumask mask;
int err;
rx_add_napi(rxq);
@ -492,8 +491,8 @@ static int rx_request_irq(struct hinic_rxq *rxq)
}
qp = container_of(rq, struct hinic_qp, rq);
cpumask_set_cpu(qp->q_id % num_online_cpus(), &mask);
return irq_set_affinity_hint(rq->irq, &mask);
cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
return irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
}
static void rx_free_irq(struct hinic_rxq *rxq)


@ -5147,7 +5147,6 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
#ifdef CONFIG_MLX5_CORE_EN_DCB
@ -5168,7 +5167,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_monitor_counter_cleanup(priv);
mlx5e_disable_async_events(priv);
mlx5_lag_remove(mdev, netdev);
mlx5_lag_remove(mdev);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)


@ -1861,7 +1861,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@ -1870,7 +1869,7 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
#endif
mlx5_notifier_unregister(mdev, &priv->events_nb);
cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
mlx5_lag_remove(mdev, netdev);
mlx5_lag_remove(mdev);
}
static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);


@ -464,9 +464,6 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
struct mlx5_lag *ldev;
int changed = 0;
if (!net_eq(dev_net(ndev), &init_net))
return NOTIFY_DONE;
if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
return NOTIFY_DONE;
@ -586,8 +583,7 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (!ldev->nb.notifier_call) {
ldev->nb.notifier_call = mlx5_lag_netdev_event;
if (register_netdevice_notifier_dev_net(netdev, &ldev->nb,
&ldev->nn)) {
if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
ldev->nb.notifier_call = NULL;
mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
}
@ -600,7 +596,7 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
}
/* Must be called with intf_mutex held */
void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev)
void mlx5_lag_remove(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
int i;
@ -620,8 +616,7 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev)
if (i == MLX5_MAX_PORTS) {
if (ldev->nb.notifier_call)
unregister_netdevice_notifier_dev_net(netdev, &ldev->nb,
&ldev->nn);
unregister_netdevice_notifier_net(&init_net, &ldev->nb);
mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
mlx5_lag_dev_free(ldev);


@ -44,7 +44,6 @@ struct mlx5_lag {
struct workqueue_struct *wq;
struct delayed_work bond_work;
struct notifier_block nb;
struct netdev_net_notifier nn;
struct lag_mp lag_mp;
};


@ -157,7 +157,7 @@ int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
u8 feature_group, u8 access_reg_group);
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev);
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);


@ -28,7 +28,7 @@
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 900000
#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
#define MLXSW_PCI_SW_RESET_WAIT_MSECS 200
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
#define MLXSW_PCI_FW_READY_MAGIC 0x5E


@ -513,14 +513,17 @@ static irqreturn_t ks_irq(int irq, void *pw)
{
struct net_device *netdev = pw;
struct ks_net *ks = netdev_priv(netdev);
unsigned long flags;
u16 status;
spin_lock_irqsave(&ks->statelock, flags);
/*this should be the first in IRQ handler */
ks_save_cmd_reg(ks);
status = ks_rdreg16(ks, KS_ISR);
if (unlikely(!status)) {
ks_restore_cmd_reg(ks);
spin_unlock_irqrestore(&ks->statelock, flags);
return IRQ_NONE;
}
@ -546,6 +549,7 @@ static irqreturn_t ks_irq(int irq, void *pw)
ks->netdev->stats.rx_over_errors++;
/* this should be the last in IRQ handler*/
ks_restore_cmd_reg(ks);
spin_unlock_irqrestore(&ks->statelock, flags);
return IRQ_HANDLED;
}
@ -615,6 +619,7 @@ static int ks_net_stop(struct net_device *netdev)
/* shutdown RX/TX QMU */
ks_disable_qmu(ks);
ks_disable_int(ks);
/* set powermode to soft power down to save power */
ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
@ -671,10 +676,9 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
netdev_tx_t retv = NETDEV_TX_OK;
struct ks_net *ks = netdev_priv(netdev);
unsigned long flags;
disable_irq(netdev->irq);
ks_disable_int(ks);
spin_lock(&ks->statelock);
spin_lock_irqsave(&ks->statelock, flags);
/* Extra space are required:
* 4 byte for alignment, 4 for status/length, 4 for CRC
@ -688,9 +692,7 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb(skb);
} else
retv = NETDEV_TX_BUSY;
spin_unlock(&ks->statelock);
ks_enable_int(ks);
enable_irq(netdev->irq);
spin_unlock_irqrestore(&ks->statelock, flags);
return retv;
}


@ -13,25 +13,6 @@
#include "rmnet_vnd.h"
#include "rmnet_private.h"
/* Locking scheme -
* The shared resource which needs to be protected is realdev->rx_handler_data.
* For the writer path, this is using rtnl_lock(). The writer paths are
* rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
* paths are already called with rtnl_lock() acquired in. There is also an
* ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
* dereference here, we will need to use rtnl_dereference(). Dev list writing
* needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
* For the reader path, the real_dev->rx_handler_data is called in the TX / RX
* path. We only need rcu_read_lock() for these scenarios. In these cases,
* the rcu_read_lock() is held in __dev_queue_xmit() and
* netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
* to get the relevant information. For dev list reading, we again acquire
* rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
* We also use unregister_netdevice_many() to free all rmnet devices in
* rmnet_force_unassociate_device() so we dont lose the rtnl_lock() and free in
* same context.
*/
/* Local Definitions and Declarations */
static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = {
@ -51,9 +32,10 @@ rmnet_get_port_rtnl(const struct net_device *real_dev)
return rtnl_dereference(real_dev->rx_handler_data);
}
static int rmnet_unregister_real_device(struct net_device *real_dev,
struct rmnet_port *port)
static int rmnet_unregister_real_device(struct net_device *real_dev)
{
struct rmnet_port *port = rmnet_get_port_rtnl(real_dev);
if (port->nr_rmnet_devs)
return -EINVAL;
@ -61,9 +43,6 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
kfree(port);
/* release reference on real_dev */
dev_put(real_dev);
netdev_dbg(real_dev, "Removed from rmnet\n");
return 0;
}
@ -89,9 +68,6 @@ static int rmnet_register_real_device(struct net_device *real_dev)
return -EBUSY;
}
/* hold on to real dev for MAP data */
dev_hold(real_dev);
for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
INIT_HLIST_HEAD(&port->muxed_ep[entry]);
@ -99,28 +75,33 @@ static int rmnet_register_real_device(struct net_device *real_dev)
return 0;
}
static void rmnet_unregister_bridge(struct net_device *dev,
struct rmnet_port *port)
static void rmnet_unregister_bridge(struct rmnet_port *port)
{
struct rmnet_port *bridge_port;
struct net_device *bridge_dev;
struct net_device *bridge_dev, *real_dev, *rmnet_dev;
struct rmnet_port *real_port;
if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
return;
/* bridge slave handling */
rmnet_dev = port->rmnet_dev;
if (!port->nr_rmnet_devs) {
bridge_dev = port->bridge_ep;
/* bridge device */
real_dev = port->bridge_ep;
bridge_dev = port->dev;
bridge_port = rmnet_get_port_rtnl(bridge_dev);
bridge_port->bridge_ep = NULL;
bridge_port->rmnet_mode = RMNET_EPMODE_VND;
real_port = rmnet_get_port_rtnl(real_dev);
real_port->bridge_ep = NULL;
real_port->rmnet_mode = RMNET_EPMODE_VND;
} else {
/* real device */
bridge_dev = port->bridge_ep;
bridge_port = rmnet_get_port_rtnl(bridge_dev);
rmnet_unregister_real_device(bridge_dev, bridge_port);
port->bridge_ep = NULL;
port->rmnet_mode = RMNET_EPMODE_VND;
}
netdev_upper_dev_unlink(bridge_dev, rmnet_dev);
rmnet_unregister_real_device(bridge_dev);
}
static int rmnet_newlink(struct net *src_net, struct net_device *dev,
@ -135,6 +116,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
int err = 0;
u16 mux_id;
if (!tb[IFLA_LINK]) {
NL_SET_ERR_MSG_MOD(extack, "link not specified");
return -EINVAL;
}
real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev || !dev)
return -ENODEV;
@ -157,7 +143,12 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
if (err)
goto err1;
err = netdev_upper_dev_link(real_dev, dev, extack);
if (err < 0)
goto err2;
port->rmnet_mode = mode;
port->rmnet_dev = dev;
hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
@ -173,8 +164,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
return 0;
err2:
unregister_netdevice(dev);
rmnet_vnd_dellink(mux_id, port, ep);
err1:
rmnet_unregister_real_device(real_dev, port);
rmnet_unregister_real_device(real_dev);
err0:
kfree(ep);
return err;
@ -183,77 +177,74 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
static void rmnet_dellink(struct net_device *dev, struct list_head *head)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct net_device *real_dev;
struct net_device *real_dev, *bridge_dev;
struct rmnet_port *real_port, *bridge_port;
struct rmnet_endpoint *ep;
struct rmnet_port *port;
u8 mux_id;
u8 mux_id = priv->mux_id;
real_dev = priv->real_dev;
if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
if (!rmnet_is_real_dev_registered(real_dev))
return;
port = rmnet_get_port_rtnl(real_dev);
real_port = rmnet_get_port_rtnl(real_dev);
bridge_dev = real_port->bridge_ep;
if (bridge_dev) {
bridge_port = rmnet_get_port_rtnl(bridge_dev);
rmnet_unregister_bridge(bridge_port);
}
mux_id = rmnet_vnd_get_mux(dev);
ep = rmnet_get_endpoint(port, mux_id);
ep = rmnet_get_endpoint(real_port, mux_id);
if (ep) {
hlist_del_init_rcu(&ep->hlnode);
rmnet_unregister_bridge(dev, port);
rmnet_vnd_dellink(mux_id, port, ep);
rmnet_vnd_dellink(mux_id, real_port, ep);
kfree(ep);
}
rmnet_unregister_real_device(real_dev, port);
netdev_upper_dev_unlink(real_dev, dev);
rmnet_unregister_real_device(real_dev);
unregister_netdevice_queue(dev, head);
}
static void rmnet_force_unassociate_device(struct net_device *dev)
static void rmnet_force_unassociate_device(struct net_device *real_dev)
{
struct net_device *real_dev = dev;
struct hlist_node *tmp_ep;
struct rmnet_endpoint *ep;
struct rmnet_port *port;
unsigned long bkt_ep;
LIST_HEAD(list);
if (!rmnet_is_real_dev_registered(real_dev))
return;
port = rmnet_get_port_rtnl(real_dev);
ASSERT_RTNL();
port = rmnet_get_port_rtnl(dev);
rcu_read_lock();
rmnet_unregister_bridge(dev, port);
hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
unregister_netdevice_queue(ep->egress_dev, &list);
rmnet_vnd_dellink(ep->mux_id, port, ep);
hlist_del_init_rcu(&ep->hlnode);
kfree(ep);
if (port->nr_rmnet_devs) {
/* real device */
rmnet_unregister_bridge(port);
hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
unregister_netdevice_queue(ep->egress_dev, &list);
netdev_upper_dev_unlink(real_dev, ep->egress_dev);
rmnet_vnd_dellink(ep->mux_id, port, ep);
hlist_del_init_rcu(&ep->hlnode);
kfree(ep);
}
rmnet_unregister_real_device(real_dev);
unregister_netdevice_many(&list);
} else {
rmnet_unregister_bridge(port);
}
rcu_read_unlock();
unregister_netdevice_many(&list);
rmnet_unregister_real_device(real_dev, port);
}
static int rmnet_config_notify_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
struct net_device *dev = netdev_notifier_info_to_dev(data);
struct net_device *real_dev = netdev_notifier_info_to_dev(data);
if (!dev)
if (!rmnet_is_real_dev_registered(real_dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UNREGISTER:
netdev_dbg(dev, "Kernel unregister\n");
rmnet_force_unassociate_device(dev);
netdev_dbg(real_dev, "Kernel unregister\n");
rmnet_force_unassociate_device(real_dev);
break;
default:
@ -295,16 +286,18 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
if (!dev)
return -ENODEV;
real_dev = __dev_get_by_index(dev_net(dev),
nla_get_u32(tb[IFLA_LINK]));
if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
real_dev = priv->real_dev;
if (!rmnet_is_real_dev_registered(real_dev))
return -ENODEV;
port = rmnet_get_port_rtnl(real_dev);
if (data[IFLA_RMNET_MUX_ID]) {
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
if (rmnet_get_endpoint(port, mux_id)) {
NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
return -EINVAL;
}
ep = rmnet_get_endpoint(port, priv->mux_id);
if (!ep)
return -ENODEV;
@ -379,11 +372,10 @@ struct rtnl_link_ops rmnet_link_ops __read_mostly = {
.fill_info = rmnet_fill_info,
};
/* Needs either rcu_read_lock() or rtnl lock */
struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev)
{
if (rmnet_is_real_dev_registered(real_dev))
return rcu_dereference_rtnl(real_dev->rx_handler_data);
return rcu_dereference_bh(real_dev->rx_handler_data);
else
return NULL;
}
@ -409,7 +401,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
struct rmnet_port *port, *slave_port;
int err;
port = rmnet_get_port(real_dev);
port = rmnet_get_port_rtnl(real_dev);
/* If there is more than one rmnet dev attached, its probably being
* used for muxing. Skip the briding in that case
@ -417,6 +409,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
if (port->nr_rmnet_devs > 1)
return -EINVAL;
if (port->rmnet_mode != RMNET_EPMODE_VND)
return -EINVAL;
if (rmnet_is_real_dev_registered(slave_dev))
return -EBUSY;
@ -424,9 +419,17 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
if (err)
return -EBUSY;
slave_port = rmnet_get_port(slave_dev);
err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
extack);
if (err) {
rmnet_unregister_real_device(slave_dev);
return err;
}
slave_port = rmnet_get_port_rtnl(slave_dev);
slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
slave_port->bridge_ep = real_dev;
slave_port->rmnet_dev = rmnet_dev;
port->rmnet_mode = RMNET_EPMODE_BRIDGE;
port->bridge_ep = slave_dev;
@ -438,16 +441,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
int rmnet_del_bridge(struct net_device *rmnet_dev,
struct net_device *slave_dev)
{
struct rmnet_priv *priv = netdev_priv(rmnet_dev);
struct net_device *real_dev = priv->real_dev;
struct rmnet_port *port, *slave_port;
struct rmnet_port *port = rmnet_get_port_rtnl(slave_dev);
port = rmnet_get_port(real_dev);
port->rmnet_mode = RMNET_EPMODE_VND;
port->bridge_ep = NULL;
slave_port = rmnet_get_port(slave_dev);
rmnet_unregister_real_device(slave_dev, slave_port);
rmnet_unregister_bridge(port);
netdev_dbg(slave_dev, "removed from rmnet as slave\n");
return 0;
@ -473,8 +469,8 @@ static int __init rmnet_init(void)
static void __exit rmnet_exit(void)
{
unregister_netdevice_notifier(&rmnet_dev_notifier);
rtnl_link_unregister(&rmnet_link_ops);
unregister_netdevice_notifier(&rmnet_dev_notifier);
}
module_init(rmnet_init)


@ -28,6 +28,7 @@ struct rmnet_port {
u8 rmnet_mode;
struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
struct net_device *bridge_ep;
struct net_device *rmnet_dev;
};
extern struct rtnl_link_ops rmnet_link_ops;
@ -65,7 +66,7 @@ struct rmnet_priv {
struct rmnet_priv_stats stats;
};
struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev);
struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
int rmnet_add_bridge(struct net_device *rmnet_dev,
struct net_device *slave_dev,


@ -159,6 +159,9 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
if (skb_mac_header_was_set(skb))
skb_push(skb, skb->mac_len);
if (bridge_dev) {
skb->dev = bridge_dev;
dev_queue_xmit(skb);
@ -184,7 +187,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
return RX_HANDLER_PASS;
dev = skb->dev;
port = rmnet_get_port(dev);
port = rmnet_get_port_rcu(dev);
switch (port->rmnet_mode) {
case RMNET_EPMODE_VND:
@ -217,7 +220,7 @@ void rmnet_egress_handler(struct sk_buff *skb)
skb->dev = priv->real_dev;
mux_id = priv->mux_id;
port = rmnet_get_port(skb->dev);
port = rmnet_get_port_rcu(skb->dev);
if (!port)
goto drop;


@ -266,14 +266,6 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
return 0;
}
u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
{
struct rmnet_priv *priv;
priv = netdev_priv(rmnet_dev);
return priv->mux_id;
}
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);


@ -16,6 +16,5 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
struct rmnet_endpoint *ep);
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev);
void rmnet_vnd_setup(struct net_device *dev);
#endif /* _RMNET_VND_H_ */


@ -560,13 +560,45 @@ efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx,
u32 nic_major, u32 nic_minor,
s32 correction)
{
u32 sync_timestamp;
ktime_t kt = { 0 };
s16 delta;
if (!(nic_major & 0x80000000)) {
WARN_ON_ONCE(nic_major >> 16);
/* Use the top bits from the latest sync event. */
nic_major &= 0xffff;
nic_major |= (last_sync_timestamp_major(efx) & 0xffff0000);
/* Medford provides 48 bits of timestamp, so we must get the top
* 16 bits from the timesync event state.
*
* We only have the lower 16 bits of the time now, but we do
* have a full resolution timestamp at some point in past. As
* long as the difference between the (real) now and the sync
* is less than 2^15, then we can reconstruct the difference
* between those two numbers using only the lower 16 bits of
* each.
*
* Put another way
*
* a - b = ((a mod k) - b) mod k
*
* when -k/2 < (a-b) < k/2. In our case k is 2^16. We know
* (a mod k) and b, so can calculate the delta, a - b.
*
*/
sync_timestamp = last_sync_timestamp_major(efx);
/* Because delta is s16 this does an implicit mask down to
* 16 bits which is what we need, assuming
* MEDFORD_TX_SECS_EVENT_BITS is 16. delta is signed so that
* we can deal with the (unlikely) case of sync timestamps
* arriving from the future.
*/
delta = nic_major - sync_timestamp;
/* Recover the fully specified time now, by applying the offset
* to the (fully specified) sync time.
*/
nic_major = sync_timestamp + delta;
kt = ptp->nic_to_kernel_time(nic_major, nic_minor,
correction);
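
The comment block above encodes a small piece of modular arithmetic; a minimal
userspace sketch of the same 16-bit reconstruction (the timestamp values are
invented for the example) behaves as described:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Rebuild a full "major" timestamp from its low 16 bits plus a recent,
 * fully specified sync timestamp.  Valid while the true value lies within
 * +/-2^15 of the sync point, i.e. k = 2^16 in the identity above.
 */
static uint32_t reconstruct_major(uint32_t sync_major, uint32_t nic_major_low16)
{
	/* truncation to s16 is the "mod k" step of (a mod k) - b */
	int16_t delta = nic_major_low16 - sync_major;

	/* apply the signed offset to the fully specified sync time */
	return sync_major + delta;
}

int main(void)
{
	uint32_t sync = 0x0001fff0;	/* sync event just below a 16-bit rollover */
	uint32_t low16 = 0x0005;	/* low bits of the true time 0x00020005 */

	printf("0x%08" PRIx32 "\n", reconstruct_major(sync, low16)); /* 0x00020005 */
	return 0;
}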


@ -4405,6 +4405,8 @@ static void stmmac_init_fs(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
rtnl_lock();
/* Create per netdev entries */
priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
@ -4416,14 +4418,13 @@ static void stmmac_init_fs(struct net_device *dev)
debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
&stmmac_dma_cap_fops);
register_netdevice_notifier(&stmmac_notifier);
rtnl_unlock();
}
static void stmmac_exit_fs(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
unregister_netdevice_notifier(&stmmac_notifier);
debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */
@ -4940,14 +4941,14 @@ int stmmac_dvr_remove(struct device *dev)
netdev_info(priv->dev, "%s: removing driver", __func__);
#ifdef CONFIG_DEBUG_FS
stmmac_exit_fs(ndev);
#endif
stmmac_stop_all_dma(priv);
stmmac_mac_set(priv, priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
#ifdef CONFIG_DEBUG_FS
stmmac_exit_fs(ndev);
#endif
phylink_destroy(priv->phylink);
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
@ -5166,6 +5167,7 @@ static int __init stmmac_init(void)
/* Create debugfs main directory if it doesn't exist yet */
if (!stmmac_fs_dir)
stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
register_netdevice_notifier(&stmmac_notifier);
#endif
return 0;
@ -5174,6 +5176,7 @@ static int __init stmmac_init(void)
static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
unregister_netdevice_notifier(&stmmac_notifier);
debugfs_remove_recursive(stmmac_fs_dir);
#endif
}


@ -375,10 +375,14 @@ struct temac_local {
int tx_bd_next;
int tx_bd_tail;
int rx_bd_ci;
int rx_bd_tail;
/* DMA channel control setup */
u32 tx_chnl_ctrl;
u32 rx_chnl_ctrl;
u8 coalesce_count_rx;
struct delayed_work restart_work;
};
/* Wrappers for temac_ior()/temac_iow() function pointers above */


@ -51,6 +51,7 @@
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>
@ -367,6 +368,8 @@ static int temac_dma_bd_init(struct net_device *ndev)
skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
XTE_MAX_JUMBO_FRAME_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
goto out;
lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
@ -387,12 +390,13 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->tx_bd_next = 0;
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
lp->rx_bd_tail = RX_BD_NUM - 1;
/* Enable RX DMA transfers */
wmb();
lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
lp->dma_out(lp, RX_TAILDESC_PTR,
lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));
/* Prepare for TX DMA transfer */
lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
@ -788,6 +792,9 @@ static void temac_start_xmit_done(struct net_device *ndev)
stat = be32_to_cpu(cur_p->app0);
}
/* Matches barrier in temac_start_xmit */
smp_mb();
netif_wake_queue(ndev);
}
@ -830,9 +837,19 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (temac_check_tx_bd_space(lp, num_frag + 1)) {
if (!netif_queue_stopped(ndev))
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
if (netif_queue_stopped(ndev))
return NETDEV_TX_BUSY;
netif_stop_queue(ndev);
/* Matches barrier in temac_start_xmit_done */
smp_mb();
/* Space might have just been freed - check again */
if (temac_check_tx_bd_space(lp, num_frag))
return NETDEV_TX_BUSY;
netif_wake_queue(ndev);
}
cur_p->app0 = 0;
@ -850,12 +867,16 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
cur_p->len = cpu_to_be32(skb_headlen(skb));
if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
dev_kfree_skb_any(skb);
ndev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
cur_p->phys = cpu_to_be32(skb_dma_addr);
ptr_to_txbd((void *)skb, cur_p);
for (ii = 0; ii < num_frag; ii++) {
lp->tx_bd_tail++;
if (lp->tx_bd_tail >= TX_BD_NUM)
if (++lp->tx_bd_tail >= TX_BD_NUM)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@ -863,6 +884,27 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_frag_address(frag),
skb_frag_size(frag),
DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
if (--lp->tx_bd_tail < 0)
lp->tx_bd_tail = TX_BD_NUM - 1;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
while (--ii >= 0) {
--frag;
dma_unmap_single(ndev->dev.parent,
be32_to_cpu(cur_p->phys),
skb_frag_size(frag),
DMA_TO_DEVICE);
if (--lp->tx_bd_tail < 0)
lp->tx_bd_tail = TX_BD_NUM - 1;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
}
dma_unmap_single(ndev->dev.parent,
be32_to_cpu(cur_p->phys),
skb_headlen(skb), DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
ndev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
cur_p->phys = cpu_to_be32(skb_dma_addr);
cur_p->len = cpu_to_be32(skb_frag_size(frag));
cur_p->app0 = 0;
@ -884,31 +926,56 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
static int ll_temac_recv_buffers_available(struct temac_local *lp)
{
int available;
if (!lp->rx_skb[lp->rx_bd_ci])
return 0;
available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
if (available <= 0)
available += RX_BD_NUM;
return available;
}
static void ll_temac_recv(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct sk_buff *skb, *new_skb;
unsigned int bdstat;
struct cdmac_bd *cur_p;
dma_addr_t tail_p, skb_dma_addr;
int length;
unsigned long flags;
int rx_bd;
bool update_tail = false;
spin_lock_irqsave(&lp->rx_lock, flags);
tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
/* Process all received buffers, passing them on network
* stack. After this, the buffer descriptors will be in an
* un-allocated stage, where no skb is allocated for it, and
* they are therefore not available for TEMAC/DMA.
*/
do {
struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
unsigned int bdstat = be32_to_cpu(bd->app0);
int length;
bdstat = be32_to_cpu(cur_p->app0);
while ((bdstat & STS_CTRL_APP0_CMPLT)) {
/* While this should not normally happen, we can end
* here when GFP_ATOMIC allocations fail, and we
* therefore have un-allocated buffers.
*/
if (!skb)
break;
skb = lp->rx_skb[lp->rx_bd_ci];
length = be32_to_cpu(cur_p->app4) & 0x3FFF;
/* Loop over all completed buffer descriptors */
if (!(bdstat & STS_CTRL_APP0_CMPLT))
break;
dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
/* The buffer is not valid for DMA anymore */
bd->phys = 0;
bd->len = 0;
length = be32_to_cpu(bd->app4) & 0x3FFF;
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, ndev);
skb_checksum_none_assert(skb);
@ -923,43 +990,102 @@ static void ll_temac_recv(struct net_device *ndev)
* (back) for proper IP checksum byte order
* (be16).
*/
skb->csum = htons(be32_to_cpu(cur_p->app3) & 0xFFFF);
skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
skb->ip_summed = CHECKSUM_COMPLETE;
}
if (!skb_defer_rx_timestamp(skb))
netif_rx(skb);
/* The skb buffer is now owned by network stack above */
lp->rx_skb[lp->rx_bd_ci] = NULL;
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += length;
new_skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE);
if (!new_skb) {
spin_unlock_irqrestore(&lp->rx_lock, flags);
return;
rx_bd = lp->rx_bd_ci;
if (++lp->rx_bd_ci >= RX_BD_NUM)
lp->rx_bd_ci = 0;
} while (rx_bd != lp->rx_bd_tail);
/* DMA operations will halt when the last buffer descriptor is
* processed (ie. the one pointed to by RX_TAILDESC_PTR).
* When that happens, no more interrupt events will be
* generated. No IRQ_COAL or IRQ_DLY, and not even an
* IRQ_ERR. To avoid stalling, we schedule a delayed work
* when there is a potential risk of that happening. The work
* will call this function, and thus re-schedule itself until
* enough buffers are available again.
*/
if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
schedule_delayed_work(&lp->restart_work, HZ / 1000);
/* Allocate new buffers for those buffer descriptors that were
* passed to network stack. Note that GFP_ATOMIC allocations
* can fail (e.g. when a larger burst of GFP_ATOMIC
* allocations occurs), so while we try to allocate all
* buffers in the same interrupt where they were processed, we
* continue with what we could get in case of allocation
* failure. Allocation of remaining buffers will be retried
* in following calls.
*/
while (1) {
struct sk_buff *skb;
struct cdmac_bd *bd;
dma_addr_t skb_dma_addr;
rx_bd = lp->rx_bd_tail + 1;
if (rx_bd >= RX_BD_NUM)
rx_bd = 0;
bd = &lp->rx_bd_v[rx_bd];
if (bd->phys)
break; /* All skb's allocated */
skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
if (!skb) {
dev_warn(&ndev->dev, "skb alloc failed\n");
break;
}
cur_p->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
skb_dma_addr = dma_map_single(ndev->dev.parent, new_skb->data,
skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
XTE_MAX_JUMBO_FRAME_SIZE,
DMA_FROM_DEVICE);
cur_p->phys = cpu_to_be32(skb_dma_addr);
cur_p->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
lp->rx_skb[lp->rx_bd_ci] = new_skb;
if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
skb_dma_addr))) {
dev_kfree_skb_any(skb);
break;
}
lp->rx_bd_ci++;
if (lp->rx_bd_ci >= RX_BD_NUM)
lp->rx_bd_ci = 0;
bd->phys = cpu_to_be32(skb_dma_addr);
bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
lp->rx_skb[rx_bd] = skb;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
bdstat = be32_to_cpu(cur_p->app0);
lp->rx_bd_tail = rx_bd;
update_tail = true;
}
/* Move tail pointer when buffers have been allocated */
if (update_tail) {
lp->dma_out(lp, RX_TAILDESC_PTR,
lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
}
lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
spin_unlock_irqrestore(&lp->rx_lock, flags);
}
/* Function scheduled to ensure a restart in case of DMA halt
* condition caused by running out of buffer descriptors.
*/
static void ll_temac_restart_work_func(struct work_struct *work)
{
struct temac_local *lp = container_of(work, struct temac_local,
restart_work.work);
struct net_device *ndev = lp->ndev;
ll_temac_recv(ndev);
}
static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
struct net_device *ndev = _ndev;
@ -1052,6 +1178,8 @@ static int temac_stop(struct net_device *ndev)
dev_dbg(&ndev->dev, "temac_close()\n");
cancel_delayed_work_sync(&lp->restart_work);
free_irq(lp->tx_irq, ndev);
free_irq(lp->rx_irq, ndev);
@ -1173,6 +1301,7 @@ static int temac_probe(struct platform_device *pdev)
lp->dev = &pdev->dev;
lp->options = XTE_OPTION_DEFAULTS;
spin_lock_init(&lp->rx_lock);
INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
/* Setup mutex for synchronization of indirect register access */
if (pdata) {
@ -1279,6 +1408,7 @@ static int temac_probe(struct platform_device *pdev)
*/
lp->tx_chnl_ctrl = 0x10220000;
lp->rx_chnl_ctrl = 0xff070000;
lp->coalesce_count_rx = 0x07;
/* Finished with the DMA node; drop the reference */
of_node_put(dma_np);
@ -1310,11 +1440,14 @@ static int temac_probe(struct platform_device *pdev)
(pdata->tx_irq_count << 16);
else
lp->tx_chnl_ctrl = 0x10220000;
if (pdata->rx_irq_timeout || pdata->rx_irq_count)
if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
(pdata->rx_irq_count << 16);
else
lp->coalesce_count_rx = pdata->rx_irq_count;
} else {
lp->rx_chnl_ctrl = 0xff070000;
lp->coalesce_count_rx = 0x07;
}
}
/* Error handle returned DMA RX and TX interrupts */
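
The stall-avoidance logic above keys off the ring-occupancy count from the new
ll_temac_recv_buffers_available() helper; stripped of driver context, the same
circular-index arithmetic looks like this (ring size picked arbitrarily for the
sketch; the real helper also returns 0 when the current descriptor has no skb
attached):

#include <stdio.h>

#define RX_BD_NUM 64	/* illustrative ring size */

/* Count descriptors from the consumer index ci up to and including tail,
 * wrapping around the end of the ring.
 */
static int buffers_available(int ci, int tail)
{
	int available = 1 + tail - ci;

	if (available <= 0)
		available += RX_BD_NUM;
	return available;
}

int main(void)
{
	printf("%d\n", buffers_available(0, RX_BD_NUM - 1));	/* full ring: 64 */
	printf("%d\n", buffers_available(60, 3));		/* wrapped case: 8 */
	return 0;
}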


@ -99,7 +99,7 @@ static struct netvsc_device *alloc_net_device(void)
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
net_device->tx_disable = false;
net_device->tx_disable = true;
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;


@ -1068,6 +1068,7 @@ static int netvsc_attach(struct net_device *ndev,
}
/* In any case device is now ready */
nvdev->tx_disable = false;
netif_device_attach(ndev);
/* Note: enable and attach happen when sub-channels setup */
@ -2476,6 +2477,8 @@ static int netvsc_probe(struct hv_device *dev,
else
net->max_mtu = ETH_DATA_LEN;
nvdev->tx_disable = false;
ret = register_netdevice(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");


@ -1306,6 +1306,9 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
}
}
if (!(status & MII_M1011_PHY_STATUS_RESOLVED))
return 0;
if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
phydev->duplex = DUPLEX_FULL;
else
@ -1365,6 +1368,8 @@ static int marvell_read_status_page(struct phy_device *phydev, int page)
linkmode_zero(phydev->lp_advertising);
phydev->pause = 0;
phydev->asym_pause = 0;
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
if (phydev->autoneg == AUTONEG_ENABLE)
err = marvell_read_status_page_an(phydev, fiber, status);


@ -345,11 +345,11 @@ enum macsec_bank {
BIT(VSC8531_FORCE_LED_OFF) | \
BIT(VSC8531_FORCE_LED_ON))
#define MSCC_VSC8584_REVB_INT8051_FW "mscc_vsc8584_revb_int8051_fb48.bin"
#define MSCC_VSC8584_REVB_INT8051_FW "microchip/mscc_vsc8584_revb_int8051_fb48.bin"
#define MSCC_VSC8584_REVB_INT8051_FW_START_ADDR 0xe800
#define MSCC_VSC8584_REVB_INT8051_FW_CRC 0xfb48
#define MSCC_VSC8574_REVB_INT8051_FW "mscc_vsc8574_revb_int8051_29e8.bin"
#define MSCC_VSC8574_REVB_INT8051_FW "microchip/mscc_vsc8574_revb_int8051_29e8.bin"
#define MSCC_VSC8574_REVB_INT8051_FW_START_ADDR 0x4000
#define MSCC_VSC8574_REVB_INT8051_FW_CRC 0x29e8


@ -167,7 +167,7 @@ EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg);
*/
int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart)
{
int ret = 0;
int ret;
if (!restart) {
/* Configure and restart aneg if it wasn't set before */
@ -180,9 +180,9 @@ int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart)
}
if (restart)
ret = genphy_c45_restart_aneg(phydev);
return genphy_c45_restart_aneg(phydev);
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(genphy_c45_check_and_restart_aneg);


@ -247,7 +247,7 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
* MDIO bus driver and clock gated at this point.
*/
if (!netdev)
return !phydev->suspended;
goto out;
if (netdev->wol_enabled)
return false;
@ -267,7 +267,8 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
if (device_may_wakeup(&netdev->dev))
return false;
return true;
out:
return !phydev->suspended;
}
static int mdio_bus_phy_suspend(struct device *dev)
@ -1792,7 +1793,7 @@ EXPORT_SYMBOL(genphy_restart_aneg);
*/
int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart)
{
int ret = 0;
int ret;
if (!restart) {
/* Advertisement hasn't changed, but maybe aneg was never on to
@ -1807,9 +1808,9 @@ int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart)
}
if (restart)
ret = genphy_restart_aneg(phydev);
return genphy_restart_aneg(phydev);
return ret;
return 0;
}
EXPORT_SYMBOL(genphy_check_and_restart_aneg);


@ -863,7 +863,10 @@ static int slip_open(struct tty_struct *tty)
tty->disc_data = NULL;
clear_bit(SLF_INUSE, &sl->flags);
sl_free_netdev(sl->dev);
/* do not call free_netdev before rtnl_unlock */
rtnl_unlock();
free_netdev(sl->dev);
return err;
err_exit:
rtnl_unlock();


@ -337,6 +337,9 @@ static void qmi_wwan_netdev_setup(struct net_device *net)
netdev_dbg(net, "mode: raw IP\n");
} else if (!net->header_ops) { /* don't bother if already set */
ether_setup(net);
/* Restoring min/max mtu values set originally by usbnet */
net->min_mtu = 0;
net->max_mtu = ETH_MAX_MTU;
clear_bit(EVENT_NO_IP_ALIGN, &dev->flags);
netdev_dbg(net, "mode: Ethernet\n");
}


@ -1414,10 +1414,6 @@ static int vhost_net_release(struct inode *inode, struct file *f)
static struct socket *get_raw_socket(int fd)
{
struct {
struct sockaddr_ll sa;
char buf[MAX_ADDR_LEN];
} uaddr;
int r;
struct socket *sock = sockfd_lookup(fd, &r);
@ -1430,11 +1426,7 @@ static struct socket *get_raw_socket(int fd)
goto err;
}
r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, 0);
if (r < 0)
goto err;
if (uaddr.sa.sll_family != AF_PACKET) {
if (sock->sk->sk_family != AF_PACKET) {
r = -EPFNOSUPPORT;
goto err;
}


@ -22,19 +22,23 @@ extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
unsigned int data_len);
#if IS_ENABLED(CONFIG_NF_NAT)
void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
#else
#define icmpv6_ndo_send icmpv6_send
#endif
#else
static inline void icmpv6_send(struct sk_buff *skb,
u8 type, u8 code, __u32 info)
{
}
#endif
#if IS_ENABLED(CONFIG_NF_NAT)
void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
#else
#define icmpv6_ndo_send icmpv6_send
static inline void icmpv6_ndo_send(struct sk_buff *skb,
u8 type, u8 code, __u32 info)
{
}
#endif
extern int icmpv6_init(void);


@ -121,6 +121,7 @@ struct ip_set_ext {
u32 timeout;
u8 packets_op;
u8 bytes_op;
bool target;
};
struct ip_set;
@ -187,6 +188,14 @@ struct ip_set_type_variant {
/* Return true if "b" set is the same as "a"
* according to the create set parameters */
bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
/* Region-locking is used */
bool region_lock;
};
struct ip_set_region {
spinlock_t lock; /* Region lock */
size_t ext_size; /* Size of the dynamic extensions */
u32 elements; /* Number of elements vs timeout */
};
/* The core set type structure */
@ -501,7 +510,7 @@ ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
}
#define IP_SET_INIT_KEXT(skb, opt, set) \
{ .bytes = (skb)->len, .packets = 1, \
{ .bytes = (skb)->len, .packets = 1, .target = true,\
.timeout = ip_set_adt_opt_timeout(opt, set) }
#define IP_SET_INIT_UEXT(set) \


@ -34,7 +34,6 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
const struct nf_br_ops *nf_ops;
u8 state = BR_STATE_FORWARDING;
const unsigned char *dest;
struct ethhdr *eth;
u16 vid = 0;
rcu_read_lock();
@ -54,15 +53,14 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
BR_INPUT_SKB_CB(skb)->frag_max_size = 0;
skb_reset_mac_header(skb);
eth = eth_hdr(skb);
skb_pull(skb, ETH_HLEN);
if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid, &state))
goto out;
if (IS_ENABLED(CONFIG_INET) &&
(eth->h_proto == htons(ETH_P_ARP) ||
eth->h_proto == htons(ETH_P_RARP)) &&
(eth_hdr(skb)->h_proto == htons(ETH_P_ARP) ||
eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
br_do_proxy_suppress_arp(skb, br, vid, NULL);
} else if (IS_ENABLED(CONFIG_IPV6) &&


@ -3076,6 +3076,8 @@ static u16 skb_tx_hash(const struct net_device *dev,
if (skb_rx_queue_recorded(skb)) {
hash = skb_get_rx_queue(skb);
if (hash >= qoffset)
hash -= qoffset;
while (unlikely(hash >= qcount))
hash -= qcount;
return hash + qoffset;


@ -2103,11 +2103,11 @@ static int devlink_dpipe_entry_put(struct sk_buff *skb,
static struct devlink_dpipe_table *
devlink_dpipe_table_find(struct list_head *dpipe_tables,
const char *table_name)
const char *table_name, struct devlink *devlink)
{
struct devlink_dpipe_table *table;
list_for_each_entry_rcu(table, dpipe_tables, list) {
list_for_each_entry_rcu(table, dpipe_tables, list,
lockdep_is_held(&devlink->lock)) {
if (!strcmp(table->name, table_name))
return table;
}
@ -2226,7 +2226,7 @@ static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
table_name);
table_name, devlink);
if (!table)
return -EINVAL;
@ -2382,7 +2382,7 @@ static int devlink_dpipe_table_counters_set(struct devlink *devlink,
struct devlink_dpipe_table *table;
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
table_name);
table_name, devlink);
if (!table)
return -EINVAL;
@ -6854,7 +6854,7 @@ bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
rcu_read_lock();
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
table_name);
table_name, devlink);
enabled = false;
if (table)
enabled = table->counters_enabled;
@ -6878,26 +6878,34 @@ int devlink_dpipe_table_register(struct devlink *devlink,
void *priv, bool counter_control_extern)
{
struct devlink_dpipe_table *table;
if (devlink_dpipe_table_find(&devlink->dpipe_table_list, table_name))
return -EEXIST;
int err = 0;
if (WARN_ON(!table_ops->size_get))
return -EINVAL;
mutex_lock(&devlink->lock);
if (devlink_dpipe_table_find(&devlink->dpipe_table_list, table_name,
devlink)) {
err = -EEXIST;
goto unlock;
}
table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
if (!table) {
err = -ENOMEM;
goto unlock;
}
table->name = table_name;
table->table_ops = table_ops;
table->priv = priv;
table->counter_control_extern = counter_control_extern;
mutex_lock(&devlink->lock);
list_add_tail_rcu(&table->list, &devlink->dpipe_table_list);
unlock:
mutex_unlock(&devlink->lock);
return 0;
return err;
}
EXPORT_SYMBOL_GPL(devlink_dpipe_table_register);
@ -6914,7 +6922,7 @@ void devlink_dpipe_table_unregister(struct devlink *devlink,
mutex_lock(&devlink->lock);
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
table_name);
table_name, devlink);
if (!table)
goto unlock;
list_del_rcu(&table->list);
@ -7071,7 +7079,7 @@ int devlink_dpipe_table_resource_set(struct devlink *devlink,
mutex_lock(&devlink->lock);
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
table_name);
table_name, devlink);
if (!table) {
err = -EINVAL;
goto out;

View File

@ -305,7 +305,8 @@ int ethnl_put_bitset32(struct sk_buff *skb, int attrtype, const u32 *val,
static const struct nla_policy bitset_policy[ETHTOOL_A_BITSET_MAX + 1] = {
[ETHTOOL_A_BITSET_UNSPEC] = { .type = NLA_REJECT },
[ETHTOOL_A_BITSET_NOMASK] = { .type = NLA_FLAG },
[ETHTOOL_A_BITSET_SIZE] = { .type = NLA_U32 },
[ETHTOOL_A_BITSET_SIZE] = NLA_POLICY_MAX(NLA_U32,
ETHNL_MAX_BITSET_SIZE),
[ETHTOOL_A_BITSET_BITS] = { .type = NLA_NESTED },
[ETHTOOL_A_BITSET_VALUE] = { .type = NLA_BINARY },
[ETHTOOL_A_BITSET_MASK] = { .type = NLA_BINARY },

View File

@ -3,6 +3,8 @@
#ifndef _NET_ETHTOOL_BITSET_H
#define _NET_ETHTOOL_BITSET_H
#define ETHNL_MAX_BITSET_SIZE S16_MAX
typedef const char (*const ethnl_string_array_t)[ETH_GSTRING_LEN];
int ethnl_bitset_is_compact(const struct nlattr *bitset, bool *compact);

View File

@ -1724,6 +1724,7 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
{
unsigned char optbuf[sizeof(struct ip_options) + 40];
struct ip_options *opt = (struct ip_options *)optbuf;
int res;
if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
return;
@ -1735,7 +1736,11 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
memset(opt, 0, sizeof(struct ip_options));
opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
rcu_read_lock();
res = __ip_options_compile(dev_net(skb->dev), opt, skb, NULL);
rcu_read_unlock();
if (res)
return;
if (gateway)

View File

@ -6124,7 +6124,11 @@ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
{
struct request_sock *req;
tcp_try_undo_loss(sk, false);
/* If we are still handling the SYNACK RTO, see if timestamp ECR allows
* undo. If peer SACKs triggered fast recovery, we can't undo here.
*/
if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
tcp_try_undo_loss(sk, false);
/* Reset rtx states to prevent spurious retransmits_timed_out() */
tcp_sk(sk)->retrans_stamp = 0;

View File

@ -183,9 +183,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
retv = -EBUSY;
break;
}
} else if (sk->sk_protocol != IPPROTO_TCP)
} else if (sk->sk_protocol == IPPROTO_TCP) {
if (sk->sk_prot != &tcpv6_prot) {
retv = -EBUSY;
break;
}
break;
} else {
break;
}
if (sk->sk_state != TCP_ESTABLISHED) {
retv = -ENOTCONN;
break;

View File

@ -2959,7 +2959,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
(auth_transaction == 2 &&
ifmgd->auth_data->expected_transaction == 2)) {
if (!ieee80211_mark_sta_auth(sdata, bssid))
goto out_err;
return; /* ignore frame -- wait for timeout */
} else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
auth_transaction == 2) {
sdata_info(sdata, "SAE peer confirmed\n");
@ -2967,10 +2967,6 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
}
cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
return;
out_err:
mutex_unlock(&sdata->local->sta_mtx);
/* ignore frame -- wait for timeout */
}
#define case_WLAN(type) \

View File

@ -4114,7 +4114,7 @@ void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
lockdep_assert_held(&local->sta_mtx);
list_for_each_entry_rcu(sta, &local->sta_list, list) {
list_for_each_entry(sta, &local->sta_list, list) {
if (sdata != sta->sdata &&
(!sta->sdata->bss || sta->sdata->bss != sdata->bss))
continue;

View File

@ -543,6 +543,11 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
}
}
static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
{
return 0;
}
static int __mptcp_init_sock(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@ -551,6 +556,7 @@ static int __mptcp_init_sock(struct sock *sk)
__set_bit(MPTCP_SEND_SPACE, &msk->flags);
msk->first = NULL;
inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
return 0;
}

View File

@ -723,6 +723,20 @@ ip_set_rcu_get(struct net *net, ip_set_id_t index)
return set;
}
static inline void
ip_set_lock(struct ip_set *set)
{
if (!set->variant->region_lock)
spin_lock_bh(&set->lock);
}
static inline void
ip_set_unlock(struct ip_set *set)
{
if (!set->variant->region_lock)
spin_unlock_bh(&set->lock);
}
int
ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
const struct xt_action_param *par, struct ip_set_adt_opt *opt)
@ -744,9 +758,9 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
if (ret == -EAGAIN) {
/* Type requests element to be completed */
pr_debug("element must be completed, ADD is triggered\n");
spin_lock_bh(&set->lock);
ip_set_lock(set);
set->variant->kadt(set, skb, par, IPSET_ADD, opt);
spin_unlock_bh(&set->lock);
ip_set_unlock(set);
ret = 1;
} else {
/* --return-nomatch: invert matched element */
@ -775,9 +789,9 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
!(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return -IPSET_ERR_TYPE_MISMATCH;
spin_lock_bh(&set->lock);
ip_set_lock(set);
ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
spin_unlock_bh(&set->lock);
ip_set_unlock(set);
return ret;
}
@ -797,9 +811,9 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
!(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return -IPSET_ERR_TYPE_MISMATCH;
spin_lock_bh(&set->lock);
ip_set_lock(set);
ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
spin_unlock_bh(&set->lock);
ip_set_unlock(set);
return ret;
}
@ -1264,9 +1278,9 @@ ip_set_flush_set(struct ip_set *set)
{
pr_debug("set: %s\n", set->name);
spin_lock_bh(&set->lock);
ip_set_lock(set);
set->variant->flush(set);
spin_unlock_bh(&set->lock);
ip_set_unlock(set);
}
static int ip_set_flush(struct net *net, struct sock *ctnl, struct sk_buff *skb,
@ -1713,9 +1727,9 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
do {
spin_lock_bh(&set->lock);
ip_set_lock(set);
ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
spin_unlock_bh(&set->lock);
ip_set_unlock(set);
retried = true;
} while (ret == -EAGAIN &&
set->variant->resize &&

File diff suppressed because it is too large

View File

@ -1766,11 +1766,13 @@ static bool pipapo_match_field(struct nft_pipapo_field *f,
static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem)
{
const u8 *data = (const u8 *)elem->key.val.data;
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_match *m = priv->clone;
struct nft_pipapo_elem *e = elem->priv;
int rules_f0, first_rule = 0;
struct nft_pipapo_elem *e;
const u8 *data;
data = (const u8 *)nft_set_ext_key(&e->ext);
e = pipapo_get(net, set, data, 0);
if (IS_ERR(e))

View File

@ -402,15 +402,6 @@ static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
remove_proc_entry(hinfo->name, parent);
}
static void htable_destroy(struct xt_hashlimit_htable *hinfo)
{
cancel_delayed_work_sync(&hinfo->gc_work);
htable_remove_proc_entry(hinfo);
htable_selective_cleanup(hinfo, true);
kfree(hinfo->name);
vfree(hinfo);
}
static struct xt_hashlimit_htable *htable_find_get(struct net *net,
const char *name,
u_int8_t family)
@ -432,8 +423,13 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
{
if (refcount_dec_and_mutex_lock(&hinfo->use, &hashlimit_mutex)) {
hlist_del(&hinfo->node);
htable_remove_proc_entry(hinfo);
mutex_unlock(&hashlimit_mutex);
htable_destroy(hinfo);
cancel_delayed_work_sync(&hinfo->gc_work);
htable_selective_cleanup(hinfo, true);
kfree(hinfo->name);
vfree(hinfo);
}
}

View File

@ -497,8 +497,9 @@ genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
err = __nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
family->policy, validate, extack);
if (err && parallel) {
kfree(attrbuf);
if (err) {
if (parallel)
kfree(attrbuf);
return ERR_PTR(err);
}
return attrbuf;

View File

@ -186,6 +186,7 @@ static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
+ cookie_len /* TCA_ACT_COOKIE */
+ nla_total_size(0) /* TCA_ACT_STATS nested */
+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
/* TCA_STATS_BASIC */
+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
/* TCA_STATS_PKT64 */

View File

@ -512,15 +512,18 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
static int smc_connect_abort(struct smc_sock *smc, int reason_code,
int local_contact)
{
bool is_smcd = smc->conn.lgr->is_smcd;
if (local_contact == SMC_FIRST_CONTACT)
smc_lgr_forget(smc->conn.lgr);
if (smc->conn.lgr->is_smcd)
smc_lgr_cleanup_early(&smc->conn);
else
smc_conn_free(&smc->conn);
if (is_smcd)
/* there is only one lgr role for SMC-D; use server lock */
mutex_unlock(&smc_server_lgr_pending);
else
mutex_unlock(&smc_client_lgr_pending);
smc_conn_free(&smc->conn);
smc->connect_nonblock = 0;
return reason_code;
}
@ -1091,7 +1094,6 @@ static void smc_listen_out_err(struct smc_sock *new_smc)
if (newsmcsk->sk_state == SMC_INIT)
sock_put(&new_smc->sk); /* passive closing */
newsmcsk->sk_state = SMC_CLOSED;
smc_conn_free(&new_smc->conn);
smc_listen_out(new_smc);
}
@ -1102,12 +1104,13 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
{
/* RDMA setup failed, switch back to TCP */
if (local_contact == SMC_FIRST_CONTACT)
smc_lgr_forget(new_smc->conn.lgr);
smc_lgr_cleanup_early(&new_smc->conn);
else
smc_conn_free(&new_smc->conn);
if (reason_code < 0) { /* error, no fallback possible */
smc_listen_out_err(new_smc);
return;
}
smc_conn_free(&new_smc->conn);
smc_switch_to_fallback(new_smc);
new_smc->fallback_rsn = reason_code;
if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
@ -1170,16 +1173,18 @@ static int smc_listen_ism_init(struct smc_sock *new_smc,
new_smc->conn.lgr->vlan_id,
new_smc->conn.lgr->smcd)) {
if (ini->cln_first_contact == SMC_FIRST_CONTACT)
smc_lgr_forget(new_smc->conn.lgr);
smc_conn_free(&new_smc->conn);
smc_lgr_cleanup_early(&new_smc->conn);
else
smc_conn_free(&new_smc->conn);
return SMC_CLC_DECL_SMCDNOTALK;
}
/* Create send and receive buffers */
if (smc_buf_create(new_smc, true)) {
if (ini->cln_first_contact == SMC_FIRST_CONTACT)
smc_lgr_forget(new_smc->conn.lgr);
smc_conn_free(&new_smc->conn);
smc_lgr_cleanup_early(&new_smc->conn);
else
smc_conn_free(&new_smc->conn);
return SMC_CLC_DECL_MEM;
}

View File

@ -162,6 +162,18 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
conn->lgr = NULL;
}
void smc_lgr_cleanup_early(struct smc_connection *conn)
{
struct smc_link_group *lgr = conn->lgr;
if (!lgr)
return;
smc_conn_free(conn);
smc_lgr_forget(lgr);
smc_lgr_schedule_free_work_fast(lgr);
}
/* Send delete link, either as client to request the initiation
* of the DELETE LINK sequence from server; or as server to
* initiate the delete processing. See smc_llc_rx_delete_link().

View File

@ -296,6 +296,7 @@ struct smc_clc_msg_accept_confirm;
struct smc_clc_msg_local;
void smc_lgr_forget(struct smc_link_group *lgr);
void smc_lgr_cleanup_early(struct smc_connection *conn);
void smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
@ -316,7 +317,6 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini);
void smc_conn_free(struct smc_connection *conn);
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini);
void smcd_conn_free(struct smc_connection *conn);
void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr);
int smc_core_init(void);
void smc_core_exit(void);

View File

@ -573,6 +573,8 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
struct smc_ib_device *smcibdev;
smcibdev = ib_get_client_data(ibdev, &smc_ib_client);
if (!smcibdev || smcibdev->ibdev != ibdev)
return;
ib_set_client_data(ibdev, &smc_ib_client, NULL);
spin_lock(&smc_ib_devices.lock);
list_del_init(&smcibdev->list); /* remove from smc_ib_devices */

View File

@ -682,6 +682,7 @@ static int unix_set_peek_off(struct sock *sk, int val)
return 0;
}
#ifdef CONFIG_PROC_FS
static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
{
struct sock *sk = sock->sk;
@ -692,6 +693,9 @@ static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
seq_printf(m, "scm_fds: %u\n", READ_ONCE(u->scm_stat.nr_fds));
}
}
#else
#define unix_show_fdinfo NULL
#endif
static const struct proto_ops unix_stream_ops = {
.family = PF_UNIX,

View File

@ -451,6 +451,12 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
if (vsk->transport == new_transport)
return 0;
/* transport->release() must be called with sock lock acquired.
* This path can only be taken during vsock_stream_connect(),
* where we already hold the sock lock.
* In the other cases, this function is called on a new socket
* which is not assigned to any transport.
*/
vsk->transport->release(vsk);
vsock_deassign_transport(vsk);
}
@ -753,20 +759,18 @@ static void __vsock_release(struct sock *sk, int level)
vsk = vsock_sk(sk);
pending = NULL; /* Compiler warning. */
/* The release call is supposed to use lock_sock_nested()
* rather than lock_sock(), if a sock lock should be acquired.
*/
if (vsk->transport)
vsk->transport->release(vsk);
else if (sk->sk_type == SOCK_STREAM)
vsock_remove_sock(vsk);
/* When "level" is SINGLE_DEPTH_NESTING, use the nested
* version to avoid the warning "possible recursive locking
* detected". When "level" is 0, lock_sock_nested(sk, level)
* is the same as lock_sock(sk).
*/
lock_sock_nested(sk, level);
if (vsk->transport)
vsk->transport->release(vsk);
else if (sk->sk_type == SOCK_STREAM)
vsock_remove_sock(vsk);
sock_orphan(sk);
sk->sk_shutdown = SHUTDOWN_MASK;

View File

@ -526,12 +526,9 @@ static bool hvs_close_lock_held(struct vsock_sock *vsk)
static void hvs_release(struct vsock_sock *vsk)
{
struct sock *sk = sk_vsock(vsk);
bool remove_sock;
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
remove_sock = hvs_close_lock_held(vsk);
release_sock(sk);
if (remove_sock)
vsock_remove_sock(vsk);
}

View File

@ -829,7 +829,6 @@ void virtio_transport_release(struct vsock_sock *vsk)
struct sock *sk = &vsk->sk;
bool remove_sock = true;
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_type == SOCK_STREAM)
remove_sock = virtio_transport_close(vsk);
@ -837,7 +836,6 @@ void virtio_transport_release(struct vsock_sock *vsk)
list_del(&pkt->list);
virtio_transport_free_pkt(pkt);
}
release_sock(sk);
if (remove_sock)
vsock_remove_sock(vsk);

View File

@ -20,6 +20,7 @@
#include <linux/netlink.h>
#include <linux/nospec.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <net/net_namespace.h>
#include <net/genetlink.h>
#include <net/cfg80211.h>
@ -4800,8 +4801,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
err = nl80211_parse_he_obss_pd(
info->attrs[NL80211_ATTR_HE_OBSS_PD],
&params.he_obss_pd);
if (err)
return err;
goto out;
}
nl80211_calculate_ap_params(&params);
@ -4823,6 +4823,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
}
wdev_unlock(wdev);
out:
kfree(params.acl);
return err;

View File

@ -2276,7 +2276,7 @@ static void handle_channel_custom(struct wiphy *wiphy,
break;
}
if (IS_ERR(reg_rule)) {
if (IS_ERR_OR_NULL(reg_rule)) {
pr_debug("Disabling freq %d MHz as custom regd has no rule that fits it\n",
chan->center_freq);
if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {

View File

@ -13,11 +13,12 @@
KSELFTEST_SKIP=4
# Available test groups:
# - reported_issues: check for issues that were reported in the past
# - correctness: check that packets match given entries, and only those
# - concurrency: attempt races between insertion, deletion and lookup
# - timeout: check that packets match entries until they expire
# - performance: estimate matching rate, compare with rbtree and hash baselines
TESTS="correctness concurrency timeout"
TESTS="reported_issues correctness concurrency timeout"
[ "${quicktest}" != "1" ] && TESTS="${TESTS} performance"
# Set types, defined by TYPE_ variables below
@ -25,6 +26,9 @@ TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto
net_port_net net_mac net_mac_icmp net6_mac_icmp net6_port_net6_port
net_port_mac_proto_net"
# Reported bugs, also described by TYPE_ variables below
BUGS="flush_remove_add"
# List of possible paths to pktgen script from kernel tree for performance tests
PKTGEN_SCRIPT_PATHS="
../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
@ -327,6 +331,12 @@ flood_spec ip daddr . tcp dport . meta l4proto . ip saddr
perf_duration 0
"
# Definition of tests for bugs reported in the past:
# display      display text for test report
TYPE_flush_remove_add="
display Add two elements, flush, re-add
"
# Set template for all tests, types and rules are filled in depending on test
set_template='
flush ruleset
@ -440,6 +450,8 @@ setup_set() {
# Check that at least one of the needed tools is available
check_tools() {
[ -z "${tools}" ] && return 0
__tools=
for tool in ${tools}; do
if [ "${tool}" = "nc" ] && [ "${proto}" = "udp6" ] && \
@ -1025,7 +1037,7 @@ format_noconcat() {
add() {
if ! nft add element inet filter test "${1}"; then
err "Failed to add ${1} given ruleset:"
err "$(nft list ruleset -a)"
err "$(nft -a list ruleset)"
return 1
fi
}
@ -1045,7 +1057,7 @@ add_perf() {
add_perf_norange() {
if ! nft add element netdev perf norange "${1}"; then
err "Failed to add ${1} given ruleset:"
err "$(nft list ruleset -a)"
err "$(nft -a list ruleset)"
return 1
fi
}
@ -1054,7 +1066,7 @@ add_perf_norange() {
add_perf_noconcat() {
if ! nft add element netdev perf noconcat "${1}"; then
err "Failed to add ${1} given ruleset:"
err "$(nft list ruleset -a)"
err "$(nft -a list ruleset)"
return 1
fi
}
@ -1063,7 +1075,7 @@ add_perf_noconcat() {
del() {
if ! nft delete element inet filter test "${1}"; then
err "Failed to delete ${1} given ruleset:"
err "$(nft list ruleset -a)"
err "$(nft -a list ruleset)"
return 1
fi
}
@ -1134,7 +1146,7 @@ send_match() {
err " $(for f in ${src}; do
eval format_\$f "${2}"; printf ' '; done)"
err "should have matched ruleset:"
err "$(nft list ruleset -a)"
err "$(nft -a list ruleset)"
return 1
fi
nft reset counter inet filter test >/dev/null
@ -1160,7 +1172,7 @@ send_nomatch() {
err " $(for f in ${src}; do
eval format_\$f "${2}"; printf ' '; done)"
err "should not have matched ruleset:"
err "$(nft list ruleset -a)"
err "$(nft -a list ruleset)"
return 1
fi
}
@ -1430,6 +1442,23 @@ test_performance() {
kill "${perf_pid}"
}
test_bug_flush_remove_add() {
set_cmd='{ set s { type ipv4_addr . inet_service; flags interval; }; }'
elem1='{ 10.0.0.1 . 22-25, 10.0.0.1 . 10-20 }'
elem2='{ 10.0.0.1 . 10-20, 10.0.0.1 . 22-25 }'
for i in `seq 1 100`; do
nft add table t ${set_cmd} || return ${KSELFTEST_SKIP}
nft add element t s ${elem1} 2>/dev/null || return 1
nft flush set t s 2>/dev/null || return 1
nft add element t s ${elem2} 2>/dev/null || return 1
done
nft flush ruleset
}
test_reported_issues() {
eval test_bug_"${subtest}"
}
# Run everything in a separate network namespace
[ "${1}" != "run" ] && { unshare -n "${0}" run; exit $?; }
tmp="$(mktemp)"
@ -1438,9 +1467,15 @@ trap cleanup EXIT
# Entry point for test runs
passed=0
for name in ${TESTS}; do
printf "TEST: %s\n" "${name}"
for type in ${TYPES}; do
eval desc=\$TYPE_"${type}"
printf "TEST: %s\n" "$(echo ${name} | tr '_' ' ')"
if [ "${name}" = "reported_issues" ]; then
SUBTESTS="${BUGS}"
else
SUBTESTS="${TYPES}"
fi
for subtest in ${SUBTESTS}; do
eval desc=\$TYPE_"${subtest}"
IFS='
'
for __line in ${desc}; do