mirror of https://gitee.com/openkylin/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "Some straggler bug fixes:

  1) Batman-adv DAT must consider VLAN IDs when choosing candidate
     nodes, from Antonio Quartulli.

  2) Fix botched reference counting of vlan objects and neigh nodes in
     batman-adv, from Sven Eckelmann.

  3) netem can crash when it sees GSO packets, the fix is to segment
     them upon ->enqueue. Fix from Neil Horman with help from Eric
     Dumazet.

  4) Fix VXLAN dependencies in mlx5 driver Kconfig, from Matthew
     Finlay.

  5) Handle VXLAN ops outside of rcu lock, via a workqueue, in mlx5,
     since it can sleep. Fix also from Matthew Finlay.

  6) Check mdiobus_scan() return values properly in pxa168_eth and macb
     drivers. From Sergei Shtylyov.

  7) If the netdevice doesn't support checksumming, disable
     segmentation. From Alexander Duyck.

  8) Fix races between RDS tcp accept and sending, from Sowmini
     Varadhan.

  9) In macb driver, probe MDIO bus before we register the netdev,
     otherwise we can try to open the device before it is really ready
     for that. Fix from Florian Fainelli.

 10) Netlink attribute size for ILA "tunnels" not calculated properly,
     fix from Nicolas Dichtel"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  ipv6/ila: fix nlsize calculation for lwtunnel
  net: macb: Probe MDIO bus before registering netdev
  RDS: TCP: Synchronize accept() and connect() paths on t_conn_lock.
  RDS:TCP: Synchronize rds_tcp_accept_one with rds_send_xmit when resetting t_sock
  vxlan: Add checksum check to the features check function
  net: Disable segmentation if checksumming is not supported
  net: mvneta: Remove superfluous SMP function call
  macb: fix mdiobus_scan() error check
  pxa168_eth: fix mdiobus_scan() error check
  net/mlx5e: Use workqueue for vxlan ops
  net/mlx5e: Implement a mlx5e workqueue
  net/mlx5: Kconfig: Fix MLX5_EN/VXLAN build issue
  net/mlx5: Unmap only the relevant IO memory mapping
  netem: Segment GSO packets on enqueue
  batman-adv: Fix reference counting of hardif_neigh_node object for neigh_node
  batman-adv: Fix reference counting of vlan object for tt_local_entry
  batman-adv: B.A.T.M.A.N V - make sure iface is reactivated upon NETDEV_UP event
  batman-adv: fix DAT candidate selection (must use vid)
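For orientation before the diffs, fix 6 hinges on the mdiobus_scan() return convention: it returns a phy_device pointer on success, an ERR_PTR() on failure, and ERR_PTR(-ENODEV) when no PHY answers at an address, which callers should skip rather than treat as fatal. A minimal sketch of that caller pattern, with a hypothetical example_scan_bus() helper (not code from this tree):

#include <linux/err.h>
#include <linux/phy.h>

/* Walk every address on an already-registered MDIO bus, skipping empty
 * addresses (-ENODEV) and failing only on real errors -- the same
 * pattern the macb and pxa168_eth hunks below converge on.
 */
static int example_scan_bus(struct mii_bus *bus)
{
	int addr;

	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
		struct phy_device *phydev = mdiobus_scan(bus, addr);

		if (IS_ERR(phydev)) {
			if (PTR_ERR(phydev) == -ENODEV)
				continue; /* no PHY at this address */
			return PTR_ERR(phydev); /* real scan failure */
		}
	}
	return 0;
}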
commit 7391daf2ff
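From "macb: fix mdiobus_scan() error check" and "net: macb: Probe MDIO bus before registering netdev" (Cadence macb driver):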
@@ -441,7 +441,7 @@ static int macb_mii_init(struct macb *bp)
 	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
 		 bp->pdev->name, bp->pdev->id);
 	bp->mii_bus->priv = bp;
-	bp->mii_bus->parent = &bp->dev->dev;
+	bp->mii_bus->parent = &bp->pdev->dev;
 	pdata = dev_get_platdata(&bp->pdev->dev);
 
 	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
@@ -458,7 +458,8 @@ static int macb_mii_init(struct macb *bp)
 			struct phy_device *phydev;
 
 			phydev = mdiobus_scan(bp->mii_bus, i);
-			if (IS_ERR(phydev)) {
+			if (IS_ERR(phydev) &&
+			    PTR_ERR(phydev) != -ENODEV) {
 				err = PTR_ERR(phydev);
 				break;
 			}
@@ -3019,29 +3020,36 @@ static int macb_probe(struct platform_device *pdev)
 	if (err)
 		goto err_out_free_netdev;
 
+	err = macb_mii_init(bp);
+	if (err)
+		goto err_out_free_netdev;
+
+	phydev = bp->phy_dev;
+
+	netif_carrier_off(dev);
+
 	err = register_netdev(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
-		goto err_out_unregister_netdev;
+		goto err_out_unregister_mdio;
 	}
 
-	err = macb_mii_init(bp);
-	if (err)
-		goto err_out_unregister_netdev;
-
-	netif_carrier_off(dev);
+	phy_attached_info(phydev);
 
 	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
 		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
 		    dev->base_addr, dev->irq, dev->dev_addr);
 
-	phydev = bp->phy_dev;
-	phy_attached_info(phydev);
-
 	return 0;
 
-err_out_unregister_netdev:
-	unregister_netdev(dev);
+err_out_unregister_mdio:
+	phy_disconnect(bp->phy_dev);
+	mdiobus_unregister(bp->mii_bus);
+	mdiobus_free(bp->mii_bus);
+
+	/* Shutdown the PHY if there is a GPIO reset */
+	if (bp->reset_gpio)
+		gpiod_set_value(bp->reset_gpio, 0);
 
 err_out_free_netdev:
 	free_netdev(dev);
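From "net: mvneta: Remove superfluous SMP function call":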
@@ -3354,8 +3354,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
 		/* Enable per-CPU interrupts on the CPU that is
 		 * brought up.
 		 */
-		smp_call_function_single(cpu, mvneta_percpu_enable,
-					 pp, true);
+		mvneta_percpu_enable(pp);
 
 		/* Enable per-CPU interrupt on the one CPU we care
 		 * about.
@@ -3387,8 +3386,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
 		/* Disable per-CPU interrupts on the CPU that is
 		 * brought down.
 		 */
-		smp_call_function_single(cpu, mvneta_percpu_disable,
-					 pp, true);
+		mvneta_percpu_disable(pp);
 
 		break;
 	case CPU_DEAD:
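From "pxa168_eth: fix mdiobus_scan() error check":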
@@ -979,6 +979,8 @@ static int pxa168_init_phy(struct net_device *dev)
 		return 0;
 
 	pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
+	if (IS_ERR(pep->phy))
+		return PTR_ERR(pep->phy);
 	if (!pep->phy)
 		return -ENODEV;
 
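From "net/mlx5: Kconfig: Fix MLX5_EN/VXLAN build issue":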
@@ -14,6 +14,7 @@ config MLX5_CORE_EN
	bool "Mellanox Technologies ConnectX-4 Ethernet support"
	depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
	select PTP_1588_CLOCK
+	select VXLAN if MLX5_CORE=y
	default n
	---help---
	  Ethernet support in Mellanox Technologies ConnectX-4 NIC.
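From "net/mlx5e: Implement a mlx5e workqueue" (mlx5e private data):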
@@ -567,6 +567,7 @@ struct mlx5e_priv {
 	struct mlx5e_vxlan_db      vxlan;
 
 	struct mlx5e_params        params;
+	struct workqueue_struct   *wq;
 	struct work_struct         update_carrier_work;
 	struct work_struct         set_rx_mode_work;
 	struct delayed_work        update_stats_work;
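From "net/mlx5e: Implement a mlx5e workqueue" and "net/mlx5e: Use workqueue for vxlan ops" (mlx5e main driver file):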
@@ -262,9 +262,8 @@ static void mlx5e_update_stats_work(struct work_struct *work)
 	mutex_lock(&priv->state_lock);
 	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
 		mlx5e_update_stats(priv);
-		schedule_delayed_work(dwork,
-				      msecs_to_jiffies(
-					      MLX5E_UPDATE_STATS_INTERVAL));
+		queue_delayed_work(priv->wq, dwork,
+				   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
 	}
 	mutex_unlock(&priv->state_lock);
 }
@@ -280,7 +279,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 	switch (event) {
 	case MLX5_DEV_EVENT_PORT_UP:
 	case MLX5_DEV_EVENT_PORT_DOWN:
-		schedule_work(&priv->update_carrier_work);
+		queue_work(priv->wq, &priv->update_carrier_work);
 		break;
 
 	default:
@@ -1505,7 +1504,7 @@ int mlx5e_open_locked(struct net_device *netdev)
 	mlx5e_update_carrier(priv);
 	mlx5e_timestamp_init(priv);
 
-	schedule_delayed_work(&priv->update_stats_work, 0);
+	queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
 
 	return 0;
 
@@ -1961,7 +1960,7 @@ static void mlx5e_set_rx_mode(struct net_device *dev)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 
-	schedule_work(&priv->set_rx_mode_work);
+	queue_work(priv->wq, &priv->set_rx_mode_work);
 }
 
 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
@@ -1976,7 +1975,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
 	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
 	netif_addr_unlock_bh(netdev);
 
-	schedule_work(&priv->set_rx_mode_work);
+	queue_work(priv->wq, &priv->set_rx_mode_work);
 
 	return 0;
 }
@@ -2158,7 +2157,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
 	if (!mlx5e_vxlan_allowed(priv->mdev))
 		return;
 
-	mlx5e_vxlan_add_port(priv, be16_to_cpu(port));
+	mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
 }
 
 static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -2169,7 +2168,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
 	if (!mlx5e_vxlan_allowed(priv->mdev))
 		return;
 
-	mlx5e_vxlan_del_port(priv, be16_to_cpu(port));
+	mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
 }
 
 static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
@@ -2498,10 +2497,14 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 
 	priv = netdev_priv(netdev);
 
+	priv->wq = create_singlethread_workqueue("mlx5e");
+	if (!priv->wq)
+		goto err_free_netdev;
+
 	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
 	if (err) {
 		mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
-		goto err_free_netdev;
+		goto err_destroy_wq;
 	}
 
 	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
@@ -2580,7 +2583,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 		vxlan_get_rx_port(netdev);
 
 	mlx5e_enable_async_events(priv);
-	schedule_work(&priv->set_rx_mode_work);
+	queue_work(priv->wq, &priv->set_rx_mode_work);
 
 	return priv;
 
@@ -2617,6 +2620,9 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 err_unmap_free_uar:
 	mlx5_unmap_free_uar(mdev, &priv->cq_uar);
 
+err_destroy_wq:
+	destroy_workqueue(priv->wq);
+
 err_free_netdev:
 	free_netdev(netdev);
 
@@ -2630,9 +2636,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 
 	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
-	schedule_work(&priv->set_rx_mode_work);
+	queue_work(priv->wq, &priv->set_rx_mode_work);
 	mlx5e_disable_async_events(priv);
-	flush_scheduled_work();
+	flush_workqueue(priv->wq);
 	if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
 		netif_device_detach(netdev);
 		mutex_lock(&priv->state_lock);
@@ -2655,6 +2661,8 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 	mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
 	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
 	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+	cancel_delayed_work_sync(&priv->update_stats_work);
+	destroy_workqueue(priv->wq);
 
 	if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
 		free_netdev(netdev);
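From "net/mlx5: Unmap only the relevant IO memory mapping":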
@@ -269,7 +269,9 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
 
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 {
-	iounmap(uar->map);
-	iounmap(uar->bf_map);
+	if (uar->map)
+		iounmap(uar->map);
+	else
+		iounmap(uar->bf_map);
 	mlx5_cmd_free_uar(mdev, uar->index);
 }
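From "net/mlx5e: Use workqueue for vxlan ops" (mlx5e vxlan helpers):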
@@ -95,21 +95,22 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
 	return vxlan;
 }
 
-int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_add_port(struct work_struct *work)
 {
+	struct mlx5e_vxlan_work *vxlan_work =
+		container_of(work, struct mlx5e_vxlan_work, work);
+	struct mlx5e_priv *priv = vxlan_work->priv;
 	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+	u16 port = vxlan_work->port;
 	struct mlx5e_vxlan *vxlan;
 	int err;
 
-	err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port);
-	if (err)
-		return err;
+	if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
+		goto free_work;
 
 	vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
-	if (!vxlan) {
-		err = -ENOMEM;
+	if (!vxlan)
 		goto err_delete_port;
-	}
 
 	vxlan->udp_port = port;
 
@@ -119,13 +120,14 @@ int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
 	if (err)
 		goto err_free;
 
-	return 0;
+	goto free_work;
 
 err_free:
 	kfree(vxlan);
 err_delete_port:
 	mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
-	return err;
+free_work:
+	kfree(vxlan_work);
 }
 
 static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
@@ -145,12 +147,36 @@ static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
 	kfree(vxlan);
 }
 
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
 {
-	if (!mlx5e_vxlan_lookup_port(priv, port))
-		return;
+	struct mlx5e_vxlan_work *vxlan_work =
+		container_of(work, struct mlx5e_vxlan_work, work);
+	struct mlx5e_priv *priv = vxlan_work->priv;
+	u16 port = vxlan_work->port;
 
 	__mlx5e_vxlan_core_del_port(priv, port);
+
+	kfree(vxlan_work);
+}
+
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+			    u16 port, int add)
+{
+	struct mlx5e_vxlan_work *vxlan_work;
+
+	vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
+	if (!vxlan_work)
+		return;
+
+	if (add)
+		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
+	else
+		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
+
+	vxlan_work->priv = priv;
+	vxlan_work->port = port;
+	vxlan_work->sa_family = sa_family;
+	queue_work(priv->wq, &vxlan_work->work);
 }
 
 void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
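From "net/mlx5e: Use workqueue for vxlan ops" (mlx5e vxlan header):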
@@ -39,6 +39,13 @@ struct mlx5e_vxlan {
 	u16 udp_port;
 };
 
+struct mlx5e_vxlan_work {
+	struct work_struct	work;
+	struct mlx5e_priv	*priv;
+	sa_family_t		sa_family;
+	u16			port;
+};
+
 static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
 {
 	return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
@@ -46,8 +53,8 @@ static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
 }
 
 void mlx5e_vxlan_init(struct mlx5e_priv *priv);
-int  mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port);
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port);
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+			    u16 port, int add);
 struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
 void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
 
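From "vxlan: Add checksum check to the features check function" (etherdevice header, new inner_eth_hdr() helper):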
@@ -28,6 +28,11 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
 	return (struct ethhdr *)skb_mac_header(skb);
 }
 
+static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
+{
+	return (struct ethhdr *)skb_inner_mac_header(skb);
+}
+
 int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
 
 extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
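From "vxlan: Add checksum check to the features check function" (vxlan_features_check()):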
@@ -252,7 +252,9 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
 	    (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
 	     skb->inner_protocol != htons(ETH_P_TEB) ||
 	     (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
-	      sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
+	      sizeof(struct udphdr) + sizeof(struct vxlanhdr)) ||
+	     (skb->ip_summed != CHECKSUM_NONE &&
+	      !can_checksum_protocol(features, inner_eth_hdr(skb)->h_proto))))
 		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 
 	return features;
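From "batman-adv: B.A.T.M.A.N V - make sure iface is reactivated upon NETDEV_UP event" (B.A.T.M.A.N. V algorithm):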
@@ -32,10 +32,21 @@
 
 #include "bat_v_elp.h"
 #include "bat_v_ogm.h"
+#include "hard-interface.h"
 #include "hash.h"
 #include "originator.h"
 #include "packet.h"
 
+static void batadv_v_iface_activate(struct batadv_hard_iface *hard_iface)
+{
+	/* B.A.T.M.A.N. V does not use any queuing mechanism, therefore it can
+	 * set the interface as ACTIVE right away, without any risk of race
+	 * condition
+	 */
+	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
+		hard_iface->if_status = BATADV_IF_ACTIVE;
+}
+
 static int batadv_v_iface_enable(struct batadv_hard_iface *hard_iface)
 {
 	int ret;
@@ -274,6 +285,7 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
 
 static struct batadv_algo_ops batadv_batman_v __read_mostly = {
 	.name = "BATMAN_V",
+	.bat_iface_activate = batadv_v_iface_activate,
 	.bat_iface_enable = batadv_v_iface_enable,
 	.bat_iface_disable = batadv_v_iface_disable,
 	.bat_iface_update_mac = batadv_v_iface_update_mac,
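From "batman-adv: fix DAT candidate selection (must use vid)" (distributed ARP table):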
@@ -568,6 +568,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
  * be sent to
  * @bat_priv: the bat priv with all the soft interface information
  * @ip_dst: ipv4 to look up in the DHT
+ * @vid: VLAN identifier
  *
  * An originator O is selected if and only if its DHT_ID value is one of three
  * closest values (from the LEFT, with wrap around if needed) then the hash
@@ -576,7 +577,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
  * Return: the candidate array of size BATADV_DAT_CANDIDATE_NUM.
  */
 static struct batadv_dat_candidate *
-batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
+			     unsigned short vid)
 {
 	int select;
 	batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
@@ -592,7 +594,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
 		return NULL;
 
 	dat.ip = ip_dst;
-	dat.vid = 0;
+	dat.vid = vid;
 	ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
 						    BATADV_DAT_ADDR_MAX);
 
@@ -612,6 +614,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
 * @ip: the DHT key
+ * @vid: VLAN identifier
 * @packet_subtype: unicast4addr packet subtype to use
 *
 * This function copies the skb with pskb_copy() and is sent as unicast packet
@@ -622,7 +625,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
 */
 static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
 				 struct sk_buff *skb, __be32 ip,
-				 int packet_subtype)
+				 unsigned short vid, int packet_subtype)
 {
 	int i;
 	bool ret = false;
@@ -631,7 +634,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
 	struct sk_buff *tmp_skb;
 	struct batadv_dat_candidate *cand;
 
-	cand = batadv_dat_select_candidates(bat_priv, ip);
+	cand = batadv_dat_select_candidates(bat_priv, ip, vid);
 	if (!cand)
 		goto out;
 
@@ -1022,7 +1025,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
 		ret = true;
 	} else {
 		/* Send the request to the DHT */
-		ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
+		ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
 					   BATADV_P_DAT_DHT_GET);
 	}
 out:
@@ -1150,8 +1153,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
 	/* Send the ARP reply to the candidates for both the IP addresses that
 	 * the node obtained from the ARP reply
 	 */
-	batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
-	batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
+	batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
+	batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
 }
 
 /**
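From "batman-adv: B.A.T.M.A.N V - make sure iface is reactivated upon NETDEV_UP event" (hard-interface activation):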
@@ -407,6 +407,9 @@ batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface)
 
 	batadv_update_min_mtu(hard_iface->soft_iface);
 
+	if (bat_priv->bat_algo_ops->bat_iface_activate)
+		bat_priv->bat_algo_ops->bat_iface_activate(hard_iface);
+
 out:
 	if (primary_if)
 		batadv_hardif_put(primary_if);
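From "batman-adv: Fix reference counting of hardif_neigh_node object for neigh_node" (originator/neighbor handling):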
@@ -250,7 +250,6 @@ static void batadv_neigh_node_release(struct kref *ref)
 {
 	struct hlist_node *node_tmp;
 	struct batadv_neigh_node *neigh_node;
-	struct batadv_hardif_neigh_node *hardif_neigh;
 	struct batadv_neigh_ifinfo *neigh_ifinfo;
 	struct batadv_algo_ops *bao;
 
@@ -262,13 +261,7 @@ static void batadv_neigh_node_release(struct kref *ref)
 		batadv_neigh_ifinfo_put(neigh_ifinfo);
 	}
 
-	hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
-					       neigh_node->addr);
-	if (hardif_neigh) {
-		/* batadv_hardif_neigh_get() increases refcount too */
-		batadv_hardif_neigh_put(hardif_neigh);
-		batadv_hardif_neigh_put(hardif_neigh);
-	}
+	batadv_hardif_neigh_put(neigh_node->hardif_neigh);
 
 	if (bao->bat_neigh_free)
 		bao->bat_neigh_free(neigh_node);
@@ -665,6 +658,10 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
 	neigh_node->orig_node = orig_node;
 	neigh_node->last_seen = jiffies;
 
+	/* increment unique neighbor refcount */
+	kref_get(&hardif_neigh->refcount);
+	neigh_node->hardif_neigh = hardif_neigh;
+
 	/* extra reference for return */
 	kref_init(&neigh_node->refcount);
 	kref_get(&neigh_node->refcount);
@@ -673,9 +670,6 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
 	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
 	spin_unlock_bh(&orig_node->neigh_list_lock);
 
-	/* increment unique neighbor refcount */
-	kref_get(&hardif_neigh->refcount);
-
 	batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
 		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
 		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);
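From "batman-adv: Fix reference counting of vlan object for tt_local_entry" (translation table):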
@@ -215,6 +215,8 @@ static void batadv_tt_local_entry_release(struct kref *ref)
 	tt_local_entry = container_of(ref, struct batadv_tt_local_entry,
 				      common.refcount);
 
+	batadv_softif_vlan_put(tt_local_entry->vlan);
+
 	kfree_rcu(tt_local_entry, common.rcu);
 }
 
@@ -673,6 +675,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
 	kref_get(&tt_local->common.refcount);
 	tt_local->last_seen = jiffies;
 	tt_local->common.added_at = tt_local->last_seen;
+	tt_local->vlan = vlan;
 
 	/* the batman interface mac and multicast addresses should never be
 	 * purged
@@ -991,7 +994,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_local_entry *tt_local;
 	struct batadv_hard_iface *primary_if;
-	struct batadv_softif_vlan *vlan;
 	struct hlist_head *head;
 	unsigned short vid;
 	u32 i;
@@ -1027,14 +1029,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 			last_seen_msecs = last_seen_msecs % 1000;
 
 			no_purge = tt_common_entry->flags & np_flag;
-
-			vlan = batadv_softif_vlan_get(bat_priv, vid);
-			if (!vlan) {
-				seq_printf(seq, "Cannot retrieve VLAN %d\n",
-					   BATADV_PRINT_VID(vid));
-				continue;
-			}
-
 			seq_printf(seq,
 				   " * %pM %4i [%c%c%c%c%c%c] %3u.%03u (%#.8x)\n",
 				   tt_common_entry->addr,
@@ -1052,9 +1046,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 				   BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
 				   no_purge ? 0 : last_seen_secs,
 				   no_purge ? 0 : last_seen_msecs,
-				   vlan->tt.crc);
-
-			batadv_softif_vlan_put(vlan);
+				   tt_local->vlan->tt.crc);
 		}
 		rcu_read_unlock();
 	}
@@ -1099,7 +1091,6 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
 {
 	struct batadv_tt_local_entry *tt_local_entry;
 	u16 flags, curr_flags = BATADV_NO_FLAGS;
-	struct batadv_softif_vlan *vlan;
 	void *tt_entry_exists;
 
 	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
@@ -1139,14 +1130,6 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
 	/* extra call to free the local tt entry */
 	batadv_tt_local_entry_put(tt_local_entry);
 
-	/* decrease the reference held for this vlan */
-	vlan = batadv_softif_vlan_get(bat_priv, vid);
-	if (!vlan)
-		goto out;
-
-	batadv_softif_vlan_put(vlan);
-	batadv_softif_vlan_put(vlan);
-
 out:
 	if (tt_local_entry)
 		batadv_tt_local_entry_put(tt_local_entry);
@@ -1219,7 +1202,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
 	spinlock_t *list_lock; /* protects write access to the hash lists */
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_local_entry *tt_local;
-	struct batadv_softif_vlan *vlan;
 	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	u32 i;
@@ -1241,14 +1223,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
 					      struct batadv_tt_local_entry,
 					      common);
 
-			/* decrease the reference held for this vlan */
-			vlan = batadv_softif_vlan_get(bat_priv,
-						      tt_common_entry->vid);
-			if (vlan) {
-				batadv_softif_vlan_put(vlan);
-				batadv_softif_vlan_put(vlan);
-			}
-
 			batadv_tt_local_entry_put(tt_local);
 		}
 		spin_unlock_bh(list_lock);
@@ -3309,7 +3283,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct batadv_tt_common_entry *tt_common;
 	struct batadv_tt_local_entry *tt_local;
-	struct batadv_softif_vlan *vlan;
 	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3339,13 +3312,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 					      struct batadv_tt_local_entry,
 					      common);
 
-			/* decrease the reference held for this vlan */
-			vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
-			if (vlan) {
-				batadv_softif_vlan_put(vlan);
-				batadv_softif_vlan_put(vlan);
-			}
-
 			batadv_tt_local_entry_put(tt_local);
 		}
 		spin_unlock_bh(list_lock);
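From the two batman-adv reference-counting fixes and the B.A.T.M.A.N. V iface-activation fix (batman-adv type definitions):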
@@ -433,6 +433,7 @@ struct batadv_hardif_neigh_node {
  * @ifinfo_lock: lock protecting private ifinfo members and list
  * @if_incoming: pointer to incoming hard-interface
  * @last_seen: when last packet via this neighbor was received
+ * @hardif_neigh: hardif_neigh of this neighbor
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
  */
@@ -444,6 +445,7 @@ struct batadv_neigh_node {
 	spinlock_t ifinfo_lock;	/* protects ifinfo_list and its members */
 	struct batadv_hard_iface *if_incoming;
 	unsigned long last_seen;
+	struct batadv_hardif_neigh_node *hardif_neigh;
 	struct kref refcount;
 	struct rcu_head rcu;
 };
@@ -1073,10 +1075,12 @@ struct batadv_tt_common_entry {
  * struct batadv_tt_local_entry - translation table local entry data
  * @common: general translation table data
  * @last_seen: timestamp used for purging stale tt local entries
+ * @vlan: soft-interface vlan of the entry
  */
 struct batadv_tt_local_entry {
 	struct batadv_tt_common_entry common;
 	unsigned long last_seen;
+	struct batadv_softif_vlan *vlan;
 };
 
 /**
@@ -1250,6 +1254,8 @@ struct batadv_forw_packet {
  * struct batadv_algo_ops - mesh algorithm callbacks
  * @list: list node for the batadv_algo_list
  * @name: name of the algorithm
+ * @bat_iface_activate: start routing mechanisms when hard-interface is brought
+ *  up
  * @bat_iface_enable: init routing info when hard-interface is enabled
  * @bat_iface_disable: de-init routing info when hard-interface is disabled
  * @bat_iface_update_mac: (re-)init mac addresses of the protocol information
@@ -1277,6 +1283,7 @@ struct batadv_forw_packet {
 struct batadv_algo_ops {
 	struct hlist_node list;
 	char *name;
+	void (*bat_iface_activate)(struct batadv_hard_iface *hard_iface);
 	int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface);
 	void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface);
 	void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface);
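From "net: Disable segmentation if checksumming is not supported" (core dev feature harmonization):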
@@ -2802,7 +2802,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
 
 	if (skb->ip_summed != CHECKSUM_NONE &&
 	    !can_checksum_protocol(features, type)) {
-		features &= ~NETIF_F_CSUM_MASK;
+		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 	} else if (illegal_highdma(skb->dev, skb)) {
 		features &= ~NETIF_F_SG;
 	}
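From "ipv6/ila: fix nlsize calculation for lwtunnel":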
@@ -120,8 +120,7 @@ static int ila_fill_encap_info(struct sk_buff *skb,
 
 static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
 {
-	/* No encapsulation overhead */
-	return 0;
+	return nla_total_size(sizeof(u64)); /* ILA_ATTR_LOCATOR */
 }
 
 static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
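From the RDS/TCP synchronization fixes (rds_tcp transport setup):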
@@ -127,7 +127,7 @@ void rds_tcp_restore_callbacks(struct socket *sock,
 
 /*
  * This is the only path that sets tc->t_sock. Send and receive trust that
- * it is set. The RDS_CONN_CONNECTED bit protects those paths from being
+ * it is set. The RDS_CONN_UP bit protects those paths from being
  * called while it isn't set.
 */
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
@@ -216,6 +216,7 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 	if (!tc)
 		return -ENOMEM;
 
+	mutex_init(&tc->t_conn_lock);
 	tc->t_sock = NULL;
 	tc->t_tinc = NULL;
 	tc->t_tinc_hdr_rem = sizeof(struct rds_header);
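From "RDS: TCP: Synchronize accept() and connect() paths on t_conn_lock." (rds_tcp_connection, new t_conn_lock mutex):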
@@ -12,6 +12,10 @@ struct rds_tcp_connection {
 
 	struct list_head	t_tcp_node;
 	struct rds_connection   *conn;
+	/* t_conn_lock synchronizes the connection establishment between
+	 * rds_tcp_accept_one and rds_tcp_conn_connect
+	 */
+	struct mutex		t_conn_lock;
 	struct socket		*t_sock;
 	void			*t_orig_write_space;
 	void			*t_orig_data_ready;
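From "RDS: TCP: Synchronize accept() and connect() paths on t_conn_lock." (connect path):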
@@ -78,7 +78,14 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
 	struct socket *sock = NULL;
 	struct sockaddr_in src, dest;
 	int ret;
+	struct rds_tcp_connection *tc = conn->c_transport_data;
+
+	mutex_lock(&tc->t_conn_lock);
 
+	if (rds_conn_up(conn)) {
+		mutex_unlock(&tc->t_conn_lock);
+		return 0;
+	}
 	ret = sock_create_kern(rds_conn_net(conn), PF_INET,
 			       SOCK_STREAM, IPPROTO_TCP, &sock);
 	if (ret < 0)
@@ -120,6 +127,7 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
 	}
 
 out:
+	mutex_unlock(&tc->t_conn_lock);
 	if (sock)
 		sock_release(sock);
 	return ret;
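From both RDS/TCP fixes (accept path):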
@@ -76,7 +76,9 @@ int rds_tcp_accept_one(struct socket *sock)
 	struct rds_connection *conn;
 	int ret;
 	struct inet_sock *inet;
-	struct rds_tcp_connection *rs_tcp;
+	struct rds_tcp_connection *rs_tcp = NULL;
+	int conn_state;
+	struct sock *nsk;
 
 	ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
 			       sock->sk->sk_type, sock->sk->sk_protocol,
@@ -115,28 +117,44 @@ int rds_tcp_accept_one(struct socket *sock)
 	 * rds_tcp_state_change() will do that cleanup
 	 */
 	rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
-	if (rs_tcp->t_sock &&
-	    ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
-		struct sock *nsk = new_sock->sk;
-
+	rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
+	mutex_lock(&rs_tcp->t_conn_lock);
+	conn_state = rds_conn_state(conn);
+	if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_UP)
+		goto rst_nsk;
+	if (rs_tcp->t_sock) {
+		/* Need to resolve a duelling SYN between peers.
+		 * We have an outstanding SYN to this peer, which may
+		 * potentially have transitioned to the RDS_CONN_UP state,
+		 * so we must quiesce any send threads before resetting
+		 * c_transport_data.
+		 */
+		wait_event(conn->c_waitq,
+			   !test_bit(RDS_IN_XMIT, &conn->c_flags));
+		if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
+			goto rst_nsk;
+		} else if (rs_tcp->t_sock) {
+			rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
+			conn->c_outgoing = 0;
+		}
+	}
+	rds_tcp_set_callbacks(new_sock, conn);
+	rds_connect_complete(conn); /* marks RDS_CONN_UP */
+	new_sock = NULL;
+	ret = 0;
+	goto out;
+rst_nsk:
+	/* reset the newly returned accept sock and bail */
+	nsk = new_sock->sk;
+	rds_tcp_stats_inc(s_tcp_listen_closed_stale);
 	nsk->sk_user_data = NULL;
 	nsk->sk_prot->disconnect(nsk, 0);
 	tcp_done(nsk);
 	new_sock = NULL;
 	ret = 0;
-	goto out;
-	} else if (rs_tcp->t_sock) {
-		rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
-		conn->c_outgoing = 0;
-	}
-
-	rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
-	rds_tcp_set_callbacks(new_sock, conn);
-	rds_connect_complete(conn);
-	new_sock = NULL;
-	ret = 0;
-
 out:
+	if (rs_tcp)
+		mutex_unlock(&rs_tcp->t_conn_lock);
 	if (new_sock)
 		sock_release(new_sock);
 	return ret;
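From "netem: Segment GSO packets on enqueue":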
@@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 	sch->q.qlen++;
 }
 
+/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
+ * when we statistically choose to corrupt one, we instead segment it, returning
+ * the first packet to be corrupted, and re-enqueue the remaining frames
+ */
+static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct sk_buff *segs;
+	netdev_features_t features = netif_skb_features(skb);
+
+	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+
+	if (IS_ERR_OR_NULL(segs)) {
+		qdisc_reshape_fail(skb, sch);
+		return NULL;
+	}
+	consume_skb(skb);
+	return segs;
+}
+
 /*
  * Insert one skb into qdisc.
  * Note: parent depends on return value to account for queue length.
@@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* We don't fill cb now as skb_unshare() may invalidate it */
 	struct netem_skb_cb *cb;
 	struct sk_buff *skb2;
+	struct sk_buff *segs = NULL;
+	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
+	int nb = 0;
 	int count = 1;
+	int rc = NET_XMIT_SUCCESS;
 
 	/* Random duplication */
 	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
@@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	 * do it now in software before we mangle it.
 	 */
 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
+		if (skb_is_gso(skb)) {
+			segs = netem_segment(skb, sch);
+			if (!segs)
+				return NET_XMIT_DROP;
+		} else {
+			segs = skb;
+		}
+
+		skb = segs;
+		segs = segs->next;
+
 		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
 		    (skb->ip_summed == CHECKSUM_PARTIAL &&
-		     skb_checksum_help(skb)))
-			return qdisc_drop(skb, sch);
+		     skb_checksum_help(skb))) {
+			rc = qdisc_drop(skb, sch);
+			goto finish_segs;
+		}
 
 		skb->data[prandom_u32() % skb_headlen(skb)] ^=
 			1<<(prandom_u32() % 8);
@@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->qstats.requeues++;
 	}
 
+finish_segs:
+	if (segs) {
+		while (segs) {
+			skb2 = segs->next;
+			segs->next = NULL;
+			qdisc_skb_cb(segs)->pkt_len = segs->len;
+			last_len = segs->len;
+			rc = qdisc_enqueue(segs, sch);
+			if (rc != NET_XMIT_SUCCESS) {
+				if (net_xmit_drop_count(rc))
+					qdisc_qstats_drop(sch);
+			} else {
+				nb++;
+				len += last_len;
+			}
+			segs = skb2;
+		}
+		sch->q.qlen += nb;
+		if (nb > 1)
+			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+	}
 	return NET_XMIT_SUCCESS;
 }
 