Merge branch '8021ad'

Patrick McHardy says:

====================
The following patches add support for 802.1ad (provider tagging) to the
VLAN driver. The patchset consists of the following parts:

- renaming of the NETIF_F_HW_VLAN feature flags to indicate that they only
  operate on CTAGs

- preparation for 802.1ad VLAN filtering offload by adding a proto argument
  to the rx_{add,kill}_vid net_device_ops callbacks

- preparation of the VLAN code to support multiple protocols by making the
  protocol used for tagging a property of the VLAN device and converting
  the device lookup functions accordingly

- second step of preparation of the VLAN code by making the packet tagging
  functions take a protocol argument

- introduction of 802.1ad support in the VLAN code, consisting mainly of
  checking for ETH_P_8021AD in a couple of places and extending the netdevice
  offload feature checks to take the protocol into account

- announcement of STAG offloading capabilities in a couple of drivers for
  virtual network devices

The patchset is based on net-next.git and has been tested with single and
double tagging with and without HW acceleration (for CTAGs).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2013-04-19 14:46:27 -04:00
commit 447b816fe0
101 changed files with 726 additions and 566 deletions

View File

@ -2948,7 +2948,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n", nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
nesvnic->netdev->name, vlan_tag); nesvnic->netdev->name, vlan_tag);
__vlan_hwaccel_put_tag(rx_skb, vlan_tag); __vlan_hwaccel_put_tag(rx_skb, htons(ETH_P_8021Q), vlan_tag);
} }
if (nes_use_lro) if (nes_use_lro)
lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL); lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);

View File

@ -1599,7 +1599,7 @@ static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev,
/* Enable/Disable VLAN Stripping */ /* Enable/Disable VLAN Stripping */
u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG); u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG);
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
u32temp &= 0xfdffffff; u32temp &= 0xfdffffff;
else else
u32temp |= 0x02000000; u32temp |= 0x02000000;
@ -1614,10 +1614,10 @@ static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_feat
* Since there is no support for separate rx/tx vlan accel * Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx. * enable/disable make sure tx flag is always in same state as rx.
*/ */
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_TX; features |= NETIF_F_HW_VLAN_CTAG_TX;
else else
features &= ~NETIF_F_HW_VLAN_TX; features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features; return features;
} }
@ -1628,7 +1628,7 @@ static int nes_set_features(struct net_device *netdev, netdev_features_t feature
struct nes_device *nesdev = nesvnic->nesdev; struct nes_device *nesdev = nesvnic->nesdev;
u32 changed = netdev->features ^ features; u32 changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX) if (changed & NETIF_F_HW_VLAN_CTAG_RX)
nes_vlan_mode(netdev, nesdev, features); nes_vlan_mode(netdev, nesdev, features);
return 0; return 0;
@ -1706,11 +1706,11 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
netdev->dev_addr[4] = (u8)(u64temp>>8); netdev->dev_addr[4] = (u8)(u64temp>>8);
netdev->dev_addr[5] = (u8)u64temp; netdev->dev_addr[5] = (u8)u64temp;
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX; netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV))
netdev->hw_features |= NETIF_F_TSO; netdev->hw_features |= NETIF_F_TSO;
netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX; netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_features |= NETIF_F_LRO; netdev->hw_features |= NETIF_F_LRO;
nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d," nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"

View File

@ -514,7 +514,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
skb->dev = client_info->slave->dev; skb->dev = client_info->slave->dev;
if (client_info->tag) { if (client_info->tag) {
skb = vlan_put_tag(skb, client_info->vlan_id); skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id);
if (!skb) { if (!skb) {
pr_err("%s: Error: failed to insert VLAN tag\n", pr_err("%s: Error: failed to insert VLAN tag\n",
client_info->slave->bond->dev->name); client_info->slave->bond->dev->name);
@ -1014,7 +1014,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
continue; continue;
} }
skb = vlan_put_tag(skb, vlan->vlan_id); skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan->vlan_id);
if (!skb) { if (!skb) {
pr_err("%s: Error: failed to insert VLAN tag\n", pr_err("%s: Error: failed to insert VLAN tag\n",
bond->dev->name); bond->dev->name);

View File

@ -428,14 +428,15 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
* @bond_dev: bonding net device that got called * @bond_dev: bonding net device that got called
* @vid: vlan id being added * @vid: vlan id being added
*/ */
static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid) static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
__be16 proto, u16 vid)
{ {
struct bonding *bond = netdev_priv(bond_dev); struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *stop_at; struct slave *slave, *stop_at;
int i, res; int i, res;
bond_for_each_slave(bond, slave, i) { bond_for_each_slave(bond, slave, i) {
res = vlan_vid_add(slave->dev, vid); res = vlan_vid_add(slave->dev, proto, vid);
if (res) if (res)
goto unwind; goto unwind;
} }
@ -453,7 +454,7 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
/* unwind from head to the slave that failed */ /* unwind from head to the slave that failed */
stop_at = slave; stop_at = slave;
bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
vlan_vid_del(slave->dev, vid); vlan_vid_del(slave->dev, proto, vid);
return res; return res;
} }
@ -463,14 +464,15 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
* @bond_dev: bonding net device that got called * @bond_dev: bonding net device that got called
* @vid: vlan id being removed * @vid: vlan id being removed
*/ */
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid) static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
__be16 proto, u16 vid)
{ {
struct bonding *bond = netdev_priv(bond_dev); struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave; struct slave *slave;
int i, res; int i, res;
bond_for_each_slave(bond, slave, i) bond_for_each_slave(bond, slave, i)
vlan_vid_del(slave->dev, vid); vlan_vid_del(slave->dev, proto, vid);
res = bond_del_vlan(bond, vid); res = bond_del_vlan(bond, vid);
if (res) { if (res) {
@ -488,7 +490,8 @@ static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *sla
int res; int res;
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
res = vlan_vid_add(slave_dev, vlan->vlan_id); res = vlan_vid_add(slave_dev, htons(ETH_P_8021Q),
vlan->vlan_id);
if (res) if (res)
pr_warning("%s: Failed to add vlan id %d to device %s\n", pr_warning("%s: Failed to add vlan id %d to device %s\n",
bond->dev->name, vlan->vlan_id, bond->dev->name, vlan->vlan_id,
@ -504,7 +507,7 @@ static void bond_del_vlans_from_slave(struct bonding *bond,
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
if (!vlan->vlan_id) if (!vlan->vlan_id)
continue; continue;
vlan_vid_del(slave_dev, vlan->vlan_id); vlan_vid_del(slave_dev, htons(ETH_P_8021Q), vlan->vlan_id);
} }
} }
@ -779,7 +782,7 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
/* rejoin all groups on vlan devices */ /* rejoin all groups on vlan devices */
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
vlan_dev = __vlan_find_dev_deep(bond_dev, vlan_dev = __vlan_find_dev_deep(bond_dev, htons(ETH_P_8021Q),
vlan->vlan_id); vlan->vlan_id);
if (vlan_dev) if (vlan_dev)
__bond_resend_igmp_join_requests(vlan_dev); __bond_resend_igmp_join_requests(vlan_dev);
@ -2509,7 +2512,8 @@ static int bond_has_this_ip(struct bonding *bond, __be32 ip)
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
rcu_read_lock(); rcu_read_lock();
vlan_dev = __vlan_find_dev_deep(bond->dev, vlan->vlan_id); vlan_dev = __vlan_find_dev_deep(bond->dev, htons(ETH_P_8021Q),
vlan->vlan_id);
rcu_read_unlock(); rcu_read_unlock();
if (vlan_dev && ip == bond_confirm_addr(vlan_dev, 0, ip)) if (vlan_dev && ip == bond_confirm_addr(vlan_dev, 0, ip))
return 1; return 1;
@ -2538,7 +2542,7 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
return; return;
} }
if (vlan_id) { if (vlan_id) {
skb = vlan_put_tag(skb, vlan_id); skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (!skb) { if (!skb) {
pr_err("failed to insert VLAN tag\n"); pr_err("failed to insert VLAN tag\n");
return; return;
@ -2600,6 +2604,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
rcu_read_lock(); rcu_read_lock();
vlan_dev = __vlan_find_dev_deep(bond->dev, vlan_dev = __vlan_find_dev_deep(bond->dev,
htons(ETH_P_8021Q),
vlan->vlan_id); vlan->vlan_id);
rcu_read_unlock(); rcu_read_unlock();
if (vlan_dev == rt->dst.dev) { if (vlan_dev == rt->dst.dev) {
@ -4322,9 +4327,9 @@ static void bond_setup(struct net_device *bond_dev)
*/ */
bond_dev->hw_features = BOND_VLAN_FEATURES | bond_dev->hw_features = BOND_VLAN_FEATURES |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_FILTER; NETIF_F_HW_VLAN_CTAG_FILTER;
bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM); bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
bond_dev->features |= bond_dev->hw_features; bond_dev->features |= bond_dev->hw_features;

View File

@ -1690,7 +1690,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
skb_checksum_none_assert(new_skb); skb_checksum_none_assert(new_skb);
if (rx->rxStatus & TYPHOON_RX_VLAN) if (rx->rxStatus & TYPHOON_RX_VLAN)
__vlan_hwaccel_put_tag(new_skb, __vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q),
ntohl(rx->vlanTag) & 0xffff); ntohl(rx->vlanTag) & 0xffff);
netif_receive_skb(new_skb); netif_receive_skb(new_skb);
@ -2445,9 +2445,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* settings -- so we only allow the user to toggle the TX processing. * settings -- so we only allow the user to toggle the TX processing.
*/ */
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HW_VLAN_TX; NETIF_F_HW_VLAN_CTAG_TX;
dev->features = dev->hw_features | dev->features = dev->hw_features |
NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM; NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
if(register_netdev(dev) < 0) { if(register_netdev(dev) < 0) {
err_msg = "unable to register netdev"; err_msg = "unable to register netdev";

View File

@ -594,7 +594,8 @@ static const struct ethtool_ops ethtool_ops;
#ifdef VLAN_SUPPORT #ifdef VLAN_SUPPORT
static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) static int netdev_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{ {
struct netdev_private *np = netdev_priv(dev); struct netdev_private *np = netdev_priv(dev);
@ -608,7 +609,8 @@ static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
return 0; return 0;
} }
static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) static int netdev_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{ {
struct netdev_private *np = netdev_priv(dev); struct netdev_private *np = netdev_priv(dev);
@ -702,7 +704,7 @@ static int starfire_init_one(struct pci_dev *pdev,
#endif /* ZEROCOPY */ #endif /* ZEROCOPY */
#ifdef VLAN_SUPPORT #ifdef VLAN_SUPPORT
dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
#endif /* VLAN_RX_KILL_VID */ #endif /* VLAN_RX_KILL_VID */
#ifdef ADDR_64BITS #ifdef ADDR_64BITS
dev->features |= NETIF_F_HIGHDMA; dev->features |= NETIF_F_HIGHDMA;
@ -1496,7 +1498,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
printk(KERN_DEBUG " netdev_rx() vlanid = %d\n", printk(KERN_DEBUG " netdev_rx() vlanid = %d\n",
vlid); vlid);
} }
__vlan_hwaccel_put_tag(skb, vlid); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
} }
#endif /* VLAN_SUPPORT */ #endif /* VLAN_SUPPORT */
netif_receive_skb(skb); netif_receive_skb(skb);

View File

@ -472,7 +472,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
ap->name = pci_name(pdev); ap->name = pci_name(pdev);
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->watchdog_timeo = 5*HZ; dev->watchdog_timeo = 5*HZ;
@ -2019,7 +2019,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
/* send it up */ /* send it up */
if ((bd_flags & BD_FLG_VLAN_TAG)) if ((bd_flags & BD_FLG_VLAN_TAG))
__vlan_hwaccel_put_tag(skb, retdesc->vlan); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), retdesc->vlan);
netif_rx(skb); netif_rx(skb);
dev->stats.rx_packets++; dev->stats.rx_packets++;

View File

@ -793,7 +793,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
#if AMD8111E_VLAN_TAG_USED #if AMD8111E_VLAN_TAG_USED
if (vtag == TT_VLAN_TAGGED){ if (vtag == TT_VLAN_TAGGED){
u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
__vlan_hwaccel_put_tag(skb, vlan_tag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
} }
#endif #endif
netif_receive_skb(skb); netif_receive_skb(skb);
@ -1869,7 +1869,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev); SET_NETDEV_DEV(dev, &pdev->dev);
#if AMD8111E_VLAN_TAG_USED #if AMD8111E_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ; dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX ;
#endif #endif
lp = netdev_priv(dev); lp = netdev_priv(dev);
@ -1907,7 +1907,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32); netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
#if AMD8111E_VLAN_TAG_USED #if AMD8111E_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
#endif #endif
/* Probe the external PHY */ /* Probe the external PHY */
amd8111e_probe_ext_phy(dev); amd8111e_probe_ext_phy(dev);

View File

@ -417,7 +417,7 @@ static void atl1c_set_multi(struct net_device *netdev)
static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{ {
if (features & NETIF_F_HW_VLAN_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) {
/* enable VLAN tag insert/strip */ /* enable VLAN tag insert/strip */
*mac_ctrl_data |= MAC_CTRL_RMV_VLAN; *mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
} else { } else {
@ -494,10 +494,10 @@ static netdev_features_t atl1c_fix_features(struct net_device *netdev,
* Since there is no support for separate rx/tx vlan accel * Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx. * enable/disable make sure tx flag is always in same state as rx.
*/ */
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_TX; features |= NETIF_F_HW_VLAN_CTAG_TX;
else else
features &= ~NETIF_F_HW_VLAN_TX; features &= ~NETIF_F_HW_VLAN_CTAG_TX;
if (netdev->mtu > MAX_TSO_FRAME_SIZE) if (netdev->mtu > MAX_TSO_FRAME_SIZE)
features &= ~(NETIF_F_TSO | NETIF_F_TSO6); features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
@ -510,7 +510,7 @@ static int atl1c_set_features(struct net_device *netdev,
{ {
netdev_features_t changed = netdev->features ^ features; netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX) if (changed & NETIF_F_HW_VLAN_CTAG_RX)
atl1c_vlan_mode(netdev, features); atl1c_vlan_mode(netdev, features);
return 0; return 0;
@ -1809,7 +1809,7 @@ static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
AT_TAG_TO_VLAN(rrs->vlan_tag, vlan); AT_TAG_TO_VLAN(rrs->vlan_tag, vlan);
vlan = le16_to_cpu(vlan); vlan = le16_to_cpu(vlan);
__vlan_hwaccel_put_tag(skb, vlan); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
} }
netif_receive_skb(skb); netif_receive_skb(skb);
@ -2475,13 +2475,13 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
atl1c_set_ethtool_ops(netdev); atl1c_set_ethtool_ops(netdev);
/* TODO: add when ready */ /* TODO: add when ready */
netdev->hw_features = NETIF_F_SG | netdev->hw_features = NETIF_F_SG |
NETIF_F_HW_CSUM | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_TSO | NETIF_F_TSO |
NETIF_F_TSO6; NETIF_F_TSO6;
netdev->features = netdev->hw_features | netdev->features = netdev->hw_features |
NETIF_F_HW_VLAN_TX; NETIF_F_HW_VLAN_CTAG_TX;
return 0; return 0;
} }

View File

@ -315,7 +315,7 @@ static void atl1e_set_multi(struct net_device *netdev)
static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{ {
if (features & NETIF_F_HW_VLAN_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) {
/* enable VLAN tag insert/strip */ /* enable VLAN tag insert/strip */
*mac_ctrl_data |= MAC_CTRL_RMV_VLAN; *mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
} else { } else {
@ -378,10 +378,10 @@ static netdev_features_t atl1e_fix_features(struct net_device *netdev,
* Since there is no support for separate rx/tx vlan accel * Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx. * enable/disable make sure tx flag is always in same state as rx.
*/ */
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_TX; features |= NETIF_F_HW_VLAN_CTAG_TX;
else else
features &= ~NETIF_F_HW_VLAN_TX; features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features; return features;
} }
@ -391,7 +391,7 @@ static int atl1e_set_features(struct net_device *netdev,
{ {
netdev_features_t changed = netdev->features ^ features; netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX) if (changed & NETIF_F_HW_VLAN_CTAG_RX)
atl1e_vlan_mode(netdev, features); atl1e_vlan_mode(netdev, features);
return 0; return 0;
@ -1435,7 +1435,7 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
netdev_dbg(netdev, netdev_dbg(netdev,
"RXD VLAN TAG<RRD>=0x%04x\n", "RXD VLAN TAG<RRD>=0x%04x\n",
prrs->vtag); prrs->vtag);
__vlan_hwaccel_put_tag(skb, vlan_tag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
} }
netif_receive_skb(skb); netif_receive_skb(skb);
@ -2198,9 +2198,9 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
atl1e_set_ethtool_ops(netdev); atl1e_set_ethtool_ops(netdev);
netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO | netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
NETIF_F_HW_VLAN_RX; NETIF_F_HW_VLAN_CTAG_RX;
netdev->features = netdev->hw_features | NETIF_F_LLTX | netdev->features = netdev->hw_features | NETIF_F_LLTX |
NETIF_F_HW_VLAN_TX; NETIF_F_HW_VLAN_CTAG_TX;
return 0; return 0;
} }

View File

@ -2024,7 +2024,7 @@ static int atl1_intr_rx(struct atl1_adapter *adapter, int budget)
((rrd->vlan_tag & 7) << 13) | ((rrd->vlan_tag & 7) << 13) |
((rrd->vlan_tag & 8) << 9); ((rrd->vlan_tag & 8) << 9);
__vlan_hwaccel_put_tag(skb, vlan_tag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
} }
netif_receive_skb(skb); netif_receive_skb(skb);
@ -3018,10 +3018,10 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features = NETIF_F_HW_CSUM; netdev->features = NETIF_F_HW_CSUM;
netdev->features |= NETIF_F_SG; netdev->features |= NETIF_F_SG;
netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO | netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO |
NETIF_F_HW_VLAN_RX; NETIF_F_HW_VLAN_CTAG_RX;
/* is this valid? see atl1_setup_mac_ctrl() */ /* is this valid? see atl1_setup_mac_ctrl() */
netdev->features |= NETIF_F_RXCSUM; netdev->features |= NETIF_F_RXCSUM;

View File

@ -363,7 +363,7 @@ static inline void atl2_irq_disable(struct atl2_adapter *adapter)
static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl) static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl)
{ {
if (features & NETIF_F_HW_VLAN_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) {
/* enable VLAN tag insert/strip */ /* enable VLAN tag insert/strip */
*ctrl |= MAC_CTRL_RMV_VLAN; *ctrl |= MAC_CTRL_RMV_VLAN;
} else { } else {
@ -399,10 +399,10 @@ static netdev_features_t atl2_fix_features(struct net_device *netdev,
* Since there is no support for separate rx/tx vlan accel * Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx. * enable/disable make sure tx flag is always in same state as rx.
*/ */
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_TX; features |= NETIF_F_HW_VLAN_CTAG_TX;
else else
features &= ~NETIF_F_HW_VLAN_TX; features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features; return features;
} }
@ -412,7 +412,7 @@ static int atl2_set_features(struct net_device *netdev,
{ {
netdev_features_t changed = netdev->features ^ features; netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX) if (changed & NETIF_F_HW_VLAN_CTAG_RX)
atl2_vlan_mode(netdev, features); atl2_vlan_mode(netdev, features);
return 0; return 0;
@ -452,7 +452,7 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
((rxd->status.vtag&7) << 13) | ((rxd->status.vtag&7) << 13) |
((rxd->status.vtag&8) << 9); ((rxd->status.vtag&8) << 9);
__vlan_hwaccel_put_tag(skb, vlan_tag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
} }
netif_rx(skb); netif_rx(skb);
netdev->stats.rx_bytes += rx_size; netdev->stats.rx_bytes += rx_size;
@ -887,7 +887,7 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
skb->len-copy_len); skb->len-copy_len);
offset = ((u32)(skb->len-copy_len + 3) & ~3); offset = ((u32)(skb->len-copy_len + 3) & ~3);
} }
#ifdef NETIF_F_HW_VLAN_TX #ifdef NETIF_F_HW_VLAN_CTAG_TX
if (vlan_tx_tag_present(skb)) { if (vlan_tx_tag_present(skb)) {
u16 vlan_tag = vlan_tx_tag_get(skb); u16 vlan_tag = vlan_tx_tag_get(skb);
vlan_tag = (vlan_tag << 4) | vlan_tag = (vlan_tag << 4) |
@ -1413,8 +1413,8 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -EIO; err = -EIO;
netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_RX; netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
/* Init PHY as early as possible due to power saving issue */ /* Init PHY as early as possible due to power saving issue */
atl2_phy_init(&adapter->hw); atl2_phy_init(&adapter->hw);

View File

@ -220,7 +220,7 @@ static void atlx_link_chg_task(struct work_struct *work)
static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl) static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl)
{ {
if (features & NETIF_F_HW_VLAN_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) {
/* enable VLAN tag insert/strip */ /* enable VLAN tag insert/strip */
*ctrl |= MAC_CTRL_RMV_VLAN; *ctrl |= MAC_CTRL_RMV_VLAN;
} else { } else {
@ -257,10 +257,10 @@ static netdev_features_t atlx_fix_features(struct net_device *netdev,
* Since there is no support for separate rx/tx vlan accel * Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx. * enable/disable make sure tx flag is always in same state as rx.
*/ */
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_TX; features |= NETIF_F_HW_VLAN_CTAG_TX;
else else
features &= ~NETIF_F_HW_VLAN_TX; features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features; return features;
} }
@ -270,7 +270,7 @@ static int atlx_set_features(struct net_device *netdev,
{ {
netdev_features_t changed = netdev->features ^ features; netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX) if (changed & NETIF_F_HW_VLAN_CTAG_RX)
atlx_vlan_mode(netdev, features); atlx_vlan_mode(netdev, features);
return 0; return 0;

View File

@ -3211,7 +3211,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
} }
if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
skb->protocol = eth_type_trans(skb, bp->dev); skb->protocol = eth_type_trans(skb, bp->dev);
@ -3553,7 +3553,7 @@ bnx2_set_rx_mode(struct net_device *dev)
rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS | rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN; sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
if (!(dev->features & NETIF_F_HW_VLAN_RX) && if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
if (dev->flags & IFF_PROMISC) { if (dev->flags & IFF_PROMISC) {
@ -7695,7 +7695,7 @@ bnx2_fix_features(struct net_device *dev, netdev_features_t features)
struct bnx2 *bp = netdev_priv(dev); struct bnx2 *bp = netdev_priv(dev);
if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
features |= NETIF_F_HW_VLAN_RX; features |= NETIF_F_HW_VLAN_CTAG_RX;
return features; return features;
} }
@ -7706,12 +7706,12 @@ bnx2_set_features(struct net_device *dev, netdev_features_t features)
struct bnx2 *bp = netdev_priv(dev); struct bnx2 *bp = netdev_priv(dev);
/* TSO with VLAN tag won't work with current firmware */ /* TSO with VLAN tag won't work with current firmware */
if (features & NETIF_F_HW_VLAN_TX) if (features & NETIF_F_HW_VLAN_CTAG_TX)
dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO); dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
else else
dev->vlan_features &= ~NETIF_F_ALL_TSO; dev->vlan_features &= ~NETIF_F_ALL_TSO;
if ((!!(features & NETIF_F_HW_VLAN_RX) != if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
!!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) && !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
netif_running(dev)) { netif_running(dev)) {
bnx2_netif_stop(bp, false); bnx2_netif_stop(bp, false);
@ -8551,7 +8551,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
dev->vlan_features = dev->hw_features; dev->vlan_features = dev->hw_features;
dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= dev->hw_features; dev->features |= dev->hw_features;
dev->priv_flags |= IFF_UNICAST_FLT; dev->priv_flags |= IFF_UNICAST_FLT;

View File

@ -719,7 +719,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages, if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
skb, cqe, cqe_idx)) { skb, cqe, cqe_idx)) {
if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
bnx2x_gro_receive(bp, fp, skb); bnx2x_gro_receive(bp, fp, skb);
} else { } else {
DP(NETIF_MSG_RX_STATUS, DP(NETIF_MSG_RX_STATUS,
@ -994,7 +994,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (le16_to_cpu(cqe_fp->pars_flags.flags) & if (le16_to_cpu(cqe_fp->pars_flags.flags) &
PARSING_FLAGS_VLAN) PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb, __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(cqe_fp->vlan_tag)); le16_to_cpu(cqe_fp->vlan_tag));
napi_gro_receive(&fp->napi, skb); napi_gro_receive(&fp->napi, skb);

View File

@ -12027,7 +12027,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX; NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
if (!CHIP_IS_E1x(bp)) { if (!CHIP_IS_E1x(bp)) {
dev->hw_features |= NETIF_F_GSO_GRE; dev->hw_features |= NETIF_F_GSO_GRE;
dev->hw_enc_features = dev->hw_enc_features =
@ -12039,7 +12039,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX; dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
if (bp->flags & USING_DAC_FLAG) if (bp->flags & USING_DAC_FLAG)
dev->features |= NETIF_F_HIGHDMA; dev->features |= NETIF_F_HIGHDMA;

View File

@ -6715,7 +6715,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
if (desc->type_flags & RXD_FLAG_VLAN && if (desc->type_flags & RXD_FLAG_VLAN &&
!(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
__vlan_hwaccel_put_tag(skb, __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
desc->err_vlan & RXD_VLAN_MASK); desc->err_vlan & RXD_VLAN_MASK);
napi_gro_receive(&tnapi->napi, skb); napi_gro_receive(&tnapi->napi, skb);
@ -17197,7 +17197,7 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_init_bufmgr_config(tp); tg3_init_bufmgr_config(tp);
features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
/* 5700 B0 chips do not support checksumming correctly due /* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs. * to hardware bugs.

View File

@ -610,7 +610,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
rcb->rxq->rx_bytes += length; rcb->rxq->rx_bytes += length;
if (flags & BNA_CQ_EF_VLAN) if (flags & BNA_CQ_EF_VLAN)
__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag)); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
napi_gro_frags(&rx_ctrl->napi); napi_gro_frags(&rx_ctrl->napi);
@ -3068,8 +3068,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
} }
static int static int
bnad_vlan_rx_add_vid(struct net_device *netdev, bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
unsigned short vid)
{ {
struct bnad *bnad = netdev_priv(netdev); struct bnad *bnad = netdev_priv(netdev);
unsigned long flags; unsigned long flags;
@ -3090,8 +3089,7 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
} }
static int static int
bnad_vlan_rx_kill_vid(struct net_device *netdev, bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
unsigned short vid)
{ {
struct bnad *bnad = netdev_priv(netdev); struct bnad *bnad = netdev_priv(netdev);
unsigned long flags; unsigned long flags;
@ -3170,14 +3168,14 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX; NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;
netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6; NETIF_F_TSO | NETIF_F_TSO6;
netdev->features |= netdev->hw_features | netdev->features |= netdev->hw_features |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
if (using_dac) if (using_dac)
netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HIGHDMA;

View File

@ -856,10 +856,10 @@ static netdev_features_t t1_fix_features(struct net_device *dev,
* Since there is no support for separate rx/tx vlan accel * Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx. * enable/disable make sure tx flag is always in same state as rx.
*/ */
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_TX; features |= NETIF_F_HW_VLAN_CTAG_TX;
else else
features &= ~NETIF_F_HW_VLAN_TX; features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features; return features;
} }
@ -869,7 +869,7 @@ static int t1_set_features(struct net_device *dev, netdev_features_t features)
netdev_features_t changed = dev->features ^ features; netdev_features_t changed = dev->features ^ features;
struct adapter *adapter = dev->ml_priv; struct adapter *adapter = dev->ml_priv;
if (changed & NETIF_F_HW_VLAN_RX) if (changed & NETIF_F_HW_VLAN_CTAG_RX)
t1_vlan_mode(adapter, features); t1_vlan_mode(adapter, features);
return 0; return 0;
@ -1085,8 +1085,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HIGHDMA;
if (vlan_tso_capable(adapter)) { if (vlan_tso_capable(adapter)) {
netdev->features |= netdev->features |=
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; NETIF_F_HW_VLAN_CTAG_TX |
netdev->hw_features |= NETIF_F_HW_VLAN_RX; NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
/* T204: disable TSO */ /* T204: disable TSO */
if (!(is_T2(adapter)) || bi->port_number != 4) { if (!(is_T2(adapter)) || bi->port_number != 4) {

View File

@ -734,7 +734,7 @@ void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
{ {
struct sge *sge = adapter->sge; struct sge *sge = adapter->sge;
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
sge->sge_control |= F_VLAN_XTRACT; sge->sge_control |= F_VLAN_XTRACT;
else else
sge->sge_control &= ~F_VLAN_XTRACT; sge->sge_control &= ~F_VLAN_XTRACT;
@ -1386,7 +1386,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
if (p->vlan_valid) { if (p->vlan_valid) {
st->vlan_xtract++; st->vlan_xtract++;
__vlan_hwaccel_put_tag(skb, ntohs(p->vlan)); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
} }
netif_receive_skb(skb); netif_receive_skb(skb);
} }

View File

@ -1181,14 +1181,15 @@ static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
if (adapter->params.rev > 0) { if (adapter->params.rev > 0) {
t3_set_vlan_accel(adapter, 1 << pi->port_id, t3_set_vlan_accel(adapter, 1 << pi->port_id,
features & NETIF_F_HW_VLAN_RX); features & NETIF_F_HW_VLAN_CTAG_RX);
} else { } else {
/* single control for all ports */ /* single control for all ports */
unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX; unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;
for_each_port(adapter, i) for_each_port(adapter, i)
have_vlans |= have_vlans |=
adapter->port[i]->features & NETIF_F_HW_VLAN_RX; adapter->port[i]->features &
NETIF_F_HW_VLAN_CTAG_RX;
t3_set_vlan_accel(adapter, 1, have_vlans); t3_set_vlan_accel(adapter, 1, have_vlans);
} }
@ -2563,10 +2564,10 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev,
* Since there is no support for separate rx/tx vlan accel * Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx. * enable/disable make sure tx flag is always in same state as rx.
*/ */
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_TX; features |= NETIF_F_HW_VLAN_CTAG_TX;
else else
features &= ~NETIF_F_HW_VLAN_TX; features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features; return features;
} }
@ -2575,7 +2576,7 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{ {
netdev_features_t changed = dev->features ^ features; netdev_features_t changed = dev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX) if (changed & NETIF_F_HW_VLAN_CTAG_RX)
cxgb_vlan_mode(dev, features); cxgb_vlan_mode(dev, features);
return 0; return 0;
@ -3288,8 +3289,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->mem_start = mmio_start; netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len - 1; netdev->mem_end = mmio_start + mmio_len - 1;
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX; NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX; netdev->features |= netdev->hw_features |
NETIF_F_HW_VLAN_CTAG_TX;
netdev->vlan_features |= netdev->features & VLAN_FEAT; netdev->vlan_features |= netdev->features & VLAN_FEAT;
if (pci_using_dac) if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HIGHDMA;

View File

@ -185,7 +185,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) { if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
rcu_read_lock(); rcu_read_lock();
if (vlan && vlan != VLAN_VID_MASK) { if (vlan && vlan != VLAN_VID_MASK) {
dev = __vlan_find_dev_deep(dev, vlan); dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan);
} else if (netif_is_bond_slave(dev)) { } else if (netif_is_bond_slave(dev)) {
struct net_device *upper_dev; struct net_device *upper_dev;

View File

@ -2030,7 +2030,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
if (p->vlan_valid) { if (p->vlan_valid) {
qs->port_stats[SGE_PSTAT_VLANEX]++; qs->port_stats[SGE_PSTAT_VLANEX]++;
__vlan_hwaccel_put_tag(skb, ntohs(p->vlan)); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
} }
if (rq->polling) { if (rq->polling) {
if (lro) if (lro)
@ -2132,7 +2132,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
if (cpl->vlan_valid) { if (cpl->vlan_valid) {
qs->port_stats[SGE_PSTAT_VLANEX]++; qs->port_stats[SGE_PSTAT_VLANEX]++;
__vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan)); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
} }
napi_gro_frags(&qs->napi); napi_gro_frags(&qs->napi);
} }

View File

@ -559,7 +559,7 @@ static int link_start(struct net_device *dev)
* that step explicitly. * that step explicitly.
*/ */
ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1, ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
!!(dev->features & NETIF_F_HW_VLAN_RX), true); !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (ret == 0) { if (ret == 0) {
ret = t4_change_mac(pi->adapter, mb, pi->viid, ret = t4_change_mac(pi->adapter, mb, pi->viid,
pi->xact_addr_filt, dev->dev_addr, true, pi->xact_addr_filt, dev->dev_addr, true,
@ -2722,14 +2722,14 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
netdev_features_t changed = dev->features ^ features; netdev_features_t changed = dev->features ^ features;
int err; int err;
if (!(changed & NETIF_F_HW_VLAN_RX)) if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
return 0; return 0;
err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
-1, -1, -1, -1, -1, -1,
!!(features & NETIF_F_HW_VLAN_RX), true); !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (unlikely(err)) if (unlikely(err))
dev->features = features ^ NETIF_F_HW_VLAN_RX; dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
return err; return err;
} }
@ -5628,7 +5628,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features = NETIF_F_SG | TSO_FLAGS | netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_RXHASH |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (highdma) if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA; netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features; netdev->features |= netdev->hw_features;

View File

@ -1633,7 +1633,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->rxhash = (__force u32)pkt->rsshdr.hash_val; skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
if (unlikely(pkt->vlan_ex)) { if (unlikely(pkt->vlan_ex)) {
__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan)); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
rxq->stats.vlan_ex++; rxq->stats.vlan_ex++;
} }
ret = napi_gro_frags(&rxq->rspq.napi); ret = napi_gro_frags(&rxq->rspq.napi);
@ -1705,7 +1705,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
skb_checksum_none_assert(skb); skb_checksum_none_assert(skb);
if (unlikely(pkt->vlan_ex)) { if (unlikely(pkt->vlan_ex)) {
__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan)); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
rxq->stats.vlan_ex++; rxq->stats.vlan_ex++;
} }
netif_receive_skb(skb); netif_receive_skb(skb);

View File

@ -1100,10 +1100,10 @@ static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
* Since there is no support for separate rx/tx vlan accel * Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx. * enable/disable make sure tx flag is always in same state as rx.
*/ */
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_TX; features |= NETIF_F_HW_VLAN_CTAG_TX;
else else
features &= ~NETIF_F_HW_VLAN_TX; features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features; return features;
} }
@ -1114,9 +1114,9 @@ static int cxgb4vf_set_features(struct net_device *dev,
struct port_info *pi = netdev_priv(dev); struct port_info *pi = netdev_priv(dev);
netdev_features_t changed = dev->features ^ features; netdev_features_t changed = dev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX) if (changed & NETIF_F_HW_VLAN_CTAG_RX)
t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1, t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
features & NETIF_F_HW_VLAN_TX, 0); features & NETIF_F_HW_VLAN_CTAG_TX, 0);
return 0; return 0;
} }
@ -2623,11 +2623,12 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->hw_features = NETIF_F_SG | TSO_FLAGS | netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM; NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
netdev->vlan_features = NETIF_F_SG | TSO_FLAGS | netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HIGHDMA; NETIF_F_HIGHDMA;
netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_TX; netdev->features = netdev->hw_features |
NETIF_F_HW_VLAN_CTAG_TX;
if (pci_using_dac) if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HIGHDMA;

View File

@ -1482,7 +1482,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb_record_rx_queue(skb, rxq->rspq.idx); skb_record_rx_queue(skb, rxq->rspq.idx);
if (pkt->vlan_ex) { if (pkt->vlan_ex) {
__vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan)); __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
be16_to_cpu(pkt->vlan));
rxq->stats.vlan_ex++; rxq->stats.vlan_ex++;
} }
ret = napi_gro_frags(&rxq->rspq.napi); ret = napi_gro_frags(&rxq->rspq.napi);
@ -1551,7 +1552,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
if (pkt->vlan_ex) { if (pkt->vlan_ex) {
rxq->stats.vlan_ex++; rxq->stats.vlan_ex++;
__vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan)); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
} }
netif_receive_skb(skb); netif_receive_skb(skb);

View File

@ -212,7 +212,7 @@ int enic_dev_deinit_done(struct enic *enic, int *status)
} }
/* rtnl lock is held */ /* rtnl lock is held */
int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid) int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{ {
struct enic *enic = netdev_priv(netdev); struct enic *enic = netdev_priv(netdev);
int err; int err;
@ -225,7 +225,7 @@ int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
} }
/* rtnl lock is held */ /* rtnl lock is held */
int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{ {
struct enic *enic = netdev_priv(netdev); struct enic *enic = netdev_priv(netdev);
int err; int err;

View File

@ -46,8 +46,8 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
int broadcast, int promisc, int allmulti); int broadcast, int promisc, int allmulti);
int enic_dev_add_addr(struct enic *enic, u8 *addr); int enic_dev_add_addr(struct enic *enic, u8 *addr);
int enic_dev_del_addr(struct enic *enic, u8 *addr); int enic_dev_del_addr(struct enic *enic, u8 *addr);
int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid); int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
int enic_dev_notify_unset(struct enic *enic); int enic_dev_notify_unset(struct enic *enic);
int enic_dev_hang_notify(struct enic *enic); int enic_dev_hang_notify(struct enic *enic);
int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic); int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);

View File

@ -1300,7 +1300,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
} }
if (vlan_stripped) if (vlan_stripped)
__vlan_hwaccel_put_tag(skb, vlan_tci); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
if (netdev->features & NETIF_F_GRO) if (netdev->features & NETIF_F_GRO)
napi_gro_receive(&enic->napi[q_number], skb); napi_gro_receive(&enic->napi[q_number], skb);
@ -2496,9 +2496,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->watchdog_timeo = 2 * HZ; netdev->watchdog_timeo = 2 * HZ;
netdev->ethtool_ops = &enic_ethtool_ops; netdev->ethtool_ops = &enic_ethtool_ops;
netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (ENIC_SETTING(enic, LOOP)) { if (ENIC_SETTING(enic, LOOP)) {
netdev->features &= ~NETIF_F_HW_VLAN_TX; netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
enic->loop_enable = 1; enic->loop_enable = 1;
enic->loop_tag = enic->config.loop_tag; enic->loop_tag = enic->config.loop_tag;
dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag); dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);

View File

@ -771,7 +771,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
if (vlan_tx_tag_present(skb)) { if (vlan_tx_tag_present(skb)) {
vlan_tag = be_get_tx_vlan_tag(adapter, skb); vlan_tag = be_get_tx_vlan_tag(adapter, skb);
__vlan_put_tag(skb, vlan_tag); __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
skb->vlan_tci = 0; skb->vlan_tci = 0;
} }
@ -902,7 +902,7 @@ static int be_vid_config(struct be_adapter *adapter)
return status; return status;
} }
static int be_vlan_add_vid(struct net_device *netdev, u16 vid) static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{ {
struct be_adapter *adapter = netdev_priv(netdev); struct be_adapter *adapter = netdev_priv(netdev);
int status = 0; int status = 0;
@ -928,7 +928,7 @@ static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
return status; return status;
} }
static int be_vlan_rem_vid(struct net_device *netdev, u16 vid) static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{ {
struct be_adapter *adapter = netdev_priv(netdev); struct be_adapter *adapter = netdev_priv(netdev);
int status = 0; int status = 0;
@ -1383,7 +1383,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,
if (rxcp->vlanf) if (rxcp->vlanf)
__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
netif_receive_skb(skb); netif_receive_skb(skb);
} }
@ -1439,7 +1439,7 @@ void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
skb->rxhash = rxcp->rss_hash; skb->rxhash = rxcp->rss_hash;
if (rxcp->vlanf) if (rxcp->vlanf)
__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
napi_gro_frags(napi); napi_gro_frags(napi);
} }
@ -3663,12 +3663,12 @@ static void be_netdev_init(struct net_device *netdev)
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_TX; NETIF_F_HW_VLAN_CTAG_TX;
if (be_multi_rxq(adapter)) if (be_multi_rxq(adapter))
netdev->hw_features |= NETIF_F_RXHASH; netdev->hw_features |= NETIF_F_RXHASH;
netdev->features |= netdev->hw_features | netdev->features |= netdev->hw_features |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

View File

@ -386,7 +386,7 @@ static void gfar_init_mac(struct net_device *ndev)
priv->uses_rxfcb = 1; priv->uses_rxfcb = 1;
} }
if (ndev->features & NETIF_F_HW_VLAN_RX) { if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
priv->uses_rxfcb = 1; priv->uses_rxfcb = 1;
} }
@ -1050,8 +1050,9 @@ static int gfar_probe(struct platform_device *ofdev)
} }
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
dev->features |= NETIF_F_HW_VLAN_RX; NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
} }
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
@ -2348,7 +2349,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
local_irq_save(flags); local_irq_save(flags);
lock_rx_qs(priv); lock_rx_qs(priv);
if (features & NETIF_F_HW_VLAN_TX) { if (features & NETIF_F_HW_VLAN_CTAG_TX) {
/* Enable VLAN tag insertion */ /* Enable VLAN tag insertion */
tempval = gfar_read(&regs->tctrl); tempval = gfar_read(&regs->tctrl);
tempval |= TCTRL_VLINS; tempval |= TCTRL_VLINS;
@ -2360,7 +2361,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
gfar_write(&regs->tctrl, tempval); gfar_write(&regs->tctrl, tempval);
} }
if (features & NETIF_F_HW_VLAN_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) {
/* Enable VLAN tag extraction */ /* Enable VLAN tag extraction */
tempval = gfar_read(&regs->rctrl); tempval = gfar_read(&regs->rctrl);
tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
@ -2724,11 +2725,11 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* Tell the skb what kind of packet this is */ /* Tell the skb what kind of packet this is */
skb->protocol = eth_type_trans(skb, dev); skb->protocol = eth_type_trans(skb, dev);
/* There's need to check for NETIF_F_HW_VLAN_RX here. /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
* Even if vlan rx accel is disabled, on some chips * Even if vlan rx accel is disabled, on some chips
* RXFCB_VLN is pseudo randomly set. * RXFCB_VLN is pseudo randomly set.
*/ */
if (dev->features & NETIF_F_HW_VLAN_RX && if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
fcb->flags & RXFCB_VLN) fcb->flags & RXFCB_VLN)
__vlan_hwaccel_put_tag(skb, fcb->vlctl); __vlan_hwaccel_put_tag(skb, fcb->vlctl);

View File

@ -542,7 +542,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
int err = 0, i = 0; int err = 0, i = 0;
netdev_features_t changed = dev->features ^ features; netdev_features_t changed = dev->features ^ features;
if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
gfar_vlan_mode(dev, features); gfar_vlan_mode(dev, features);
if (!(changed & NETIF_F_RXCSUM)) if (!(changed & NETIF_F_RXCSUM))

View File

@ -2110,7 +2110,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{ {
struct ehea_port *port = netdev_priv(dev); struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter; struct ehea_adapter *adapter = port->adapter;
@ -2148,7 +2148,7 @@ static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
return err; return err;
} }
static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{ {
struct ehea_port *port = netdev_priv(dev); struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter; struct ehea_adapter *adapter = port->adapter;
@ -3021,11 +3021,11 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
ehea_set_ethtool_ops(dev); ehea_set_ethtool_ops(dev);
dev->hw_features = NETIF_F_SG | NETIF_F_TSO dev->hw_features = NETIF_F_SG | NETIF_F_TSO
| NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX; | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
| NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
| NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
| NETIF_F_RXCSUM; | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA | dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
NETIF_F_IP_CSUM; NETIF_F_IP_CSUM;
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

View File

@ -166,8 +166,10 @@ static void e1000_vlan_mode(struct net_device *netdev,
netdev_features_t features); netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
bool filter_on); bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid); static int e1000_vlan_rx_add_vid(struct net_device *netdev,
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); __be16 proto, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter); static void e1000_restore_vlan(struct e1000_adapter *adapter);
#ifdef CONFIG_PM #ifdef CONFIG_PM
@ -333,7 +335,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
if (!test_bit(vid, adapter->active_vlans)) { if (!test_bit(vid, adapter->active_vlans)) {
if (hw->mng_cookie.status & if (hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
e1000_vlan_rx_add_vid(netdev, vid); e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
adapter->mng_vlan_id = vid; adapter->mng_vlan_id = vid;
} else { } else {
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@ -341,7 +343,8 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
(vid != old_vid) && (vid != old_vid) &&
!test_bit(old_vid, adapter->active_vlans)) !test_bit(old_vid, adapter->active_vlans))
e1000_vlan_rx_kill_vid(netdev, old_vid); e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
old_vid);
} else { } else {
adapter->mng_vlan_id = vid; adapter->mng_vlan_id = vid;
} }
@ -809,10 +812,10 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
/* Since there is no support for separate Rx/Tx vlan accel /* Since there is no support for separate Rx/Tx vlan accel
* enable/disable make sure Tx flag is always in same state as Rx. * enable/disable make sure Tx flag is always in same state as Rx.
*/ */
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_TX; features |= NETIF_F_HW_VLAN_CTAG_TX;
else else
features &= ~NETIF_F_HW_VLAN_TX; features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features; return features;
} }
@ -823,7 +826,7 @@ static int e1000_set_features(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
netdev_features_t changed = features ^ netdev->features; netdev_features_t changed = features ^ netdev->features;
if (changed & NETIF_F_HW_VLAN_RX) if (changed & NETIF_F_HW_VLAN_CTAG_RX)
e1000_vlan_mode(netdev, features); e1000_vlan_mode(netdev, features);
if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
@ -1058,9 +1061,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->mac_type >= e1000_82543) { if (hw->mac_type >= e1000_82543) {
netdev->hw_features = NETIF_F_SG | netdev->hw_features = NETIF_F_SG |
NETIF_F_HW_CSUM | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_RX; NETIF_F_HW_VLAN_CTAG_RX;
netdev->features = NETIF_F_HW_VLAN_TX | netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_FILTER; NETIF_F_HW_VLAN_CTAG_FILTER;
} }
if ((hw->mac_type >= e1000_82544) && if ((hw->mac_type >= e1000_82544) &&
@ -1457,7 +1460,8 @@ static int e1000_close(struct net_device *netdev)
if ((hw->mng_cookie.status & if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
!test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
adapter->mng_vlan_id);
} }
return 0; return 0;
@ -3999,7 +4003,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
if (status & E1000_RXD_STAT_VP) { if (status & E1000_RXD_STAT_VP) {
u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
__vlan_hwaccel_put_tag(skb, vid); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
} }
napi_gro_receive(&adapter->napi, skb); napi_gro_receive(&adapter->napi, skb);
} }
@ -4785,7 +4789,7 @@ static void __e1000_vlan_mode(struct e1000_adapter *adapter,
u32 ctrl; u32 ctrl;
ctrl = er32(CTRL); ctrl = er32(CTRL);
if (features & NETIF_F_HW_VLAN_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) {
/* enable VLAN tag insert/strip */ /* enable VLAN tag insert/strip */
ctrl |= E1000_CTRL_VME; ctrl |= E1000_CTRL_VME;
} else { } else {
@ -4837,7 +4841,8 @@ static void e1000_vlan_mode(struct net_device *netdev,
e1000_irq_enable(adapter); e1000_irq_enable(adapter);
} }
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) static int e1000_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
@ -4862,7 +4867,8 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
return 0; return 0;
} }
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
@ -4896,7 +4902,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
e1000_vlan_filter_on_off(adapter, true); e1000_vlan_filter_on_off(adapter, true);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
e1000_vlan_rx_add_vid(adapter->netdev, vid); e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
} }
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)

View File

@ -554,7 +554,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
skb->protocol = eth_type_trans(skb, netdev); skb->protocol = eth_type_trans(skb, netdev);
if (staterr & E1000_RXD_STAT_VP) if (staterr & E1000_RXD_STAT_VP)
__vlan_hwaccel_put_tag(skb, tag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
napi_gro_receive(&adapter->napi, skb); napi_gro_receive(&adapter->napi, skb);
} }
@ -2672,7 +2672,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
return work_done; return work_done;
} }
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) static int e1000_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
@ -2697,7 +2698,8 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
return 0; return 0;
} }
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
@ -2741,7 +2743,8 @@ static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
ew32(RCTL, rctl); ew32(RCTL, rctl);
if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
adapter->mng_vlan_id);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
} }
} }
@ -2802,22 +2805,22 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
u16 old_vid = adapter->mng_vlan_id; u16 old_vid = adapter->mng_vlan_id;
if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
e1000_vlan_rx_add_vid(netdev, vid); e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
adapter->mng_vlan_id = vid; adapter->mng_vlan_id = vid;
} }
if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
e1000_vlan_rx_kill_vid(netdev, old_vid); e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
} }
static void e1000_restore_vlan(struct e1000_adapter *adapter) static void e1000_restore_vlan(struct e1000_adapter *adapter)
{ {
u16 vid; u16 vid;
e1000_vlan_rx_add_vid(adapter->netdev, 0); e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
e1000_vlan_rx_add_vid(adapter->netdev, vid); e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
} }
static void e1000_init_manageability_pt(struct e1000_adapter *adapter) static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
@ -3373,7 +3376,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
ew32(RCTL, rctl); ew32(RCTL, rctl);
if (netdev->features & NETIF_F_HW_VLAN_RX) if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
e1000e_vlan_strip_enable(adapter); e1000e_vlan_strip_enable(adapter);
else else
e1000e_vlan_strip_disable(adapter); e1000e_vlan_strip_disable(adapter);
@ -4384,7 +4387,8 @@ static int e1000_close(struct net_device *netdev)
* the same ID is registered on the host OS (let 8021q kill it) * the same ID is registered on the host OS (let 8021q kill it)
*/ */
if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
adapter->mng_vlan_id);
/* If AMT is enabled, let the firmware know that the network /* If AMT is enabled, let the firmware know that the network
* interface is now closed * interface is now closed
@ -6418,7 +6422,7 @@ static int e1000_set_features(struct net_device *netdev,
if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
adapter->flags |= FLAG_TSO_FORCE; adapter->flags |= FLAG_TSO_FORCE;
if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
NETIF_F_RXALL))) NETIF_F_RXALL)))
return 0; return 0;
@ -6629,8 +6633,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Set initial default active device features */ /* Set initial default active device features */
netdev->features = (NETIF_F_SG | netdev->features = (NETIF_F_SG |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_TSO | NETIF_F_TSO |
NETIF_F_TSO6 | NETIF_F_TSO6 |
NETIF_F_RXHASH | NETIF_F_RXHASH |
@ -6644,7 +6648,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_RXALL; netdev->hw_features |= NETIF_F_RXALL;
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
netdev->features |= NETIF_F_HW_VLAN_FILTER; netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->vlan_features |= (NETIF_F_SG | netdev->vlan_features |= (NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO |

View File

@ -159,8 +159,8 @@ static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *); static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *); static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features); static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, u16); static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, u16); static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *); static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
static void igb_ping_all_vfs(struct igb_adapter *); static void igb_ping_all_vfs(struct igb_adapter *);
@ -1860,10 +1860,10 @@ static netdev_features_t igb_fix_features(struct net_device *netdev,
/* Since there is no support for separate Rx/Tx vlan accel /* Since there is no support for separate Rx/Tx vlan accel
* enable/disable make sure Tx flag is always in same state as Rx. * enable/disable make sure Tx flag is always in same state as Rx.
*/ */
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_TX; features |= NETIF_F_HW_VLAN_CTAG_TX;
else else
features &= ~NETIF_F_HW_VLAN_TX; features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features; return features;
} }
@ -1874,7 +1874,7 @@ static int igb_set_features(struct net_device *netdev,
netdev_features_t changed = netdev->features ^ features; netdev_features_t changed = netdev->features ^ features;
struct igb_adapter *adapter = netdev_priv(netdev); struct igb_adapter *adapter = netdev_priv(netdev);
if (changed & NETIF_F_HW_VLAN_RX) if (changed & NETIF_F_HW_VLAN_CTAG_RX)
igb_vlan_mode(netdev, features); igb_vlan_mode(netdev, features);
if (!(changed & NETIF_F_RXALL)) if (!(changed & NETIF_F_RXALL))
@ -2127,15 +2127,15 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_TSO6 | NETIF_F_TSO6 |
NETIF_F_RXHASH | NETIF_F_RXHASH |
NETIF_F_RXCSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_TX; NETIF_F_HW_VLAN_CTAG_TX;
/* copy netdev features into list of user selectable features */ /* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features; netdev->hw_features |= netdev->features;
netdev->hw_features |= NETIF_F_RXALL; netdev->hw_features |= NETIF_F_RXALL;
/* set this bit last since it cannot be part of hw_features */ /* set this bit last since it cannot be part of hw_features */
netdev->features |= NETIF_F_HW_VLAN_FILTER; netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->vlan_features |= NETIF_F_TSO | netdev->vlan_features |= NETIF_F_TSO |
NETIF_F_TSO6 | NETIF_F_TSO6 |
@ -6674,7 +6674,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
if ((dev->features & NETIF_F_HW_VLAN_RX) && if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
u16 vid; u16 vid;
if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
@ -6683,7 +6683,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
else else
vid = le16_to_cpu(rx_desc->wb.upper.vlan); vid = le16_to_cpu(rx_desc->wb.upper.vlan);
__vlan_hwaccel_put_tag(skb, vid); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
} }
skb_record_rx_queue(skb, rx_ring->queue_index); skb_record_rx_queue(skb, rx_ring->queue_index);
@ -6954,7 +6954,7 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
struct igb_adapter *adapter = netdev_priv(netdev); struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl; u32 ctrl, rctl;
bool enable = !!(features & NETIF_F_HW_VLAN_RX); bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
if (enable) { if (enable) {
/* enable VLAN tag insert/strip */ /* enable VLAN tag insert/strip */
@ -6976,7 +6976,8 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
igb_rlpml_set(adapter); igb_rlpml_set(adapter);
} }
static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) static int igb_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{ {
struct igb_adapter *adapter = netdev_priv(netdev); struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
@ -6993,7 +6994,8 @@ static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
return 0; return 0;
} }
static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) static int igb_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{ {
struct igb_adapter *adapter = netdev_priv(netdev); struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
@ -7019,7 +7021,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
igb_vlan_mode(adapter->netdev, adapter->netdev->features); igb_vlan_mode(adapter->netdev, adapter->netdev->features);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
igb_vlan_rx_add_vid(adapter->netdev, vid); igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
} }
int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)

View File

@ -116,7 +116,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
else else
vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
if (test_bit(vid, adapter->active_vlans)) if (test_bit(vid, adapter->active_vlans))
__vlan_hwaccel_put_tag(skb, vid); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
} }
napi_gro_receive(&adapter->rx_ring->napi, skb); napi_gro_receive(&adapter->rx_ring->napi, skb);
@ -1230,7 +1230,8 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
e1000_rlpml_set_vf(hw, max_frame_size); e1000_rlpml_set_vf(hw, max_frame_size);
} }
static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) static int igbvf_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{ {
struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
@ -1243,7 +1244,8 @@ static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
return 0; return 0;
} }
static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{ {
struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
@ -1262,7 +1264,7 @@ static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
u16 vid; u16 vid;
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
igbvf_vlan_rx_add_vid(adapter->netdev, vid); igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
} }
/** /**
@ -2722,9 +2724,9 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_RXCSUM; NETIF_F_RXCSUM;
netdev->features = netdev->hw_features | netdev->features = netdev->hw_features |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_FILTER; NETIF_F_HW_VLAN_CTAG_FILTER;
if (pci_using_dac) if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HIGHDMA;

View File

@ -101,8 +101,10 @@ static void ixgb_tx_timeout_task(struct work_struct *work);
static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter); static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter); static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); static int ixgb_vlan_rx_add_vid(struct net_device *netdev,
static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); __be16 proto, u16 vid);
static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter); static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
@ -332,8 +334,8 @@ ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
* Tx VLAN insertion does not work per HW design when Rx stripping is * Tx VLAN insertion does not work per HW design when Rx stripping is
* disabled. * disabled.
*/ */
if (!(features & NETIF_F_HW_VLAN_RX)) if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
features &= ~NETIF_F_HW_VLAN_TX; features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features; return features;
} }
@ -344,7 +346,7 @@ ixgb_set_features(struct net_device *netdev, netdev_features_t features)
struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_adapter *adapter = netdev_priv(netdev);
netdev_features_t changed = features ^ netdev->features; netdev_features_t changed = features ^ netdev->features;
if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX))) if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
return 0; return 0;
adapter->rx_csum = !!(features & NETIF_F_RXCSUM); adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
@ -479,10 +481,10 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features = NETIF_F_SG | netdev->hw_features = NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO |
NETIF_F_HW_CSUM | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_RX; NETIF_F_HW_VLAN_CTAG_RX;
netdev->features = netdev->hw_features | netdev->features = netdev->hw_features |
NETIF_F_HW_VLAN_FILTER; NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_RXCSUM; netdev->hw_features |= NETIF_F_RXCSUM;
if (pci_using_dac) { if (pci_using_dac) {
@ -1140,7 +1142,7 @@ ixgb_set_multi(struct net_device *netdev)
} }
alloc_failed: alloc_failed:
if (netdev->features & NETIF_F_HW_VLAN_RX) if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
ixgb_vlan_strip_enable(adapter); ixgb_vlan_strip_enable(adapter);
else else
ixgb_vlan_strip_disable(adapter); ixgb_vlan_strip_disable(adapter);
@ -2080,8 +2082,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
skb->protocol = eth_type_trans(skb, netdev); skb->protocol = eth_type_trans(skb, netdev);
if (status & IXGB_RX_DESC_STATUS_VP) if (status & IXGB_RX_DESC_STATUS_VP)
__vlan_hwaccel_put_tag(skb, __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(rx_desc->special)); le16_to_cpu(rx_desc->special));
netif_receive_skb(skb); netif_receive_skb(skb);
@ -2209,7 +2211,7 @@ ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
} }
static int static int
ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{ {
struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_adapter *adapter = netdev_priv(netdev);
u32 vfta, index; u32 vfta, index;
@ -2226,7 +2228,7 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
} }
static int static int
ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{ {
struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_adapter *adapter = netdev_priv(netdev);
u32 vfta, index; u32 vfta, index;
@ -2248,7 +2250,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
u16 vid; u16 vid;
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
ixgb_vlan_rx_add_vid(adapter->netdev, vid); ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
} }
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER

View File

@ -1488,10 +1488,10 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
if ((dev->features & NETIF_F_HW_VLAN_RX) && if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
__vlan_hwaccel_put_tag(skb, vid); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
} }
skb_record_rx_queue(skb, rx_ring->queue_index); skb_record_rx_queue(skb, rx_ring->queue_index);
@ -3467,7 +3467,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
hw->mac.ops.enable_rx_dma(hw, rxctrl); hw->mac.ops.enable_rx_dma(hw, rxctrl);
} }
static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{ {
struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
@ -3479,7 +3480,8 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
return 0; return 0;
} }
static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{ {
struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
@ -3584,10 +3586,10 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{ {
u16 vid; u16 vid;
ixgbe_vlan_rx_add_vid(adapter->netdev, 0); ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
ixgbe_vlan_rx_add_vid(adapter->netdev, vid); ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
} }
/** /**
@ -3722,7 +3724,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
if (netdev->features & NETIF_F_HW_VLAN_RX) if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
ixgbe_vlan_strip_enable(adapter); ixgbe_vlan_strip_enable(adapter);
else else
ixgbe_vlan_strip_disable(adapter); ixgbe_vlan_strip_disable(adapter);
@ -7024,7 +7026,7 @@ static int ixgbe_set_features(struct net_device *netdev,
break; break;
} }
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
ixgbe_vlan_strip_enable(adapter); ixgbe_vlan_strip_enable(adapter);
else else
ixgbe_vlan_strip_disable(adapter); ixgbe_vlan_strip_disable(adapter);
@ -7431,9 +7433,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features = NETIF_F_SG | netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_TSO | NETIF_F_TSO |
NETIF_F_TSO6 | NETIF_F_TSO6 |
NETIF_F_RXHASH | NETIF_F_RXHASH |

View File

@ -35,7 +35,7 @@
#include <linux/ip.h> #include <linux/ip.h>
#include <linux/tcp.h> #include <linux/tcp.h>
#include <linux/ipv6.h> #include <linux/ipv6.h>
#ifdef NETIF_F_HW_VLAN_TX #ifdef NETIF_F_HW_VLAN_CTAG_TX
#include <linux/if_vlan.h> #include <linux/if_vlan.h>
#endif #endif

View File

@ -291,7 +291,7 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
__vlan_hwaccel_put_tag(skb, tag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
napi_gro_receive(&q_vector->napi, skb); napi_gro_receive(&q_vector->napi, skb);
@ -1179,7 +1179,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
} }
} }
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{ {
struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
@ -1204,7 +1205,8 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
return err; return err;
} }
static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{ {
struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
@ -1227,7 +1229,8 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
u16 vid; u16 vid;
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
ixgbevf_vlan_rx_add_vid(adapter->netdev, vid); ixgbevf_vlan_rx_add_vid(adapter->netdev,
htons(ETH_P_8021Q), vid);
} }
static int ixgbevf_write_uc_addr_list(struct net_device *netdev) static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
@ -3410,9 +3413,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_RXCSUM; NETIF_F_RXCSUM;
netdev->features = netdev->hw_features | netdev->features = netdev->hw_features |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_FILTER; NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->vlan_features |= NETIF_F_TSO; netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6; netdev->vlan_features |= NETIF_F_TSO6;

View File

@ -1059,7 +1059,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
u16 vid = le16_to_cpu(rxdesc->descwb.vlan); u16 vid = le16_to_cpu(rxdesc->descwb.vlan);
__vlan_hwaccel_put_tag(skb, vid); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
NET_STAT(jme).rx_bytes += 4; NET_STAT(jme).rx_bytes += 4;
} }
jme->jme_rx(skb); jme->jme_rx(skb);
@ -3030,8 +3030,8 @@ jme_init_one(struct pci_dev *pdev,
NETIF_F_SG | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO |
NETIF_F_TSO6 | NETIF_F_TSO6 |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_RX; NETIF_F_HW_VLAN_CTAG_RX;
if (using_dac) if (using_dac)
netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HIGHDMA;

View File

@ -1421,14 +1421,14 @@ static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features)
struct sky2_hw *hw = sky2->hw; struct sky2_hw *hw = sky2->hw;
u16 port = sky2->port; u16 port = sky2->port;
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_ON); RX_VLAN_STRIP_ON);
else else
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_OFF); RX_VLAN_STRIP_OFF);
if (features & NETIF_F_HW_VLAN_TX) { if (features & NETIF_F_HW_VLAN_CTAG_TX) {
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
TX_VLAN_TAG_ON); TX_VLAN_TAG_ON);
@ -2713,7 +2713,7 @@ static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
struct sk_buff *skb; struct sk_buff *skb;
skb = sky2->rx_ring[sky2->rx_next].skb; skb = sky2->rx_ring[sky2->rx_next].skb;
__vlan_hwaccel_put_tag(skb, be16_to_cpu(length)); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(length));
} }
static void sky2_rx_hash(struct sky2_port *sky2, u32 status) static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
@ -4406,7 +4406,7 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
if (changed & NETIF_F_RXHASH) if (changed & NETIF_F_RXHASH)
rx_set_rss(dev, features); rx_set_rss(dev, features);
if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
sky2_vlan_mode(dev, features); sky2_vlan_mode(dev, features);
return 0; return 0;
@ -4793,7 +4793,8 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
dev->hw_features |= NETIF_F_RXHASH; dev->hw_features |= NETIF_F_RXHASH;
if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) { if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) {
dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features |= SKY2_VLAN_OFFLOADS; dev->vlan_features |= SKY2_VLAN_OFFLOADS;
} }

View File

@ -356,7 +356,8 @@ static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
} }
#endif #endif
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{ {
struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_dev *mdev = priv->mdev;
@ -381,7 +382,8 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
return 0; return 0;
} }
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{ {
struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_dev *mdev = priv->mdev;
@ -2082,8 +2084,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH; dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
dev->features = dev->hw_features | NETIF_F_HIGHDMA | dev->features = dev->hw_features | NETIF_F_HIGHDMA |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_FILTER; NETIF_F_HW_VLAN_CTAG_FILTER;
dev->hw_features |= NETIF_F_LOOPBACK; dev->hw_features |= NETIF_F_LOOPBACK;
if (mdev->dev->caps.steering_mode == if (mdev->dev->caps.steering_mode ==

View File

@ -673,7 +673,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) { cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) {
u16 vid = be16_to_cpu(cqe->sl_vid); u16 vid = be16_to_cpu(cqe->sl_vid);
__vlan_hwaccel_put_tag(gro_skb, vid); __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
} }
if (dev->features & NETIF_F_RXHASH) if (dev->features & NETIF_F_RXHASH)
@ -716,7 +716,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (be32_to_cpu(cqe->vlan_my_qpn) & if (be32_to_cpu(cqe->vlan_my_qpn) &
MLX4_CQE_VLAN_PRESENT_MASK) MLX4_CQE_VLAN_PRESENT_MASK)
__vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid)); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
/* Push it up the stack */ /* Push it up the stack */
netif_receive_skb(skb); netif_receive_skb(skb);

View File

@ -1281,7 +1281,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
va = addr; va = addr;
va += MXGEFW_PAD; va += MXGEFW_PAD;
veh = (struct vlan_ethhdr *)va; veh = (struct vlan_ethhdr *)va;
if ((dev->features & NETIF_F_HW_VLAN_RX) == NETIF_F_HW_VLAN_RX && if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
NETIF_F_HW_VLAN_CTAG_RX &&
veh->h_vlan_proto == htons(ETH_P_8021Q)) { veh->h_vlan_proto == htons(ETH_P_8021Q)) {
/* fixup csum if needed */ /* fixup csum if needed */
if (skb->ip_summed == CHECKSUM_COMPLETE) { if (skb->ip_summed == CHECKSUM_COMPLETE) {
@ -1289,7 +1290,7 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
skb->csum = csum_sub(skb->csum, vsum); skb->csum = csum_sub(skb->csum, vsum);
} }
/* pop tag */ /* pop tag */
__vlan_hwaccel_put_tag(skb, ntohs(veh->h_vlan_TCI)); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(veh->h_vlan_TCI));
memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN); memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN);
skb->len -= VLAN_HLEN; skb->len -= VLAN_HLEN;
skb->data_len -= VLAN_HLEN; skb->data_len -= VLAN_HLEN;
@ -3887,8 +3888,8 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->mtu = myri10ge_initial_mtu; netdev->mtu = myri10ge_initial_mtu;
netdev->hw_features = mgp->features | NETIF_F_RXCSUM; netdev->hw_features = mgp->features | NETIF_F_RXCSUM;
/* fake NETIF_F_HW_VLAN_RX for good GRO performance */ /* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
netdev->hw_features |= NETIF_F_HW_VLAN_RX; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->features = netdev->hw_features; netdev->features = netdev->hw_features;

View File

@ -911,7 +911,7 @@ static void rx_irq(struct net_device *ndev)
unsigned short tag; unsigned short tag;
tag = ntohs(extsts & EXTSTS_VTG_MASK); tag = ntohs(extsts & EXTSTS_VTG_MASK);
__vlan_hwaccel_put_tag(skb, tag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
} }
#endif #endif
rx_rc = netif_rx(skb); rx_rc = netif_rx(skb);
@ -2193,7 +2193,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
#ifdef NS83820_VLAN_ACCEL_SUPPORT #ifdef NS83820_VLAN_ACCEL_SUPPORT
/* We also support hardware vlan acceleration */ /* We also support hardware vlan acceleration */
ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; ndev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
#endif #endif
if (using_dac) { if (using_dac) {

View File

@ -7920,7 +7920,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO; NETIF_F_RXCSUM | NETIF_F_LRO;
dev->features |= dev->hw_features | dev->features |= dev->hw_features |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (sp->device_type & XFRAME_II_DEVICE) { if (sp->device_type & XFRAME_II_DEVICE) {
dev->hw_features |= NETIF_F_UFO; dev->hw_features |= NETIF_F_UFO;
if (ufo) if (ufo)
@ -8555,7 +8555,7 @@ static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
skb->protocol = eth_type_trans(skb, dev); skb->protocol = eth_type_trans(skb, dev);
if (vlan_tag && sp->vlan_strip_flag) if (vlan_tag && sp->vlan_strip_flag)
__vlan_hwaccel_put_tag(skb, vlan_tag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
if (sp->config.napi) if (sp->config.napi)
netif_receive_skb(skb); netif_receive_skb(skb);
else else

View File

@ -312,7 +312,7 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
if (ext_info->vlan && if (ext_info->vlan &&
ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
__vlan_hwaccel_put_tag(skb, ext_info->vlan); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ext_info->vlan);
napi_gro_receive(ring->napi_p, skb); napi_gro_receive(ring->napi_p, skb);
vxge_debug_entryexit(VXGE_TRACE, vxge_debug_entryexit(VXGE_TRACE,
@ -3300,12 +3300,13 @@ static void vxge_tx_watchdog(struct net_device *dev)
/** /**
* vxge_vlan_rx_add_vid * vxge_vlan_rx_add_vid
* @dev: net device pointer. * @dev: net device pointer.
* @proto: vlan protocol
* @vid: vid * @vid: vid
* *
* Add the vlan id to the devices vlan id table * Add the vlan id to the devices vlan id table
*/ */
static int static int
vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) vxge_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{ {
struct vxgedev *vdev = netdev_priv(dev); struct vxgedev *vdev = netdev_priv(dev);
struct vxge_vpath *vpath; struct vxge_vpath *vpath;
@ -3323,14 +3324,15 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
} }
/** /**
* vxge_vlan_rx_add_vid * vxge_vlan_rx_kill_vid
* @dev: net device pointer. * @dev: net device pointer.
* @proto: vlan protocol
* @vid: vid * @vid: vid
* *
* Remove the vlan id from the device's vlan id table * Remove the vlan id from the device's vlan id table
*/ */
static int static int
vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) vxge_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{ {
struct vxgedev *vdev = netdev_priv(dev); struct vxgedev *vdev = netdev_priv(dev);
struct vxge_vpath *vpath; struct vxge_vpath *vpath;
@ -3415,12 +3417,12 @@ static int vxge_device_register(struct __vxge_hw_device *hldev,
ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_HW_VLAN_TX; NETIF_F_HW_VLAN_CTAG_TX;
if (vdev->config.rth_steering != NO_STEERING) if (vdev->config.rth_steering != NO_STEERING)
ndev->hw_features |= NETIF_F_RXHASH; ndev->hw_features |= NETIF_F_RXHASH;
ndev->features |= ndev->hw_features | ndev->features |= ndev->hw_features |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &vxge_netdev_ops; ndev->netdev_ops = &vxge_netdev_ops;

View File

@ -2961,15 +2961,15 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
vlanflags = le32_to_cpu(np->get_rx.ex->buflow); vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
/* /*
* There's need to check for NETIF_F_HW_VLAN_RX here. * There's need to check for NETIF_F_HW_VLAN_CTAG_RX
* Even if vlan rx accel is disabled, * here. Even if vlan rx accel is disabled,
* NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set. * NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set.
*/ */
if (dev->features & NETIF_F_HW_VLAN_RX && if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
vlanflags & NV_RX3_VLAN_TAG_PRESENT) { vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK; u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
__vlan_hwaccel_put_tag(skb, vid); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
} }
napi_gro_receive(&np->napi, skb); napi_gro_receive(&np->napi, skb);
u64_stats_update_begin(&np->swstats_rx_syncp); u64_stats_update_begin(&np->swstats_rx_syncp);
@ -4816,7 +4816,7 @@ static netdev_features_t nv_fix_features(struct net_device *dev,
netdev_features_t features) netdev_features_t features)
{ {
/* vlan is dependent on rx checksum offload */ /* vlan is dependent on rx checksum offload */
if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
features |= NETIF_F_RXCSUM; features |= NETIF_F_RXCSUM;
return features; return features;
@ -4828,12 +4828,12 @@ static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
spin_lock_irq(&np->lock); spin_lock_irq(&np->lock);
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP; np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
else else
np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
if (features & NETIF_F_HW_VLAN_TX) if (features & NETIF_F_HW_VLAN_CTAG_TX)
np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS; np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
else else
np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
@ -4870,7 +4870,7 @@ static int nv_set_features(struct net_device *dev, netdev_features_t features)
spin_unlock_irq(&np->lock); spin_unlock_irq(&np->lock);
} }
if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)) if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX))
nv_vlan_mode(dev, features); nv_vlan_mode(dev, features);
return 0; return 0;
@ -5705,7 +5705,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
np->vlanctl_bits = 0; np->vlanctl_bits = 0;
if (id->driver_data & DEV_HAS_VLAN) { if (id->driver_data & DEV_HAS_VLAN) {
np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_TX;
} }
dev->features |= dev->hw_features; dev->features |= dev->hw_features;
@ -5996,7 +5997,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
dev->features & NETIF_F_HIGHDMA ? "highdma " : "", dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
"csum " : "", "csum " : "",
dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? dev->features & (NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_TX) ?
"vlan " : "", "vlan " : "",
dev->features & (NETIF_F_LOOPBACK) ? dev->features & (NETIF_F_LOOPBACK) ?
"loopback " : "", "loopback " : "",

View File

@ -1345,7 +1345,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
} }
if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX) if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
netdev->hw_features |= NETIF_F_HW_VLAN_TX; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO) if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
netdev->hw_features |= NETIF_F_LRO; netdev->hw_features |= NETIF_F_LRO;

View File

@ -1050,7 +1050,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
skb->protocol = eth_type_trans(skb, netdev); skb->protocol = eth_type_trans(skb, netdev);
if (vid != 0xffff) if (vid != 0xffff)
__vlan_hwaccel_put_tag(skb, vid); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
napi_gro_receive(&sds_ring->napi, skb); napi_gro_receive(&sds_ring->napi, skb);
@ -1153,7 +1153,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
} }
if (vid != 0xffff) if (vid != 0xffff)
__vlan_hwaccel_put_tag(skb, vid); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
netif_receive_skb(skb); netif_receive_skb(skb);
adapter->stats.lro_pkts++; adapter->stats.lro_pkts++;
@ -1518,7 +1518,7 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
skb->protocol = eth_type_trans(skb, netdev); skb->protocol = eth_type_trans(skb, netdev);
if (vid != 0xffff) if (vid != 0xffff)
__vlan_hwaccel_put_tag(skb, vid); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
napi_gro_receive(&sds_ring->napi, skb); napi_gro_receive(&sds_ring->napi, skb);
@ -1615,7 +1615,7 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
} }
if (vid != 0xffff) if (vid != 0xffff)
__vlan_hwaccel_put_tag(skb, vid); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
netif_receive_skb(skb); netif_receive_skb(skb);

View File

@ -86,8 +86,8 @@ static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *); static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *, static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *); struct qlcnic_esw_func_cfg *);
static int qlcnic_vlan_rx_add(struct net_device *, u16); static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
static int qlcnic_vlan_rx_del(struct net_device *, u16); static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
#define QLCNIC_IS_TSO_CAPABLE(adapter) \ #define QLCNIC_IS_TSO_CAPABLE(adapter) \
((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
@ -902,7 +902,7 @@ void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
} }
static int static int
qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid) qlcnic_vlan_rx_add(struct net_device *netdev, __be16 proto, u16 vid)
{ {
struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_adapter *adapter = netdev_priv(netdev);
set_bit(vid, adapter->vlans); set_bit(vid, adapter->vlans);
@ -910,7 +910,7 @@ qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
} }
static int static int
qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid) qlcnic_vlan_rx_del(struct net_device *netdev, __be16 proto, u16 vid)
{ {
struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_adapter *adapter = netdev_priv(netdev);
@ -1714,7 +1714,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_GRO |
NETIF_F_HW_VLAN_RX); NETIF_F_HW_VLAN_CTAG_RX);
netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM); NETIF_F_IPV6_CSUM);
@ -1729,7 +1729,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
} }
if (qlcnic_vlan_tx_check(adapter)) if (qlcnic_vlan_tx_check(adapter))
netdev->features |= (NETIF_F_HW_VLAN_TX); netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX);
if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
netdev->features |= NETIF_F_LRO; netdev->features |= NETIF_F_LRO;
@ -3346,7 +3346,7 @@ void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
rcu_read_lock(); rcu_read_lock();
for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) { for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
dev = __vlan_find_dev_deep(netdev, vid); dev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), vid);
if (!dev) if (!dev)
continue; continue;
qlcnic_config_indev_addr(adapter, dev, event); qlcnic_config_indev_addr(adapter, dev, event);

View File

@ -409,7 +409,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
(qdev-> (qdev->
func << CAM_OUT_FUNC_SHIFT) | func << CAM_OUT_FUNC_SHIFT) |
(0 << CAM_OUT_CQ_ID_SHIFT)); (0 << CAM_OUT_CQ_ID_SHIFT));
if (qdev->ndev->features & NETIF_F_HW_VLAN_RX) if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
cam_output |= CAM_OUT_RV; cam_output |= CAM_OUT_RV;
/* route to NIC core */ /* route to NIC core */
ql_write32(qdev, MAC_ADDR_DATA, cam_output); ql_write32(qdev, MAC_ADDR_DATA, cam_output);
@ -1498,7 +1498,7 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rx_ring->cq_id); skb_record_rx_queue(skb, rx_ring->cq_id);
if (vlan_id != 0xffff) if (vlan_id != 0xffff)
__vlan_hwaccel_put_tag(skb, vlan_id); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
napi_gro_frags(napi); napi_gro_frags(napi);
} }
@ -1574,7 +1574,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
skb_record_rx_queue(skb, rx_ring->cq_id); skb_record_rx_queue(skb, rx_ring->cq_id);
if (vlan_id != 0xffff) if (vlan_id != 0xffff)
__vlan_hwaccel_put_tag(skb, vlan_id); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY) if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(napi, skb); napi_gro_receive(napi, skb);
else else
@ -1670,7 +1670,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
skb_record_rx_queue(skb, rx_ring->cq_id); skb_record_rx_queue(skb, rx_ring->cq_id);
if (vlan_id != 0xffff) if (vlan_id != 0xffff)
__vlan_hwaccel_put_tag(skb, vlan_id); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY) if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(&rx_ring->napi, skb); napi_gro_receive(&rx_ring->napi, skb);
else else
@ -1975,7 +1975,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
rx_ring->rx_bytes += skb->len; rx_ring->rx_bytes += skb->len;
skb_record_rx_queue(skb, rx_ring->cq_id); skb_record_rx_queue(skb, rx_ring->cq_id);
if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0)) if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
__vlan_hwaccel_put_tag(skb, vlan_id); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY) if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(&rx_ring->napi, skb); napi_gro_receive(&rx_ring->napi, skb);
else else
@ -2279,7 +2279,7 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{ {
struct ql_adapter *qdev = netdev_priv(ndev); struct ql_adapter *qdev = netdev_priv(ndev);
if (features & NETIF_F_HW_VLAN_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) {
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
NIC_RCV_CFG_VLAN_MATCH_AND_NON); NIC_RCV_CFG_VLAN_MATCH_AND_NON);
} else { } else {
@ -2294,10 +2294,10 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
* Since there is no support for separate rx/tx vlan accel * Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx. * enable/disable make sure tx flag is always in same state as rx.
*/ */
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_TX; features |= NETIF_F_HW_VLAN_CTAG_TX;
else else
features &= ~NETIF_F_HW_VLAN_TX; features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features; return features;
} }
@ -2307,7 +2307,7 @@ static int qlge_set_features(struct net_device *ndev,
{ {
netdev_features_t changed = ndev->features ^ features; netdev_features_t changed = ndev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX) if (changed & NETIF_F_HW_VLAN_CTAG_RX)
qlge_vlan_mode(ndev, features); qlge_vlan_mode(ndev, features);
return 0; return 0;
@ -2326,7 +2326,7 @@ static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
return err; return err;
} }
static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid) static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{ {
struct ql_adapter *qdev = netdev_priv(ndev); struct ql_adapter *qdev = netdev_priv(ndev);
int status; int status;
@ -2357,7 +2357,7 @@ static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
return err; return err;
} }
static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{ {
struct ql_adapter *qdev = netdev_priv(ndev); struct ql_adapter *qdev = netdev_priv(ndev);
int status; int status;
@ -4665,9 +4665,9 @@ static int qlge_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(ndev, &pdev->dev); SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO_ECN |
NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM; NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
ndev->features = ndev->hw_features | ndev->features = ndev->hw_features |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->vlan_features = ndev->hw_features; ndev->vlan_features = ndev->hw_features;
if (test_bit(QL_DMA64, &qdev->flags)) if (test_bit(QL_DMA64, &qdev->flags))

View File

@ -431,7 +431,7 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
cp->dev->stats.rx_bytes += skb->len; cp->dev->stats.rx_bytes += skb->len;
if (opts2 & RxVlanTagged) if (opts2 & RxVlanTagged)
__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
napi_gro_receive(&cp->napi, skb); napi_gro_receive(&cp->napi, skb);
} }
@ -1438,7 +1438,7 @@ static int cp_set_features(struct net_device *dev, netdev_features_t features)
else else
cp->cpcmd &= ~RxChkSum; cp->cpcmd &= ~RxChkSum;
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
cp->cpcmd |= RxVlanOn; cp->cpcmd |= RxVlanOn;
else else
cp->cpcmd &= ~RxVlanOn; cp->cpcmd &= ~RxVlanOn;
@ -1955,14 +1955,14 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &cp_ethtool_ops; dev->ethtool_ops = &cp_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT; dev->watchdog_timeo = TX_TIMEOUT;
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (pci_using_dac) if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA; dev->features |= NETIF_F_HIGHDMA;
/* disabled by default until verified */ /* disabled by default until verified */
dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA; NETIF_F_HIGHDMA;

View File

@ -1793,16 +1793,17 @@ static void __rtl8169_set_features(struct net_device *dev,
netdev_features_t changed = features ^ dev->features; netdev_features_t changed = features ^ dev->features;
void __iomem *ioaddr = tp->mmio_addr; void __iomem *ioaddr = tp->mmio_addr;
if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX))) if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_RX)))
return; return;
if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) { if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
if (features & NETIF_F_RXCSUM) if (features & NETIF_F_RXCSUM)
tp->cp_cmd |= RxChkSum; tp->cp_cmd |= RxChkSum;
else else
tp->cp_cmd &= ~RxChkSum; tp->cp_cmd &= ~RxChkSum;
if (dev->features & NETIF_F_HW_VLAN_RX) if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
tp->cp_cmd |= RxVlan; tp->cp_cmd |= RxVlan;
else else
tp->cp_cmd &= ~RxVlan; tp->cp_cmd &= ~RxVlan;
@ -1842,7 +1843,7 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
u32 opts2 = le32_to_cpu(desc->opts2); u32 opts2 = le32_to_cpu(desc->opts2);
if (opts2 & RxVlanTag) if (opts2 & RxVlanTag)
__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
} }
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
@ -7036,16 +7037,17 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* don't enable SG, IP_CSUM and TSO by default - it might not work /* don't enable SG, IP_CSUM and TSO by default - it might not work
* properly for all devices */ * properly for all devices */
dev->features |= NETIF_F_RXCSUM | dev->features |= NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA; NETIF_F_HIGHDMA;
if (tp->mac_version == RTL_GIGA_MAC_VER_05) if (tp->mac_version == RTL_GIGA_MAC_VER_05)
/* 8110SCd requires hardware Rx VLAN - disallow toggling */ /* 8110SCd requires hardware Rx VLAN - disallow toggling */
dev->hw_features &= ~NETIF_F_HW_VLAN_RX; dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
dev->hw_features |= NETIF_F_RXALL; dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS; dev->hw_features |= NETIF_F_RXFCS;

View File

@ -2448,7 +2448,8 @@ static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
return TSU_VTAG1; return TSU_VTAG1;
} }
static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid) static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
__be16 proto, u16 vid)
{ {
struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_private *mdp = netdev_priv(ndev);
int vtag_reg_index = sh_eth_get_vtag_index(mdp); int vtag_reg_index = sh_eth_get_vtag_index(mdp);
@ -2478,7 +2479,8 @@ static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
return 0; return 0;
} }
static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
__be16 proto, u16 vid)
{ {
struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_private *mdp = netdev_priv(ndev);
int vtag_reg_index = sh_eth_get_vtag_index(mdp); int vtag_reg_index = sh_eth_get_vtag_index(mdp);
@ -2749,7 +2751,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
goto out_release; goto out_release;
} }
mdp->port = devno % 2; mdp->port = devno % 2;
ndev->features = NETIF_F_HW_VLAN_FILTER; ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
} }
/* initialize first or needed device */ /* initialize first or needed device */

View File

@ -2679,7 +2679,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
ndev->watchdog_timeo = msecs_to_jiffies(watchdog); ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED #ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */ /* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_RX; ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
#endif #endif
priv->msg_enable = netif_msg_init(debug, default_msg_level); priv->msg_enable = netif_msg_init(debug, default_msg_level);

View File

@ -733,7 +733,7 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
* @ndev: network device * @ndev: network device
* @vid: VLAN vid to add * @vid: VLAN vid to add
*/ */
static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid) static int bdx_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{ {
__bdx_vlan_rx_vid(ndev, vid, 1); __bdx_vlan_rx_vid(ndev, vid, 1);
return 0; return 0;
@ -744,7 +744,7 @@ static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
* @ndev: network device * @ndev: network device
* @vid: VLAN vid to kill * @vid: VLAN vid to kill
*/ */
static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid) static int bdx_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{ {
__bdx_vlan_rx_vid(ndev, vid, 0); __bdx_vlan_rx_vid(ndev, vid, 0);
return 0; return 0;
@ -1148,7 +1148,7 @@ NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan,
priv->ndev->name, priv->ndev->name,
GET_RXD_VLAN_ID(rxd_vlan), GET_RXD_VLAN_ID(rxd_vlan),
GET_RXD_VTAG(rxd_val1)); GET_RXD_VTAG(rxd_val1));
__vlan_hwaccel_put_tag(skb, GET_RXD_VLAN_TCI(rxd_vlan)); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), GET_RXD_VLAN_TCI(rxd_vlan));
} }
netif_receive_skb(skb); netif_receive_skb(skb);
} }
@ -2017,12 +2017,12 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* so we can have them same for all ports of the board */ * so we can have them same for all ports of the board */
ndev->if_port = port; ndev->if_port = port;
ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
| NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM
/*| NETIF_F_FRAGLIST */ /*| NETIF_F_FRAGLIST */
; ;
ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_HW_VLAN_TX; NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;
if (pci_using_dac) if (pci_using_dac)
ndev->features |= NETIF_F_HIGHDMA; ndev->features |= NETIF_F_HIGHDMA;

View File

@ -1251,7 +1251,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
} }
static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
unsigned short vid) __be16 proto, u16 vid)
{ {
struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_priv *priv = netdev_priv(ndev);
@ -1263,7 +1263,7 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
} }
static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
unsigned short vid) __be16 proto, u16 vid)
{ {
struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_priv *priv = netdev_priv(ndev);
int ret; int ret;
@ -1599,7 +1599,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
priv_sl2->num_irqs = priv->num_irqs; priv_sl2->num_irqs = priv->num_irqs;
} }
ndev->features |= NETIF_F_HW_VLAN_FILTER; ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops; ndev->netdev_ops = &cpsw_netdev_ops;
SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
@ -1837,7 +1837,7 @@ static int cpsw_probe(struct platform_device *pdev)
k++; k++;
} }
ndev->features |= NETIF_F_HW_VLAN_FILTER; ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops; ndev->netdev_ops = &cpsw_netdev_ops;
SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);

View File

@ -2329,8 +2329,8 @@ spider_net_setup_netdev(struct spider_net_card *card)
if (SPIDER_NET_RX_CSUM_DEFAULT) if (SPIDER_NET_RX_CSUM_DEFAULT)
netdev->features |= NETIF_F_RXCSUM; netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX; netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX;
/* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
* NETIF_F_HW_VLAN_FILTER */ * NETIF_F_HW_VLAN_CTAG_FILTER */
netdev->irq = card->pdev->irq; netdev->irq = card->pdev->irq;
card->num_rx_ints = 0; card->num_rx_ints = 0;

View File

@ -508,8 +508,10 @@ static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops; static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev); static int rhine_close(struct net_device *dev);
static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid); static int rhine_vlan_rx_add_vid(struct net_device *dev,
static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid); __be16 proto, u16 vid);
static int rhine_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid);
static void rhine_restart_tx(struct net_device *dev); static void rhine_restart_tx(struct net_device *dev);
static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low) static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
@ -1026,8 +1028,9 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
if (pdev->revision >= VT6105M) if (pdev->revision >= VT6105M)
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_FILTER; NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
/* dev->name not defined before register_netdev()! */ /* dev->name not defined before register_netdev()! */
rc = register_netdev(dev); rc = register_netdev(dev);
@ -1414,7 +1417,7 @@ static void rhine_update_vcam(struct net_device *dev)
rhine_set_vlan_cam_mask(ioaddr, vCAMmask); rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
} }
static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{ {
struct rhine_private *rp = netdev_priv(dev); struct rhine_private *rp = netdev_priv(dev);
@ -1425,7 +1428,7 @@ static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
return 0; return 0;
} }
static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{ {
struct rhine_private *rp = netdev_priv(dev); struct rhine_private *rp = netdev_priv(dev);
@ -1933,7 +1936,7 @@ static int rhine_rx(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev); skb->protocol = eth_type_trans(skb, dev);
if (unlikely(desc_length & DescTag)) if (unlikely(desc_length & DescTag))
__vlan_hwaccel_put_tag(skb, vlan_tci); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
netif_receive_skb(skb); netif_receive_skb(skb);
u64_stats_update_begin(&rp->rx_stats.syncp); u64_stats_update_begin(&rp->rx_stats.syncp);

View File

@ -525,7 +525,8 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
mac_set_vlan_cam_mask(regs, vptr->vCAMmask); mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
} }
static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) static int velocity_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{ {
struct velocity_info *vptr = netdev_priv(dev); struct velocity_info *vptr = netdev_priv(dev);
@ -536,7 +537,8 @@ static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
return 0; return 0;
} }
static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) static int velocity_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{ {
struct velocity_info *vptr = netdev_priv(dev); struct velocity_info *vptr = netdev_priv(dev);
@ -2078,7 +2080,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
if (rd->rdesc0.RSR & RSR_DETAG) { if (rd->rdesc0.RSR & RSR_DETAG) {
u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG)); u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
__vlan_hwaccel_put_tag(skb, vid); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
} }
netif_rx(skb); netif_rx(skb);
@ -2810,9 +2812,10 @@ static int velocity_found1(struct pci_dev *pdev,
dev->ethtool_ops = &velocity_ethtool_ops; dev->ethtool_ops = &velocity_ethtool_ops;
netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_TX; dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM; dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_IP_CSUM;
ret = register_netdev(dev); ret = register_netdev(dev);
if (ret < 0) if (ret < 0)

View File

@ -1018,9 +1018,9 @@ static int temac_of_probe(struct platform_device *op)
ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */ ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */ ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */ ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */ ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */
ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */ ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
ndev->features |= NETIF_F_GSO; /* Enable software GSO. */ ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */ ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */

View File

@ -431,7 +431,7 @@ static int netvsc_probe(struct hv_device *dev,
/* TODO: Add GSO and Checksum offload */ /* TODO: Add GSO and Checksum offload */
net->hw_features = NETIF_F_SG; net->hw_features = NETIF_F_SG;
net->features = NETIF_F_SG | NETIF_F_HW_VLAN_TX; net->features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX;
SET_ETHTOOL_OPS(net, &ethtool_ops); SET_ETHTOOL_OPS(net, &ethtool_ops);
SET_NETDEV_DEV(net, &dev->device); SET_NETDEV_DEV(net, &dev->device);

View File

@ -166,7 +166,8 @@ static const struct net_device_ops ifb_netdev_ops = {
#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \ #define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \ NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX) NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_STAG_TX)
static void ifb_setup(struct net_device *dev) static void ifb_setup(struct net_device *dev)
{ {

View File

@ -471,7 +471,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
NETIF_F_HW_VLAN_FILTER) NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
#define MACVLAN_STATE_MASK \ #define MACVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
@ -567,21 +567,21 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
} }
static int macvlan_vlan_rx_add_vid(struct net_device *dev, static int macvlan_vlan_rx_add_vid(struct net_device *dev,
unsigned short vid) __be16 proto, u16 vid)
{ {
struct macvlan_dev *vlan = netdev_priv(dev); struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev; struct net_device *lowerdev = vlan->lowerdev;
return vlan_vid_add(lowerdev, vid); return vlan_vid_add(lowerdev, proto, vid);
} }
static int macvlan_vlan_rx_kill_vid(struct net_device *dev, static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
unsigned short vid) __be16 proto, u16 vid)
{ {
struct macvlan_dev *vlan = netdev_priv(dev); struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev; struct net_device *lowerdev = vlan->lowerdev;
vlan_vid_del(lowerdev, vid); vlan_vid_del(lowerdev, proto, vid);
return 0; return 0;
} }

View File

@ -1598,7 +1598,7 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
return stats; return stats;
} }
static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid) static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{ {
struct team *team = netdev_priv(dev); struct team *team = netdev_priv(dev);
struct team_port *port; struct team_port *port;
@ -1610,7 +1610,7 @@ static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
*/ */
mutex_lock(&team->lock); mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list) { list_for_each_entry(port, &team->port_list, list) {
err = vlan_vid_add(port->dev, vid); err = vlan_vid_add(port->dev, proto, vid);
if (err) if (err)
goto unwind; goto unwind;
} }
@ -1620,20 +1620,20 @@ static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
unwind: unwind:
list_for_each_entry_continue_reverse(port, &team->port_list, list) list_for_each_entry_continue_reverse(port, &team->port_list, list)
vlan_vid_del(port->dev, vid); vlan_vid_del(port->dev, proto, vid);
mutex_unlock(&team->lock); mutex_unlock(&team->lock);
return err; return err;
} }
static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid) static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{ {
struct team *team = netdev_priv(dev); struct team *team = netdev_priv(dev);
struct team_port *port; struct team_port *port;
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list) list_for_each_entry_rcu(port, &team->port_list, list)
vlan_vid_del(port->dev, vid); vlan_vid_del(port->dev, proto, vid);
rcu_read_unlock(); rcu_read_unlock();
return 0; return 0;
@ -1841,9 +1841,9 @@ static void team_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX; dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GRO; dev->features |= NETIF_F_GRO;
dev->hw_features = TEAM_VLAN_FEATURES | dev->hw_features = TEAM_VLAN_FEATURES |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_FILTER; NETIF_F_HW_VLAN_CTAG_FILTER;
dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM); dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
dev->features |= dev->hw_features; dev->features |= dev->hw_features;

View File

@ -101,7 +101,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->flags |= IFF_NOARP; dev->net->flags |= IFF_NOARP;
/* no need to put the VLAN tci in the packet headers */ /* no need to put the VLAN tci in the packet headers */
dev->net->features |= NETIF_F_HW_VLAN_TX; dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX;
err: err:
return ret; return ret;
} }
@ -221,7 +221,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
/* map MBIM session to VLAN */ /* map MBIM session to VLAN */
if (tci) if (tci)
vlan_put_tag(skb, tci); vlan_put_tag(skb, htons(ETH_P_8021Q), tci);
err: err:
return skb; return skb;
} }

View File

@ -255,7 +255,8 @@ static const struct net_device_ops veth_netdev_ops = {
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \ NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX) NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
static void veth_setup(struct net_device *dev) static void veth_setup(struct net_device *dev)
{ {

View File

@ -1006,7 +1006,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
kfree(buf); kfree(buf);
} }
static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) static int virtnet_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{ {
struct virtnet_info *vi = netdev_priv(dev); struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg; struct scatterlist sg;
@ -1019,7 +1020,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
return 0; return 0;
} }
static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{ {
struct virtnet_info *vi = netdev_priv(dev); struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg; struct scatterlist sg;
@ -1376,7 +1378,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
if (vi->has_cvq) { if (vi->has_cvq) {
vi->cvq = vqs[total_vqs - 1]; vi->cvq = vqs[total_vqs - 1];
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
vi->dev->features |= NETIF_F_HW_VLAN_FILTER; vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
} }
for (i = 0; i < vi->max_queue_pairs; i++) { for (i = 0; i < vi->max_queue_pairs; i++) {

View File

@ -1293,7 +1293,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
skb->protocol = eth_type_trans(skb, adapter->netdev); skb->protocol = eth_type_trans(skb, adapter->netdev);
if (unlikely(rcd->ts)) if (unlikely(rcd->ts))
__vlan_hwaccel_put_tag(skb, rcd->tci); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
if (adapter->netdev->features & NETIF_F_LRO) if (adapter->netdev->features & NETIF_F_LRO)
netif_receive_skb(skb); netif_receive_skb(skb);
@ -1931,7 +1931,7 @@ vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
static int static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{ {
struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@ -1953,7 +1953,7 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
static int static int
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{ {
struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@ -2107,7 +2107,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
devRead->misc.uptFeatures |= UPT1_F_LRO; devRead->misc.uptFeatures |= UPT1_F_LRO;
devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
} }
if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
devRead->misc.uptFeatures |= UPT1_F_RXVLAN; devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
@ -2669,14 +2669,15 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_LRO; NETIF_F_LRO;
if (dma64) if (dma64)
netdev->hw_features |= NETIF_F_HIGHDMA; netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->vlan_features = netdev->hw_features & netdev->vlan_features = netdev->hw_features &
~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); ~(NETIF_F_HW_VLAN_CTAG_TX |
netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER; NETIF_F_HW_VLAN_CTAG_RX);
netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
} }

View File

@ -263,7 +263,8 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
unsigned long flags; unsigned long flags;
netdev_features_t changed = features ^ netdev->features; netdev_features_t changed = features ^ netdev->features;
if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_RX)) { if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO |
NETIF_F_HW_VLAN_CTAG_RX)) {
if (features & NETIF_F_RXCSUM) if (features & NETIF_F_RXCSUM)
adapter->shared->devRead.misc.uptFeatures |= adapter->shared->devRead.misc.uptFeatures |=
UPT1_F_RXCSUM; UPT1_F_RXCSUM;
@ -279,7 +280,7 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
adapter->shared->devRead.misc.uptFeatures &= adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_LRO; ~UPT1_F_LRO;
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_CTAG_RX)
adapter->shared->devRead.misc.uptFeatures |= adapter->shared->devRead.misc.uptFeatures |=
UPT1_F_RXVLAN; UPT1_F_RXVLAN;
else else

View File

@ -302,7 +302,8 @@ static void qeth_l2_process_vlans(struct qeth_card *card)
spin_unlock_bh(&card->vlanlock); spin_unlock_bh(&card->vlanlock);
} }
static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{ {
struct qeth_card *card = dev->ml_priv; struct qeth_card *card = dev->ml_priv;
struct qeth_vlan_vid *id; struct qeth_vlan_vid *id;
@ -331,7 +332,8 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
return 0; return 0;
} }
static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{ {
struct qeth_vlan_vid *id, *tmpid = NULL; struct qeth_vlan_vid *id, *tmpid = NULL;
struct qeth_card *card = dev->ml_priv; struct qeth_card *card = dev->ml_priv;
@ -959,7 +961,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops); SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
else else
SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops); SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops);
card->dev->features |= NETIF_F_HW_VLAN_FILTER; card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
card->info.broadcast_capable = 1; card->info.broadcast_capable = 1;
qeth_l2_request_initial_mac(card); qeth_l2_request_initial_mac(card);
SET_NETDEV_DEV(card->dev, &card->gdev->dev); SET_NETDEV_DEV(card->dev, &card->gdev->dev);

View File

@ -1824,7 +1824,8 @@ static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
rcu_read_unlock(); rcu_read_unlock();
} }
static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{ {
struct qeth_card *card = dev->ml_priv; struct qeth_card *card = dev->ml_priv;
@ -1832,7 +1833,8 @@ static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
return 0; return 0;
} }
static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{ {
struct qeth_card *card = dev->ml_priv; struct qeth_card *card = dev->ml_priv;
unsigned long flags; unsigned long flags;
@ -3294,9 +3296,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->watchdog_timeo = QETH_TX_TIMEOUT; card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
card->dev->mtu = card->info.initial_mtu; card->dev->mtu = card->info.initial_mtu;
SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops); SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
card->dev->features |= NETIF_F_HW_VLAN_TX | card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_FILTER; NETIF_F_HW_VLAN_CTAG_FILTER;
card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
card->dev->gso_max_size = 15 * PAGE_SIZE; card->dev->gso_max_size = 15 * PAGE_SIZE;

View File

@ -1655,7 +1655,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb->priority = fcoe->priority; skb->priority = fcoe->priority;
if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN && if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
fcoe->realdev->features & NETIF_F_HW_VLAN_TX) { fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) {
skb->vlan_tci = VLAN_TAG_PRESENT | skb->vlan_tci = VLAN_TAG_PRESENT |
vlan_dev_vlan_id(fcoe->netdev); vlan_dev_vlan_id(fcoe->netdev);
skb->dev = fcoe->realdev; skb->dev = fcoe->realdev;

View File

@ -86,15 +86,15 @@ static inline int is_vlan_dev(struct net_device *dev)
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev, extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
u16 vlan_id); __be16 vlan_proto, u16 vlan_id);
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev); extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern bool vlan_do_receive(struct sk_buff **skb); extern bool vlan_do_receive(struct sk_buff **skb);
extern struct sk_buff *vlan_untag(struct sk_buff *skb); extern struct sk_buff *vlan_untag(struct sk_buff *skb);
extern int vlan_vid_add(struct net_device *dev, unsigned short vid); extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
extern void vlan_vid_del(struct net_device *dev, unsigned short vid); extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);
extern int vlan_vids_add_by_dev(struct net_device *dev, extern int vlan_vids_add_by_dev(struct net_device *dev,
const struct net_device *by_dev); const struct net_device *by_dev);
@ -157,9 +157,20 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
} }
#endif #endif
static inline bool vlan_hw_offload_capable(netdev_features_t features,
__be16 proto)
{
if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX)
return true;
if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX)
return true;
return false;
}
/** /**
* vlan_insert_tag - regular VLAN tag inserting * vlan_insert_tag - regular VLAN tag inserting
* @skb: skbuff to tag * @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert * @vlan_tci: VLAN TCI to insert
* *
* Inserts the VLAN tag into @skb as part of the payload * Inserts the VLAN tag into @skb as part of the payload
@ -170,7 +181,8 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
* *
* Does not change skb->protocol so this function can be used during receive. * Does not change skb->protocol so this function can be used during receive.
*/ */
static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci) static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{ {
struct vlan_ethhdr *veth; struct vlan_ethhdr *veth;
@ -185,7 +197,7 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci)
skb->mac_header -= VLAN_HLEN; skb->mac_header -= VLAN_HLEN;
/* first, the ethernet type */ /* first, the ethernet type */
veth->h_vlan_proto = htons(ETH_P_8021Q); veth->h_vlan_proto = vlan_proto;
/* now, the TCI */ /* now, the TCI */
veth->h_vlan_TCI = htons(vlan_tci); veth->h_vlan_TCI = htons(vlan_tci);
@ -204,24 +216,28 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci)
* Following the skb_unshare() example, in case of error, the calling function * Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb. * doesn't have to worry about freeing the original skb.
*/ */
static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci) static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{ {
skb = vlan_insert_tag(skb, vlan_tci); skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
if (skb) if (skb)
skb->protocol = htons(ETH_P_8021Q); skb->protocol = vlan_proto;
return skb; return skb;
} }
/** /**
* __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
* @skb: skbuff to tag * @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert * @vlan_tci: VLAN TCI to insert
* *
* Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
*/ */
static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
__be16 vlan_proto,
u16 vlan_tci) u16 vlan_tci)
{ {
skb->vlan_proto = vlan_proto;
skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
return skb; return skb;
} }
@ -236,12 +252,13 @@ static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
* Assumes skb->dev is the target that will xmit this frame. * Assumes skb->dev is the target that will xmit this frame.
* Returns a VLAN tagged skb. * Returns a VLAN tagged skb.
*/ */
static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, u16 vlan_tci) static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{ {
if (skb->dev->features & NETIF_F_HW_VLAN_TX) { if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) {
return __vlan_hwaccel_put_tag(skb, vlan_tci); return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
} else { } else {
return __vlan_put_tag(skb, vlan_tci); return __vlan_put_tag(skb, vlan_proto, vlan_tci);
} }
} }
@ -256,9 +273,9 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{ {
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data; struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
if (veth->h_vlan_proto != htons(ETH_P_8021Q)) { if (veth->h_vlan_proto != htons(ETH_P_8021Q) &&
veth->h_vlan_proto != htons(ETH_P_8021AD))
return -EINVAL; return -EINVAL;
}
*vlan_tci = ntohs(veth->h_vlan_TCI); *vlan_tci = ntohs(veth->h_vlan_TCI);
return 0; return 0;
@ -294,7 +311,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
*/ */
static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{ {
if (skb->dev->features & NETIF_F_HW_VLAN_TX) { if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
return __vlan_hwaccel_get_tag(skb, vlan_tci); return __vlan_hwaccel_get_tag(skb, vlan_tci);
} else { } else {
return __vlan_get_tag(skb, vlan_tci); return __vlan_get_tag(skb, vlan_tci);

View File

@ -22,9 +22,12 @@ enum {
NETIF_F_IPV6_CSUM_BIT, /* Can checksum TCP/UDP over IPV6 */ NETIF_F_IPV6_CSUM_BIT, /* Can checksum TCP/UDP over IPV6 */
NETIF_F_HIGHDMA_BIT, /* Can DMA to high memory. */ NETIF_F_HIGHDMA_BIT, /* Can DMA to high memory. */
NETIF_F_FRAGLIST_BIT, /* Scatter/gather IO. */ NETIF_F_FRAGLIST_BIT, /* Scatter/gather IO. */
NETIF_F_HW_VLAN_TX_BIT, /* Transmit VLAN hw acceleration */ NETIF_F_HW_VLAN_CTAG_TX_BIT, /* Transmit VLAN CTAG HW acceleration */
NETIF_F_HW_VLAN_RX_BIT, /* Receive VLAN hw acceleration */ NETIF_F_HW_VLAN_CTAG_RX_BIT, /* Receive VLAN CTAG HW acceleration */
NETIF_F_HW_VLAN_FILTER_BIT, /* Receive filtering on VLAN */ NETIF_F_HW_VLAN_CTAG_FILTER_BIT,/* Receive filtering on VLAN CTAGs */
NETIF_F_HW_VLAN_STAG_TX_BIT, /* Transmit VLAN STAG HW acceleration */
NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */
NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot handle VLAN packets */ NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot handle VLAN packets */
NETIF_F_GSO_BIT, /* Enable software GSO. */ NETIF_F_GSO_BIT, /* Enable software GSO. */
NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */ NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */
@ -80,9 +83,12 @@ enum {
#define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST) #define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST)
#define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA) #define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA)
#define NETIF_F_HW_CSUM __NETIF_F(HW_CSUM) #define NETIF_F_HW_CSUM __NETIF_F(HW_CSUM)
#define NETIF_F_HW_VLAN_FILTER __NETIF_F(HW_VLAN_FILTER) #define NETIF_F_HW_VLAN_CTAG_FILTER __NETIF_F(HW_VLAN_CTAG_FILTER)
#define NETIF_F_HW_VLAN_RX __NETIF_F(HW_VLAN_RX) #define NETIF_F_HW_VLAN_CTAG_RX __NETIF_F(HW_VLAN_CTAG_RX)
#define NETIF_F_HW_VLAN_TX __NETIF_F(HW_VLAN_TX) #define NETIF_F_HW_VLAN_CTAG_TX __NETIF_F(HW_VLAN_CTAG_TX)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
#define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM) #define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM)
#define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM) #define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM)
#define NETIF_F_LLTX __NETIF_F(LLTX) #define NETIF_F_LLTX __NETIF_F(LLTX)

View File

@ -784,13 +784,13 @@ struct netdev_fcoe_hbainfo {
* 3. Update dev->stats asynchronously and atomically, and define * 3. Update dev->stats asynchronously and atomically, and define
* neither operation. * neither operation.
* *
* int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid);
* If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) * If device support VLAN filtering this function is called when a
* this function is called when a VLAN id is registered. * VLAN id is registered.
* *
* int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
* If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) * If device support VLAN filtering this function is called when a
* this function is called when a VLAN id is unregistered. * VLAN id is unregistered.
* *
* void (*ndo_poll_controller)(struct net_device *dev); * void (*ndo_poll_controller)(struct net_device *dev);
* *
@ -934,9 +934,9 @@ struct net_device_ops {
struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
int (*ndo_vlan_rx_add_vid)(struct net_device *dev, int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
unsigned short vid); __be16 proto, u16 vid);
int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
unsigned short vid); __be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
void (*ndo_poll_controller)(struct net_device *dev); void (*ndo_poll_controller)(struct net_device *dev);
int (*ndo_netpoll_setup)(struct net_device *dev, int (*ndo_netpoll_setup)(struct net_device *dev,

View File

@ -387,6 +387,7 @@ typedef unsigned char *sk_buff_data_t;
* @secmark: security marking * @secmark: security marking
* @mark: Generic packet mark * @mark: Generic packet mark
* @dropcount: total number of sk_receive_queue overflows * @dropcount: total number of sk_receive_queue overflows
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information * @vlan_tci: vlan tag control information
* @inner_transport_header: Inner transport layer header (encapsulation) * @inner_transport_header: Inner transport layer header (encapsulation)
* @inner_network_header: Network layer header (encapsulation) * @inner_network_header: Network layer header (encapsulation)
@ -465,6 +466,7 @@ struct sk_buff {
__u32 rxhash; __u32 rxhash;
__be16 vlan_proto;
__u16 vlan_tci; __u16 vlan_tci;
#ifdef CONFIG_NET_SCHED #ifdef CONFIG_NET_SCHED

View File

@ -250,6 +250,7 @@ enum {
IFLA_VLAN_FLAGS, IFLA_VLAN_FLAGS,
IFLA_VLAN_EGRESS_QOS, IFLA_VLAN_EGRESS_QOS,
IFLA_VLAN_INGRESS_QOS, IFLA_VLAN_INGRESS_QOS,
IFLA_VLAN_PROTOCOL,
__IFLA_VLAN_MAX, __IFLA_VLAN_MAX,
}; };

View File

@ -3,7 +3,7 @@
# #
config VLAN_8021Q config VLAN_8021Q
tristate "802.1Q VLAN Support" tristate "802.1Q/802.1ad VLAN Support"
---help--- ---help---
Select this and you will be able to create 802.1Q VLAN interfaces Select this and you will be able to create 802.1Q VLAN interfaces
on your ethernet interfaces. 802.1Q VLAN supports almost on your ethernet interfaces. 802.1Q VLAN supports almost

View File

@ -51,14 +51,18 @@ const char vlan_version[] = DRV_VERSION;
/* End of global variables definitions. */ /* End of global variables definitions. */
static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id) static int vlan_group_prealloc_vid(struct vlan_group *vg,
__be16 vlan_proto, u16 vlan_id)
{ {
struct net_device **array; struct net_device **array;
unsigned int pidx, vidx;
unsigned int size; unsigned int size;
ASSERT_RTNL(); ASSERT_RTNL();
array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; pidx = vlan_proto_idx(vlan_proto);
vidx = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
array = vg->vlan_devices_arrays[pidx][vidx];
if (array != NULL) if (array != NULL)
return 0; return 0;
@ -67,7 +71,7 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
if (array == NULL) if (array == NULL)
return -ENOBUFS; return -ENOBUFS;
vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN] = array; vg->vlan_devices_arrays[pidx][vidx] = array;
return 0; return 0;
} }
@ -93,7 +97,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
if (vlan->flags & VLAN_FLAG_GVRP) if (vlan->flags & VLAN_FLAG_GVRP)
vlan_gvrp_request_leave(dev); vlan_gvrp_request_leave(dev);
vlan_group_set_device(grp, vlan_id, NULL); vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);
/* Because unregister_netdevice_queue() makes sure at least one rcu /* Because unregister_netdevice_queue() makes sure at least one rcu
* grace period is respected before device freeing, * grace period is respected before device freeing,
* we dont need to call synchronize_net() here. * we dont need to call synchronize_net() here.
@ -112,13 +116,14 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
* VLAN is not 0 (leave it there for 802.1p). * VLAN is not 0 (leave it there for 802.1p).
*/ */
if (vlan_id) if (vlan_id)
vlan_vid_del(real_dev, vlan_id); vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
/* Get rid of the vlan's reference to real_dev */ /* Get rid of the vlan's reference to real_dev */
dev_put(real_dev); dev_put(real_dev);
} }
int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id) int vlan_check_real_dev(struct net_device *real_dev,
__be16 protocol, u16 vlan_id)
{ {
const char *name = real_dev->name; const char *name = real_dev->name;
@ -127,7 +132,7 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
if (vlan_find_dev(real_dev, vlan_id) != NULL) if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL)
return -EEXIST; return -EEXIST;
return 0; return 0;
@ -142,7 +147,7 @@ int register_vlan_dev(struct net_device *dev)
struct vlan_group *grp; struct vlan_group *grp;
int err; int err;
err = vlan_vid_add(real_dev, vlan_id); err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
if (err) if (err)
return err; return err;
@ -160,7 +165,7 @@ int register_vlan_dev(struct net_device *dev)
goto out_uninit_gvrp; goto out_uninit_gvrp;
} }
err = vlan_group_prealloc_vid(grp, vlan_id); err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
if (err < 0) if (err < 0)
goto out_uninit_mvrp; goto out_uninit_mvrp;
@ -181,7 +186,7 @@ int register_vlan_dev(struct net_device *dev)
/* So, got the sucker initialized, now lets place /* So, got the sucker initialized, now lets place
* it into our local structure. * it into our local structure.
*/ */
vlan_group_set_device(grp, vlan_id, dev); vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
grp->nr_vlan_devs++; grp->nr_vlan_devs++;
return 0; return 0;
@ -195,7 +200,7 @@ int register_vlan_dev(struct net_device *dev)
if (grp->nr_vlan_devs == 0) if (grp->nr_vlan_devs == 0)
vlan_gvrp_uninit_applicant(real_dev); vlan_gvrp_uninit_applicant(real_dev);
out_vid_del: out_vid_del:
vlan_vid_del(real_dev, vlan_id); vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
return err; return err;
} }
@ -213,7 +218,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
if (vlan_id >= VLAN_VID_MASK) if (vlan_id >= VLAN_VID_MASK)
return -ERANGE; return -ERANGE;
err = vlan_check_real_dev(real_dev, vlan_id); err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id);
if (err < 0) if (err < 0)
return err; return err;
@ -255,6 +260,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
new_dev->mtu = real_dev->mtu; new_dev->mtu = real_dev->mtu;
new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT); new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT);
vlan_dev_priv(new_dev)->vlan_proto = htons(ETH_P_8021Q);
vlan_dev_priv(new_dev)->vlan_id = vlan_id; vlan_dev_priv(new_dev)->vlan_id = vlan_id;
vlan_dev_priv(new_dev)->real_dev = real_dev; vlan_dev_priv(new_dev)->real_dev = real_dev;
vlan_dev_priv(new_dev)->dent = NULL; vlan_dev_priv(new_dev)->dent = NULL;
@ -301,7 +307,7 @@ static void vlan_transfer_features(struct net_device *dev,
{ {
vlandev->gso_max_size = dev->gso_max_size; vlandev->gso_max_size = dev->gso_max_size;
if (dev->features & NETIF_F_HW_VLAN_TX) if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
vlandev->hard_header_len = dev->hard_header_len; vlandev->hard_header_len = dev->hard_header_len;
else else
vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN; vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
@ -341,16 +347,17 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
int i, flgs; int i, flgs;
struct net_device *vlandev; struct net_device *vlandev;
struct vlan_dev_priv *vlan; struct vlan_dev_priv *vlan;
bool last = false;
LIST_HEAD(list); LIST_HEAD(list);
if (is_vlan_dev(dev)) if (is_vlan_dev(dev))
__vlan_device_event(dev, event); __vlan_device_event(dev, event);
if ((event == NETDEV_UP) && if ((event == NETDEV_UP) &&
(dev->features & NETIF_F_HW_VLAN_FILTER)) { (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
pr_info("adding VLAN 0 to HW filter on device %s\n", pr_info("adding VLAN 0 to HW filter on device %s\n",
dev->name); dev->name);
vlan_vid_add(dev, 0); vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
} }
vlan_info = rtnl_dereference(dev->vlan_info); vlan_info = rtnl_dereference(dev->vlan_info);
@ -365,22 +372,13 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
switch (event) { switch (event) {
case NETDEV_CHANGE: case NETDEV_CHANGE:
/* Propagate real device state to vlan devices */ /* Propagate real device state to vlan devices */
for (i = 0; i < VLAN_N_VID; i++) { vlan_group_for_each_dev(grp, i, vlandev)
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
netif_stacked_transfer_operstate(dev, vlandev); netif_stacked_transfer_operstate(dev, vlandev);
}
break; break;
case NETDEV_CHANGEADDR: case NETDEV_CHANGEADDR:
/* Adjust unicast filters on underlying device */ /* Adjust unicast filters on underlying device */
for (i = 0; i < VLAN_N_VID; i++) { vlan_group_for_each_dev(grp, i, vlandev) {
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
flgs = vlandev->flags; flgs = vlandev->flags;
if (!(flgs & IFF_UP)) if (!(flgs & IFF_UP))
continue; continue;
@ -390,11 +388,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
break; break;
case NETDEV_CHANGEMTU: case NETDEV_CHANGEMTU:
for (i = 0; i < VLAN_N_VID; i++) { vlan_group_for_each_dev(grp, i, vlandev) {
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
if (vlandev->mtu <= dev->mtu) if (vlandev->mtu <= dev->mtu)
continue; continue;
@ -404,26 +398,16 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
case NETDEV_FEAT_CHANGE: case NETDEV_FEAT_CHANGE:
/* Propagate device features to underlying device */ /* Propagate device features to underlying device */
for (i = 0; i < VLAN_N_VID; i++) { vlan_group_for_each_dev(grp, i, vlandev)
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
vlan_transfer_features(dev, vlandev); vlan_transfer_features(dev, vlandev);
}
break; break;
case NETDEV_DOWN: case NETDEV_DOWN:
if (dev->features & NETIF_F_HW_VLAN_FILTER) if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
vlan_vid_del(dev, 0); vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
/* Put all VLANs for this dev in the down state too. */ /* Put all VLANs for this dev in the down state too. */
for (i = 0; i < VLAN_N_VID; i++) { vlan_group_for_each_dev(grp, i, vlandev) {
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
flgs = vlandev->flags; flgs = vlandev->flags;
if (!(flgs & IFF_UP)) if (!(flgs & IFF_UP))
continue; continue;
@ -437,11 +421,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
case NETDEV_UP: case NETDEV_UP:
/* Put all VLANs for this dev in the up state too. */ /* Put all VLANs for this dev in the up state too. */
for (i = 0; i < VLAN_N_VID; i++) { vlan_group_for_each_dev(grp, i, vlandev) {
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
flgs = vlandev->flags; flgs = vlandev->flags;
if (flgs & IFF_UP) if (flgs & IFF_UP)
continue; continue;
@ -458,17 +438,15 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
if (dev->reg_state != NETREG_UNREGISTERING) if (dev->reg_state != NETREG_UNREGISTERING)
break; break;
for (i = 0; i < VLAN_N_VID; i++) { vlan_group_for_each_dev(grp, i, vlandev) {
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
/* removal of last vid destroys vlan_info, abort /* removal of last vid destroys vlan_info, abort
* afterwards */ * afterwards */
if (vlan_info->nr_vids == 1) if (vlan_info->nr_vids == 1)
i = VLAN_N_VID; last = true;
unregister_vlan_dev(vlandev, &list); unregister_vlan_dev(vlandev, &list);
if (last)
break;
} }
unregister_netdevice_many(&list); unregister_netdevice_many(&list);
break; break;
@ -482,13 +460,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
case NETDEV_NOTIFY_PEERS: case NETDEV_NOTIFY_PEERS:
case NETDEV_BONDING_FAILOVER: case NETDEV_BONDING_FAILOVER:
/* Propagate to vlan devices */ /* Propagate to vlan devices */
for (i = 0; i < VLAN_N_VID; i++) { vlan_group_for_each_dev(grp, i, vlandev)
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
call_netdevice_notifiers(event, vlandev); call_netdevice_notifiers(event, vlandev);
}
break; break;
} }

View File

@ -49,6 +49,7 @@ struct netpoll;
* @ingress_priority_map: ingress priority mappings * @ingress_priority_map: ingress priority mappings
* @nr_egress_mappings: number of egress priority mappings * @nr_egress_mappings: number of egress priority mappings
* @egress_priority_map: hash of egress priority mappings * @egress_priority_map: hash of egress priority mappings
* @vlan_proto: VLAN encapsulation protocol
* @vlan_id: VLAN identifier * @vlan_id: VLAN identifier
* @flags: device flags * @flags: device flags
* @real_dev: underlying netdevice * @real_dev: underlying netdevice
@ -62,6 +63,7 @@ struct vlan_dev_priv {
unsigned int nr_egress_mappings; unsigned int nr_egress_mappings;
struct vlan_priority_tci_mapping *egress_priority_map[16]; struct vlan_priority_tci_mapping *egress_priority_map[16];
__be16 vlan_proto;
u16 vlan_id; u16 vlan_id;
u16 flags; u16 flags;
@ -87,10 +89,17 @@ static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8 #define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS) #define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
enum vlan_protos {
VLAN_PROTO_8021Q = 0,
VLAN_PROTO_8021AD,
VLAN_PROTO_NUM,
};
struct vlan_group { struct vlan_group {
unsigned int nr_vlan_devs; unsigned int nr_vlan_devs;
struct hlist_node hlist; /* linked list */ struct hlist_node hlist; /* linked list */
struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS]; struct net_device **vlan_devices_arrays[VLAN_PROTO_NUM]
[VLAN_GROUP_ARRAY_SPLIT_PARTS];
}; };
struct vlan_info { struct vlan_info {
@ -103,37 +112,66 @@ struct vlan_info {
struct rcu_head rcu; struct rcu_head rcu;
}; };
static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, static inline unsigned int vlan_proto_idx(__be16 proto)
u16 vlan_id) {
switch (proto) {
case __constant_htons(ETH_P_8021Q):
return VLAN_PROTO_8021Q;
case __constant_htons(ETH_P_8021AD):
return VLAN_PROTO_8021AD;
default:
BUG();
}
}
static inline struct net_device *__vlan_group_get_device(struct vlan_group *vg,
unsigned int pidx,
u16 vlan_id)
{ {
struct net_device **array; struct net_device **array;
array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
array = vg->vlan_devices_arrays[pidx]
[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL; return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
} }
static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
__be16 vlan_proto,
u16 vlan_id)
{
return __vlan_group_get_device(vg, vlan_proto_idx(vlan_proto), vlan_id);
}
static inline void vlan_group_set_device(struct vlan_group *vg, static inline void vlan_group_set_device(struct vlan_group *vg,
u16 vlan_id, __be16 vlan_proto, u16 vlan_id,
struct net_device *dev) struct net_device *dev)
{ {
struct net_device **array; struct net_device **array;
if (!vg) if (!vg)
return; return;
array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; array = vg->vlan_devices_arrays[vlan_proto_idx(vlan_proto)]
[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev; array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
} }
/* Must be invoked with rcu_read_lock or with RTNL. */ /* Must be invoked with rcu_read_lock or with RTNL. */
static inline struct net_device *vlan_find_dev(struct net_device *real_dev, static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
u16 vlan_id) __be16 vlan_proto, u16 vlan_id)
{ {
struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info); struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
if (vlan_info) if (vlan_info)
return vlan_group_get_device(&vlan_info->grp, vlan_id); return vlan_group_get_device(&vlan_info->grp,
vlan_proto, vlan_id);
return NULL; return NULL;
} }
#define vlan_group_for_each_dev(grp, i, dev) \
for ((i) = 0; i < VLAN_PROTO_NUM * VLAN_N_VID; i++) \
if (((dev) = __vlan_group_get_device((grp), (i) / VLAN_N_VID, \
(i) % VLAN_N_VID)))
/* found in vlan_dev.c */ /* found in vlan_dev.c */
void vlan_dev_set_ingress_priority(const struct net_device *dev, void vlan_dev_set_ingress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio); u32 skb_prio, u16 vlan_prio);
@ -142,7 +180,8 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask); int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask);
void vlan_dev_get_realdev_name(const struct net_device *dev, char *result); void vlan_dev_get_realdev_name(const struct net_device *dev, char *result);
int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id); int vlan_check_real_dev(struct net_device *real_dev,
__be16 protocol, u16 vlan_id);
void vlan_setup(struct net_device *dev); void vlan_setup(struct net_device *dev);
int register_vlan_dev(struct net_device *dev); int register_vlan_dev(struct net_device *dev);
void unregister_vlan_dev(struct net_device *dev, struct list_head *head); void unregister_vlan_dev(struct net_device *dev, struct list_head *head);

View File

@ -8,11 +8,12 @@
bool vlan_do_receive(struct sk_buff **skbp) bool vlan_do_receive(struct sk_buff **skbp)
{ {
struct sk_buff *skb = *skbp; struct sk_buff *skb = *skbp;
__be16 vlan_proto = skb->vlan_proto;
u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK; u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
struct net_device *vlan_dev; struct net_device *vlan_dev;
struct vlan_pcpu_stats *rx_stats; struct vlan_pcpu_stats *rx_stats;
vlan_dev = vlan_find_dev(skb->dev, vlan_id); vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
if (!vlan_dev) if (!vlan_dev)
return false; return false;
@ -38,7 +39,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
* original position later * original position later
*/ */
skb_push(skb, offset); skb_push(skb, offset);
skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci); skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
skb->vlan_tci);
if (!skb) if (!skb)
return false; return false;
skb_pull(skb, offset + VLAN_HLEN); skb_pull(skb, offset + VLAN_HLEN);
@ -62,12 +64,13 @@ bool vlan_do_receive(struct sk_buff **skbp)
/* Must be invoked with rcu_read_lock. */ /* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep(struct net_device *dev, struct net_device *__vlan_find_dev_deep(struct net_device *dev,
u16 vlan_id) __be16 vlan_proto, u16 vlan_id)
{ {
struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info); struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
if (vlan_info) { if (vlan_info) {
return vlan_group_get_device(&vlan_info->grp, vlan_id); return vlan_group_get_device(&vlan_info->grp,
vlan_proto, vlan_id);
} else { } else {
/* /*
* Lower devices of master uppers (bonding, team) do not have * Lower devices of master uppers (bonding, team) do not have
@ -78,7 +81,8 @@ struct net_device *__vlan_find_dev_deep(struct net_device *dev,
upper_dev = netdev_master_upper_dev_get_rcu(dev); upper_dev = netdev_master_upper_dev_get_rcu(dev);
if (upper_dev) if (upper_dev)
return __vlan_find_dev_deep(upper_dev, vlan_id); return __vlan_find_dev_deep(upper_dev,
vlan_proto, vlan_id);
} }
return NULL; return NULL;
@ -125,7 +129,7 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
vhdr = (struct vlan_hdr *) skb->data; vhdr = (struct vlan_hdr *) skb->data;
vlan_tci = ntohs(vhdr->h_vlan_TCI); vlan_tci = ntohs(vhdr->h_vlan_TCI);
__vlan_hwaccel_put_tag(skb, vlan_tci); __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
skb_pull_rcsum(skb, VLAN_HLEN); skb_pull_rcsum(skb, VLAN_HLEN);
vlan_set_encap_proto(skb, vhdr); vlan_set_encap_proto(skb, vhdr);
@ -185,35 +189,49 @@ static struct vlan_info *vlan_info_alloc(struct net_device *dev)
struct vlan_vid_info { struct vlan_vid_info {
struct list_head list; struct list_head list;
unsigned short vid; __be16 proto;
u16 vid;
int refcount; int refcount;
}; };
static bool vlan_hw_filter_capable(const struct net_device *dev,
const struct vlan_vid_info *vid_info)
{
if (vid_info->proto == htons(ETH_P_8021Q) &&
dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
return true;
if (vid_info->proto == htons(ETH_P_8021AD) &&
dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
return true;
return false;
}
static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info, static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
unsigned short vid) __be16 proto, u16 vid)
{ {
struct vlan_vid_info *vid_info; struct vlan_vid_info *vid_info;
list_for_each_entry(vid_info, &vlan_info->vid_list, list) { list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
if (vid_info->vid == vid) if (vid_info->proto == proto && vid_info->vid == vid)
return vid_info; return vid_info;
} }
return NULL; return NULL;
} }
static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid) static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{ {
struct vlan_vid_info *vid_info; struct vlan_vid_info *vid_info;
vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL); vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
if (!vid_info) if (!vid_info)
return NULL; return NULL;
vid_info->proto = proto;
vid_info->vid = vid; vid_info->vid = vid;
return vid_info; return vid_info;
} }
static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid, static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
struct vlan_vid_info **pvid_info) struct vlan_vid_info **pvid_info)
{ {
struct net_device *dev = vlan_info->real_dev; struct net_device *dev = vlan_info->real_dev;
@ -221,12 +239,12 @@ static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
struct vlan_vid_info *vid_info; struct vlan_vid_info *vid_info;
int err; int err;
vid_info = vlan_vid_info_alloc(vid); vid_info = vlan_vid_info_alloc(proto, vid);
if (!vid_info) if (!vid_info)
return -ENOMEM; return -ENOMEM;
if (dev->features & NETIF_F_HW_VLAN_FILTER) { if (vlan_hw_filter_capable(dev, vid_info)) {
err = ops->ndo_vlan_rx_add_vid(dev, vid); err = ops->ndo_vlan_rx_add_vid(dev, proto, vid);
if (err) { if (err) {
kfree(vid_info); kfree(vid_info);
return err; return err;
@ -238,7 +256,7 @@ static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
return 0; return 0;
} }
int vlan_vid_add(struct net_device *dev, unsigned short vid) int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{ {
struct vlan_info *vlan_info; struct vlan_info *vlan_info;
struct vlan_vid_info *vid_info; struct vlan_vid_info *vid_info;
@ -254,9 +272,9 @@ int vlan_vid_add(struct net_device *dev, unsigned short vid)
return -ENOMEM; return -ENOMEM;
vlan_info_created = true; vlan_info_created = true;
} }
vid_info = vlan_vid_info_get(vlan_info, vid); vid_info = vlan_vid_info_get(vlan_info, proto, vid);
if (!vid_info) { if (!vid_info) {
err = __vlan_vid_add(vlan_info, vid, &vid_info); err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
if (err) if (err)
goto out_free_vlan_info; goto out_free_vlan_info;
} }
@ -279,14 +297,15 @@ static void __vlan_vid_del(struct vlan_info *vlan_info,
{ {
struct net_device *dev = vlan_info->real_dev; struct net_device *dev = vlan_info->real_dev;
const struct net_device_ops *ops = dev->netdev_ops; const struct net_device_ops *ops = dev->netdev_ops;
unsigned short vid = vid_info->vid; __be16 proto = vid_info->proto;
u16 vid = vid_info->vid;
int err; int err;
if (dev->features & NETIF_F_HW_VLAN_FILTER) { if (vlan_hw_filter_capable(dev, vid_info)) {
err = ops->ndo_vlan_rx_kill_vid(dev, vid); err = ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
if (err) { if (err) {
pr_warn("failed to kill vid %d for device %s\n", pr_warn("failed to kill vid %04x/%d for device %s\n",
vid, dev->name); proto, vid, dev->name);
} }
} }
list_del(&vid_info->list); list_del(&vid_info->list);
@ -294,7 +313,7 @@ static void __vlan_vid_del(struct vlan_info *vlan_info,
vlan_info->nr_vids--; vlan_info->nr_vids--;
} }
void vlan_vid_del(struct net_device *dev, unsigned short vid) void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{ {
struct vlan_info *vlan_info; struct vlan_info *vlan_info;
struct vlan_vid_info *vid_info; struct vlan_vid_info *vid_info;
@ -305,7 +324,7 @@ void vlan_vid_del(struct net_device *dev, unsigned short vid)
if (!vlan_info) if (!vlan_info)
return; return;
vid_info = vlan_vid_info_get(vlan_info, vid); vid_info = vlan_vid_info_get(vlan_info, proto, vid);
if (!vid_info) if (!vid_info)
return; return;
vid_info->refcount--; vid_info->refcount--;
@ -333,7 +352,7 @@ int vlan_vids_add_by_dev(struct net_device *dev,
return 0; return 0;
list_for_each_entry(vid_info, &vlan_info->vid_list, list) { list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
err = vlan_vid_add(dev, vid_info->vid); err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
if (err) if (err)
goto unwind; goto unwind;
} }
@ -343,7 +362,7 @@ int vlan_vids_add_by_dev(struct net_device *dev,
list_for_each_entry_continue_reverse(vid_info, list_for_each_entry_continue_reverse(vid_info,
&vlan_info->vid_list, &vlan_info->vid_list,
list) { list) {
vlan_vid_del(dev, vid_info->vid); vlan_vid_del(dev, vid_info->proto, vid_info->vid);
} }
return err; return err;
@ -363,7 +382,7 @@ void vlan_vids_del_by_dev(struct net_device *dev,
return; return;
list_for_each_entry(vid_info, &vlan_info->vid_list, list) list_for_each_entry(vid_info, &vlan_info->vid_list, list)
vlan_vid_del(dev, vid_info->vid); vlan_vid_del(dev, vid_info->proto, vid_info->vid);
} }
EXPORT_SYMBOL(vlan_vids_del_by_dev); EXPORT_SYMBOL(vlan_vids_del_by_dev);

View File

@ -99,6 +99,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
const void *daddr, const void *saddr, const void *daddr, const void *saddr,
unsigned int len) unsigned int len)
{ {
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct vlan_hdr *vhdr; struct vlan_hdr *vhdr;
unsigned int vhdrlen = 0; unsigned int vhdrlen = 0;
u16 vlan_tci = 0; u16 vlan_tci = 0;
@ -120,8 +121,8 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
else else
vhdr->h_vlan_encapsulated_proto = htons(len); vhdr->h_vlan_encapsulated_proto = htons(len);
skb->protocol = htons(ETH_P_8021Q); skb->protocol = vlan->vlan_proto;
type = ETH_P_8021Q; type = ntohs(vlan->vlan_proto);
vhdrlen = VLAN_HLEN; vhdrlen = VLAN_HLEN;
} }
@ -161,12 +162,12 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
* NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
* OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
*/ */
if (veth->h_vlan_proto != htons(ETH_P_8021Q) || if (veth->h_vlan_proto != vlan->vlan_proto ||
vlan->flags & VLAN_FLAG_REORDER_HDR) { vlan->flags & VLAN_FLAG_REORDER_HDR) {
u16 vlan_tci; u16 vlan_tci;
vlan_tci = vlan->vlan_id; vlan_tci = vlan->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
skb = __vlan_hwaccel_put_tag(skb, vlan_tci); skb = __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
} }
skb->dev = vlan->real_dev; skb->dev = vlan->real_dev;
@ -583,7 +584,7 @@ static int vlan_dev_init(struct net_device *dev)
#endif #endif
dev->needed_headroom = real_dev->needed_headroom; dev->needed_headroom = real_dev->needed_headroom;
if (real_dev->features & NETIF_F_HW_VLAN_TX) { if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
dev->header_ops = real_dev->header_ops; dev->header_ops = real_dev->header_ops;
dev->hard_header_len = real_dev->hard_header_len; dev->hard_header_len = real_dev->hard_header_len;
} else { } else {

View File

@ -32,6 +32,8 @@ int vlan_gvrp_request_join(const struct net_device *dev)
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
__be16 vlan_id = htons(vlan->vlan_id); __be16 vlan_id = htons(vlan->vlan_id);
if (vlan->vlan_proto != htons(ETH_P_8021Q))
return 0;
return garp_request_join(vlan->real_dev, &vlan_gvrp_app, return garp_request_join(vlan->real_dev, &vlan_gvrp_app,
&vlan_id, sizeof(vlan_id), GVRP_ATTR_VID); &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
} }
@ -41,6 +43,8 @@ void vlan_gvrp_request_leave(const struct net_device *dev)
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
__be16 vlan_id = htons(vlan->vlan_id); __be16 vlan_id = htons(vlan->vlan_id);
if (vlan->vlan_proto != htons(ETH_P_8021Q))
return;
garp_request_leave(vlan->real_dev, &vlan_gvrp_app, garp_request_leave(vlan->real_dev, &vlan_gvrp_app,
&vlan_id, sizeof(vlan_id), GVRP_ATTR_VID); &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
} }

View File

@ -38,6 +38,8 @@ int vlan_mvrp_request_join(const struct net_device *dev)
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
__be16 vlan_id = htons(vlan->vlan_id); __be16 vlan_id = htons(vlan->vlan_id);
if (vlan->vlan_proto != htons(ETH_P_8021Q))
return 0;
return mrp_request_join(vlan->real_dev, &vlan_mrp_app, return mrp_request_join(vlan->real_dev, &vlan_mrp_app,
&vlan_id, sizeof(vlan_id), MVRP_ATTR_VID); &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
} }
@ -47,6 +49,8 @@ void vlan_mvrp_request_leave(const struct net_device *dev)
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
__be16 vlan_id = htons(vlan->vlan_id); __be16 vlan_id = htons(vlan->vlan_id);
if (vlan->vlan_proto != htons(ETH_P_8021Q))
return;
mrp_request_leave(vlan->real_dev, &vlan_mrp_app, mrp_request_leave(vlan->real_dev, &vlan_mrp_app,
&vlan_id, sizeof(vlan_id), MVRP_ATTR_VID); &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
} }

View File

@ -23,6 +23,7 @@ static const struct nla_policy vlan_policy[IFLA_VLAN_MAX + 1] = {
[IFLA_VLAN_FLAGS] = { .len = sizeof(struct ifla_vlan_flags) }, [IFLA_VLAN_FLAGS] = { .len = sizeof(struct ifla_vlan_flags) },
[IFLA_VLAN_EGRESS_QOS] = { .type = NLA_NESTED }, [IFLA_VLAN_EGRESS_QOS] = { .type = NLA_NESTED },
[IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED }, [IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED },
[IFLA_VLAN_PROTOCOL] = { .type = NLA_U16 },
}; };
static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = { static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = {
@ -53,6 +54,16 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
if (!data) if (!data)
return -EINVAL; return -EINVAL;
if (data[IFLA_VLAN_PROTOCOL]) {
switch (nla_get_be16(data[IFLA_VLAN_PROTOCOL])) {
case __constant_htons(ETH_P_8021Q):
case __constant_htons(ETH_P_8021AD):
break;
default:
return -EPROTONOSUPPORT;
}
}
if (data[IFLA_VLAN_ID]) { if (data[IFLA_VLAN_ID]) {
id = nla_get_u16(data[IFLA_VLAN_ID]); id = nla_get_u16(data[IFLA_VLAN_ID]);
if (id >= VLAN_VID_MASK) if (id >= VLAN_VID_MASK)
@ -107,6 +118,7 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
{ {
struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev; struct net_device *real_dev;
__be16 proto;
int err; int err;
if (!data[IFLA_VLAN_ID]) if (!data[IFLA_VLAN_ID])
@ -118,11 +130,17 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
if (!real_dev) if (!real_dev)
return -ENODEV; return -ENODEV;
vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]); if (data[IFLA_VLAN_PROTOCOL])
vlan->real_dev = real_dev; proto = nla_get_be16(data[IFLA_VLAN_PROTOCOL]);
vlan->flags = VLAN_FLAG_REORDER_HDR; else
proto = htons(ETH_P_8021Q);
err = vlan_check_real_dev(real_dev, vlan->vlan_id); vlan->vlan_proto = proto;
vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]);
vlan->real_dev = real_dev;
vlan->flags = VLAN_FLAG_REORDER_HDR;
err = vlan_check_real_dev(real_dev, vlan->vlan_proto, vlan->vlan_id);
if (err < 0) if (err < 0)
return err; return err;
@ -151,7 +169,8 @@ static size_t vlan_get_size(const struct net_device *dev)
{ {
struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
return nla_total_size(2) + /* IFLA_VLAN_ID */ return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */
nla_total_size(2) + /* IFLA_VLAN_ID */
sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */ sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
vlan_qos_map_size(vlan->nr_ingress_mappings) + vlan_qos_map_size(vlan->nr_ingress_mappings) +
vlan_qos_map_size(vlan->nr_egress_mappings); vlan_qos_map_size(vlan->nr_egress_mappings);
@ -166,7 +185,8 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct nlattr *nest; struct nlattr *nest;
unsigned int i; unsigned int i;
if (nla_put_u16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id)) if (nla_put_be16(skb, IFLA_VLAN_PROTOCOL, vlan->vlan_proto) ||
nla_put_u16(skb, IFLA_VLAN_ID, vlan->vlan_id))
goto nla_put_failure; goto nla_put_failure;
if (vlan->flags) { if (vlan->flags) {
f.flags = vlan->flags; f.flags = vlan->flags;

View File

@ -341,7 +341,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
} }
if (vid != -1) if (vid != -1)
skb = vlan_insert_tag(skb, vid); skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vid);
skb_reset_mac_header(skb); skb_reset_mac_header(skb);
skb->protocol = eth_type_trans(skb, soft_iface); skb->protocol = eth_type_trans(skb, soft_iface);

View File

@ -348,10 +348,10 @@ void br_dev_setup(struct net_device *dev)
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_LLTX | NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_LLTX |
NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX; NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_CTAG_TX;
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_TX; NETIF_F_HW_VLAN_CTAG_TX;
br->dev = dev; br->dev = dev;
spin_lock_init(&br->lock); spin_lock_init(&br->lock);

View File

@ -535,7 +535,8 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct
if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb)) if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
return br; return br;
vlan = __vlan_find_dev_deep(br, vlan_tx_tag_get(skb) & VLAN_VID_MASK); vlan = __vlan_find_dev_deep(br, skb->vlan_proto,
vlan_tx_tag_get(skb) & VLAN_VID_MASK);
return vlan ? vlan : br; return vlan ? vlan : br;
} }

View File

@ -34,6 +34,7 @@ static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags)
static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
{ {
const struct net_device_ops *ops;
struct net_bridge_port *p = NULL; struct net_bridge_port *p = NULL;
struct net_bridge *br; struct net_bridge *br;
struct net_device *dev; struct net_device *dev;
@ -53,15 +54,17 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
br = v->parent.br; br = v->parent.br;
dev = br->dev; dev = br->dev;
} }
ops = dev->netdev_ops;
if (p && (dev->features & NETIF_F_HW_VLAN_FILTER)) { if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
/* Add VLAN to the device filter if it is supported. /* Add VLAN to the device filter if it is supported.
* Stricly speaking, this is not necessary now, since * Stricly speaking, this is not necessary now, since
* devices are made promiscuous by the bridge, but if * devices are made promiscuous by the bridge, but if
* that ever changes this code will allow tagged * that ever changes this code will allow tagged
* traffic to enter the bridge. * traffic to enter the bridge.
*/ */
err = dev->netdev_ops->ndo_vlan_rx_add_vid(dev, vid); err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q),
vid);
if (err) if (err)
return err; return err;
} }
@ -82,8 +85,8 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
return 0; return 0;
out_filt: out_filt:
if (p && (dev->features & NETIF_F_HW_VLAN_FILTER)) if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, vid); ops->ndo_vlan_rx_kill_vid(dev, htons(ETH_P_8021Q), vid);
return err; return err;
} }
@ -97,9 +100,10 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
if (v->port_idx && vid) { if (v->port_idx && vid) {
struct net_device *dev = v->parent.port->dev; struct net_device *dev = v->parent.port->dev;
const struct net_device_ops *ops = dev->netdev_ops;
if (dev->features & NETIF_F_HW_VLAN_FILTER) if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, vid); ops->ndo_vlan_rx_kill_vid(dev, htons(ETH_P_8021Q), vid);
} }
clear_bit(vid, v->vlan_bitmap); clear_bit(vid, v->vlan_bitmap);
@ -171,7 +175,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
* mac header. * mac header.
*/ */
skb_push(skb, ETH_HLEN); skb_push(skb, ETH_HLEN);
skb = __vlan_put_tag(skb, skb->vlan_tci); skb = __vlan_put_tag(skb, skb->vlan_proto, skb->vlan_tci);
if (!skb) if (!skb)
goto out; goto out;
/* put skb->data back to where it was */ /* put skb->data back to where it was */
@ -213,7 +217,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
/* PVID is set on this port. Any untagged ingress /* PVID is set on this port. Any untagged ingress
* frame is considered to belong to this vlan. * frame is considered to belong to this vlan.
*/ */
__vlan_hwaccel_put_tag(skb, pvid); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid);
return true; return true;
} }

View File

@ -2212,7 +2212,7 @@ __be16 skb_network_protocol(struct sk_buff *skb)
__be16 type = skb->protocol; __be16 type = skb->protocol;
int vlan_depth = ETH_HLEN; int vlan_depth = ETH_HLEN;
while (type == htons(ETH_P_8021Q)) { while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
struct vlan_hdr *vh; struct vlan_hdr *vh;
if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
@ -2428,20 +2428,22 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
features &= ~NETIF_F_GSO_MASK; features &= ~NETIF_F_GSO_MASK;
if (protocol == htons(ETH_P_8021Q)) { if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto; protocol = veh->h_vlan_encapsulated_proto;
} else if (!vlan_tx_tag_present(skb)) { } else if (!vlan_tx_tag_present(skb)) {
return harmonize_features(skb, protocol, features); return harmonize_features(skb, protocol, features);
} }
features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX); features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
if (protocol != htons(ETH_P_8021Q)) { if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) {
return harmonize_features(skb, protocol, features); return harmonize_features(skb, protocol, features);
} else { } else {
features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX; NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
return harmonize_features(skb, protocol, features); return harmonize_features(skb, protocol, features);
} }
} }
@ -2482,8 +2484,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
features = netif_skb_features(skb); features = netif_skb_features(skb);
if (vlan_tx_tag_present(skb) && if (vlan_tx_tag_present(skb) &&
!(features & NETIF_F_HW_VLAN_TX)) { !vlan_hw_offload_capable(features, skb->vlan_proto)) {
skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); skb = __vlan_put_tag(skb, skb->vlan_proto,
vlan_tx_tag_get(skb));
if (unlikely(!skb)) if (unlikely(!skb))
goto out; goto out;
@ -3359,6 +3362,7 @@ static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
case __constant_htons(ETH_P_IP): case __constant_htons(ETH_P_IP):
case __constant_htons(ETH_P_IPV6): case __constant_htons(ETH_P_IPV6):
case __constant_htons(ETH_P_8021Q): case __constant_htons(ETH_P_8021Q):
case __constant_htons(ETH_P_8021AD):
return true; return true;
default: default:
return false; return false;
@ -3399,7 +3403,8 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
__this_cpu_inc(softnet_data.processed); __this_cpu_inc(softnet_data.processed);
if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
skb = vlan_untag(skb); skb = vlan_untag(skb);
if (unlikely(!skb)) if (unlikely(!skb))
goto unlock; goto unlock;
@ -5180,7 +5185,8 @@ int register_netdevice(struct net_device *dev)
} }
} }
if (((dev->hw_features | dev->features) & NETIF_F_HW_VLAN_FILTER) && if (((dev->hw_features | dev->features) &
NETIF_F_HW_VLAN_CTAG_FILTER) &&
(!dev->netdev_ops->ndo_vlan_rx_add_vid || (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
!dev->netdev_ops->ndo_vlan_rx_kill_vid)) { !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");

View File

@ -60,10 +60,10 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
[NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6", [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6",
[NETIF_F_HIGHDMA_BIT] = "highdma", [NETIF_F_HIGHDMA_BIT] = "highdma",
[NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist", [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist",
[NETIF_F_HW_VLAN_TX_BIT] = "tx-vlan-hw-insert", [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-ctag-hw-insert",
[NETIF_F_HW_VLAN_RX_BIT] = "rx-vlan-hw-parse", [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-ctag-hw-parse",
[NETIF_F_HW_VLAN_FILTER_BIT] = "rx-vlan-filter", [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-ctag-filter",
[NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged", [NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged",
[NETIF_F_GSO_BIT] = "tx-generic-segmentation", [NETIF_F_GSO_BIT] = "tx-generic-segmentation",
[NETIF_F_LLTX_BIT] = "tx-lockless", [NETIF_F_LLTX_BIT] = "tx-lockless",
@ -267,18 +267,19 @@ static int ethtool_set_one_feature(struct net_device *dev,
#define ETH_ALL_FLAGS (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \ #define ETH_ALL_FLAGS (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \
ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH) ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH)
#define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_RX | \ #define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_TX | NETIF_F_NTUPLE | NETIF_F_RXHASH) NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_NTUPLE | \
NETIF_F_RXHASH)
static u32 __ethtool_get_flags(struct net_device *dev) static u32 __ethtool_get_flags(struct net_device *dev)
{ {
u32 flags = 0; u32 flags = 0;
if (dev->features & NETIF_F_LRO) flags |= ETH_FLAG_LRO; if (dev->features & NETIF_F_LRO) flags |= ETH_FLAG_LRO;
if (dev->features & NETIF_F_HW_VLAN_RX) flags |= ETH_FLAG_RXVLAN; if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) flags |= ETH_FLAG_RXVLAN;
if (dev->features & NETIF_F_HW_VLAN_TX) flags |= ETH_FLAG_TXVLAN; if (dev->features & NETIF_F_HW_VLAN_CTAG_TX) flags |= ETH_FLAG_TXVLAN;
if (dev->features & NETIF_F_NTUPLE) flags |= ETH_FLAG_NTUPLE; if (dev->features & NETIF_F_NTUPLE) flags |= ETH_FLAG_NTUPLE;
if (dev->features & NETIF_F_RXHASH) flags |= ETH_FLAG_RXHASH; if (dev->features & NETIF_F_RXHASH) flags |= ETH_FLAG_RXHASH;
return flags; return flags;
} }
@ -291,8 +292,8 @@ static int __ethtool_set_flags(struct net_device *dev, u32 data)
return -EINVAL; return -EINVAL;
if (data & ETH_FLAG_LRO) features |= NETIF_F_LRO; if (data & ETH_FLAG_LRO) features |= NETIF_F_LRO;
if (data & ETH_FLAG_RXVLAN) features |= NETIF_F_HW_VLAN_RX; if (data & ETH_FLAG_RXVLAN) features |= NETIF_F_HW_VLAN_CTAG_RX;
if (data & ETH_FLAG_TXVLAN) features |= NETIF_F_HW_VLAN_TX; if (data & ETH_FLAG_TXVLAN) features |= NETIF_F_HW_VLAN_CTAG_TX;
if (data & ETH_FLAG_NTUPLE) features |= NETIF_F_NTUPLE; if (data & ETH_FLAG_NTUPLE) features |= NETIF_F_NTUPLE;
if (data & ETH_FLAG_RXHASH) features |= NETIF_F_RXHASH; if (data & ETH_FLAG_RXHASH) features |= NETIF_F_RXHASH;

View File

@ -383,8 +383,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
if (__netif_tx_trylock(txq)) { if (__netif_tx_trylock(txq)) {
if (!netif_xmit_stopped(txq)) { if (!netif_xmit_stopped(txq)) {
if (vlan_tx_tag_present(skb) && if (vlan_tx_tag_present(skb) &&
!(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) { !vlan_hw_offload_capable(netif_skb_features(skb),
skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); skb->vlan_proto)) {
skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
if (unlikely(!skb)) if (unlikely(!skb))
break; break;
skb->vlan_tci = 0; skb->vlan_tci = 0;

View File

@ -707,6 +707,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->tc_verd = old->tc_verd; new->tc_verd = old->tc_verd;
#endif #endif
#endif #endif
new->vlan_proto = old->vlan_proto;
new->vlan_tci = old->vlan_tci; new->vlan_tci = old->vlan_tci;
skb_copy_secmark(new, old); skb_copy_secmark(new, old);

View File

@ -98,7 +98,7 @@ static int pop_vlan(struct sk_buff *skb)
if (unlikely(err)) if (unlikely(err))
return err; return err;
__vlan_hwaccel_put_tag(skb, ntohs(tci)); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
return 0; return 0;
} }
@ -110,7 +110,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla
/* push down current VLAN tag */ /* push down current VLAN tag */
current_tag = vlan_tx_tag_get(skb); current_tag = vlan_tx_tag_get(skb);
if (!__vlan_put_tag(skb, current_tag)) if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
return -ENOMEM; return -ENOMEM;
if (skb->ip_summed == CHECKSUM_COMPLETE) if (skb->ip_summed == CHECKSUM_COMPLETE)
@ -118,7 +118,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla
+ (2 * ETH_ALEN), VLAN_HLEN, 0)); + (2 * ETH_ALEN), VLAN_HLEN, 0));
} }
__vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); __vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
return 0; return 0;
} }

View File

@ -401,7 +401,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
if (!nskb) if (!nskb)
return -ENOMEM; return -ENOMEM;
nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb)); nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
if (!nskb) if (!nskb)
return -ENOMEM; return -ENOMEM;

Some files were not shown because too many files have changed in this diff Show More