mirror of https://gitee.com/openkylin/linux.git
Merge branch '8021ad'
Patrick McHardy says: ==================== The following patches add support for 802.1ad (provider tagging) to the VLAN driver. The patchset consists of the following parts: - renaming of the NET_F_HW_VLAN feature flags to indicate that they only operate on CTAGs - preparation for 802.1ad VLAN filtering offload by adding a proto argument to the rx_{add,kill}_vid net_device_ops callbacks - preparation of the VLAN code to support multiple protocols by making the protocol used for tagging a property of the VLAN device and converting the device lookup functions accordingly - second step of preparation of the VLAN code by making the packet tagging functions take a protocol argument - introducation of 802.1ad support in the VLAN code, consisting mainly of checking for ETH_P_8021AD in a couple of places and testing the netdevice offload feature checks to take the protocol into account - announcement of STAG offloading capabilities in a couple of drivers for virtual network devices The patchset is based on net-next.git and has been tested with single and double tagging with and without HW acceleration (for CTAGs). ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
447b816fe0
|
@ -2948,7 +2948,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
|
|||
nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
|
||||
nesvnic->netdev->name, vlan_tag);
|
||||
|
||||
__vlan_hwaccel_put_tag(rx_skb, vlan_tag);
|
||||
__vlan_hwaccel_put_tag(rx_skb, htons(ETH_P_8021Q), vlan_tag);
|
||||
}
|
||||
if (nes_use_lro)
|
||||
lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);
|
||||
|
|
|
@ -1599,7 +1599,7 @@ static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev,
|
|||
|
||||
/* Enable/Disable VLAN Stripping */
|
||||
u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG);
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
u32temp &= 0xfdffffff;
|
||||
else
|
||||
u32temp |= 0x02000000;
|
||||
|
@ -1614,10 +1614,10 @@ static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_feat
|
|||
* Since there is no support for separate rx/tx vlan accel
|
||||
* enable/disable make sure tx flag is always in same state as rx.
|
||||
*/
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
features |= NETIF_F_HW_VLAN_TX;
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
features |= NETIF_F_HW_VLAN_CTAG_TX;
|
||||
else
|
||||
features &= ~NETIF_F_HW_VLAN_TX;
|
||||
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
return features;
|
||||
}
|
||||
|
@ -1628,7 +1628,7 @@ static int nes_set_features(struct net_device *netdev, netdev_features_t feature
|
|||
struct nes_device *nesdev = nesvnic->nesdev;
|
||||
u32 changed = netdev->features ^ features;
|
||||
|
||||
if (changed & NETIF_F_HW_VLAN_RX)
|
||||
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
nes_vlan_mode(netdev, nesdev, features);
|
||||
|
||||
return 0;
|
||||
|
@ -1706,11 +1706,11 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
|
|||
netdev->dev_addr[4] = (u8)(u64temp>>8);
|
||||
netdev->dev_addr[5] = (u8)u64temp;
|
||||
|
||||
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
|
||||
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV))
|
||||
netdev->hw_features |= NETIF_F_TSO;
|
||||
|
||||
netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX;
|
||||
netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX;
|
||||
netdev->hw_features |= NETIF_F_LRO;
|
||||
|
||||
nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
|
||||
|
|
|
@ -514,7 +514,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
|
|||
skb->dev = client_info->slave->dev;
|
||||
|
||||
if (client_info->tag) {
|
||||
skb = vlan_put_tag(skb, client_info->vlan_id);
|
||||
skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id);
|
||||
if (!skb) {
|
||||
pr_err("%s: Error: failed to insert VLAN tag\n",
|
||||
client_info->slave->bond->dev->name);
|
||||
|
@ -1014,7 +1014,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
|
|||
continue;
|
||||
}
|
||||
|
||||
skb = vlan_put_tag(skb, vlan->vlan_id);
|
||||
skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan->vlan_id);
|
||||
if (!skb) {
|
||||
pr_err("%s: Error: failed to insert VLAN tag\n",
|
||||
bond->dev->name);
|
||||
|
|
|
@ -428,14 +428,15 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
|
|||
* @bond_dev: bonding net device that got called
|
||||
* @vid: vlan id being added
|
||||
*/
|
||||
static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
|
||||
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct bonding *bond = netdev_priv(bond_dev);
|
||||
struct slave *slave, *stop_at;
|
||||
int i, res;
|
||||
|
||||
bond_for_each_slave(bond, slave, i) {
|
||||
res = vlan_vid_add(slave->dev, vid);
|
||||
res = vlan_vid_add(slave->dev, proto, vid);
|
||||
if (res)
|
||||
goto unwind;
|
||||
}
|
||||
|
@ -453,7 +454,7 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
|
|||
/* unwind from head to the slave that failed */
|
||||
stop_at = slave;
|
||||
bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
|
||||
vlan_vid_del(slave->dev, vid);
|
||||
vlan_vid_del(slave->dev, proto, vid);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
@ -463,14 +464,15 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
|
|||
* @bond_dev: bonding net device that got called
|
||||
* @vid: vlan id being removed
|
||||
*/
|
||||
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
|
||||
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct bonding *bond = netdev_priv(bond_dev);
|
||||
struct slave *slave;
|
||||
int i, res;
|
||||
|
||||
bond_for_each_slave(bond, slave, i)
|
||||
vlan_vid_del(slave->dev, vid);
|
||||
vlan_vid_del(slave->dev, proto, vid);
|
||||
|
||||
res = bond_del_vlan(bond, vid);
|
||||
if (res) {
|
||||
|
@ -488,7 +490,8 @@ static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *sla
|
|||
int res;
|
||||
|
||||
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
|
||||
res = vlan_vid_add(slave_dev, vlan->vlan_id);
|
||||
res = vlan_vid_add(slave_dev, htons(ETH_P_8021Q),
|
||||
vlan->vlan_id);
|
||||
if (res)
|
||||
pr_warning("%s: Failed to add vlan id %d to device %s\n",
|
||||
bond->dev->name, vlan->vlan_id,
|
||||
|
@ -504,7 +507,7 @@ static void bond_del_vlans_from_slave(struct bonding *bond,
|
|||
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
|
||||
if (!vlan->vlan_id)
|
||||
continue;
|
||||
vlan_vid_del(slave_dev, vlan->vlan_id);
|
||||
vlan_vid_del(slave_dev, htons(ETH_P_8021Q), vlan->vlan_id);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -779,7 +782,7 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
|
|||
|
||||
/* rejoin all groups on vlan devices */
|
||||
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
|
||||
vlan_dev = __vlan_find_dev_deep(bond_dev,
|
||||
vlan_dev = __vlan_find_dev_deep(bond_dev, htons(ETH_P_8021Q),
|
||||
vlan->vlan_id);
|
||||
if (vlan_dev)
|
||||
__bond_resend_igmp_join_requests(vlan_dev);
|
||||
|
@ -2509,7 +2512,8 @@ static int bond_has_this_ip(struct bonding *bond, __be32 ip)
|
|||
|
||||
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
|
||||
rcu_read_lock();
|
||||
vlan_dev = __vlan_find_dev_deep(bond->dev, vlan->vlan_id);
|
||||
vlan_dev = __vlan_find_dev_deep(bond->dev, htons(ETH_P_8021Q),
|
||||
vlan->vlan_id);
|
||||
rcu_read_unlock();
|
||||
if (vlan_dev && ip == bond_confirm_addr(vlan_dev, 0, ip))
|
||||
return 1;
|
||||
|
@ -2538,7 +2542,7 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
|
|||
return;
|
||||
}
|
||||
if (vlan_id) {
|
||||
skb = vlan_put_tag(skb, vlan_id);
|
||||
skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
|
||||
if (!skb) {
|
||||
pr_err("failed to insert VLAN tag\n");
|
||||
return;
|
||||
|
@ -2600,6 +2604,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
|
|||
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
|
||||
rcu_read_lock();
|
||||
vlan_dev = __vlan_find_dev_deep(bond->dev,
|
||||
htons(ETH_P_8021Q),
|
||||
vlan->vlan_id);
|
||||
rcu_read_unlock();
|
||||
if (vlan_dev == rt->dst.dev) {
|
||||
|
@ -4322,9 +4327,9 @@ static void bond_setup(struct net_device *bond_dev)
|
|||
*/
|
||||
|
||||
bond_dev->hw_features = BOND_VLAN_FEATURES |
|
||||
NETIF_F_HW_VLAN_TX |
|
||||
NETIF_F_HW_VLAN_RX |
|
||||
NETIF_F_HW_VLAN_FILTER;
|
||||
NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX |
|
||||
NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
|
||||
bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
|
||||
bond_dev->features |= bond_dev->hw_features;
|
||||
|
|
|
@ -1690,7 +1690,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
|
|||
skb_checksum_none_assert(new_skb);
|
||||
|
||||
if (rx->rxStatus & TYPHOON_RX_VLAN)
|
||||
__vlan_hwaccel_put_tag(new_skb,
|
||||
__vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q),
|
||||
ntohl(rx->vlanTag) & 0xffff);
|
||||
netif_receive_skb(new_skb);
|
||||
|
||||
|
@ -2445,9 +2445,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
* settings -- so we only allow the user to toggle the TX processing.
|
||||
*/
|
||||
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
|
||||
NETIF_F_HW_VLAN_TX;
|
||||
NETIF_F_HW_VLAN_CTAG_TX;
|
||||
dev->features = dev->hw_features |
|
||||
NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM;
|
||||
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
|
||||
|
||||
if(register_netdev(dev) < 0) {
|
||||
err_msg = "unable to register netdev";
|
||||
|
|
|
@ -594,7 +594,8 @@ static const struct ethtool_ops ethtool_ops;
|
|||
|
||||
|
||||
#ifdef VLAN_SUPPORT
|
||||
static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
|
||||
static int netdev_vlan_rx_add_vid(struct net_device *dev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct netdev_private *np = netdev_priv(dev);
|
||||
|
||||
|
@ -608,7 +609,8 @@ static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
|
||||
static int netdev_vlan_rx_kill_vid(struct net_device *dev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct netdev_private *np = netdev_priv(dev);
|
||||
|
||||
|
@ -702,7 +704,7 @@ static int starfire_init_one(struct pci_dev *pdev,
|
|||
#endif /* ZEROCOPY */
|
||||
|
||||
#ifdef VLAN_SUPPORT
|
||||
dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
|
||||
dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
#endif /* VLAN_RX_KILL_VID */
|
||||
#ifdef ADDR_64BITS
|
||||
dev->features |= NETIF_F_HIGHDMA;
|
||||
|
@ -1496,7 +1498,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
|
|||
printk(KERN_DEBUG " netdev_rx() vlanid = %d\n",
|
||||
vlid);
|
||||
}
|
||||
__vlan_hwaccel_put_tag(skb, vlid);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
|
||||
}
|
||||
#endif /* VLAN_SUPPORT */
|
||||
netif_receive_skb(skb);
|
||||
|
|
|
@ -472,7 +472,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
|
|||
ap->name = pci_name(pdev);
|
||||
|
||||
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
|
||||
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
|
||||
dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
|
||||
dev->watchdog_timeo = 5*HZ;
|
||||
|
||||
|
@ -2019,7 +2019,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
|
|||
|
||||
/* send it up */
|
||||
if ((bd_flags & BD_FLG_VLAN_TAG))
|
||||
__vlan_hwaccel_put_tag(skb, retdesc->vlan);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), retdesc->vlan);
|
||||
netif_rx(skb);
|
||||
|
||||
dev->stats.rx_packets++;
|
||||
|
|
|
@ -793,7 +793,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
|
|||
#if AMD8111E_VLAN_TAG_USED
|
||||
if (vtag == TT_VLAN_TAGGED){
|
||||
u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
|
||||
__vlan_hwaccel_put_tag(skb, vlan_tag);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
|
||||
}
|
||||
#endif
|
||||
netif_receive_skb(skb);
|
||||
|
@ -1869,7 +1869,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
|
|||
SET_NETDEV_DEV(dev, &pdev->dev);
|
||||
|
||||
#if AMD8111E_VLAN_TAG_USED
|
||||
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
|
||||
dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX ;
|
||||
#endif
|
||||
|
||||
lp = netdev_priv(dev);
|
||||
|
@ -1907,7 +1907,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
|
|||
netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
|
||||
|
||||
#if AMD8111E_VLAN_TAG_USED
|
||||
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
|
||||
dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
#endif
|
||||
/* Probe the external PHY */
|
||||
amd8111e_probe_ext_phy(dev);
|
||||
|
|
|
@ -417,7 +417,7 @@ static void atl1c_set_multi(struct net_device *netdev)
|
|||
|
||||
static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
|
||||
{
|
||||
if (features & NETIF_F_HW_VLAN_RX) {
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX) {
|
||||
/* enable VLAN tag insert/strip */
|
||||
*mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
|
||||
} else {
|
||||
|
@ -494,10 +494,10 @@ static netdev_features_t atl1c_fix_features(struct net_device *netdev,
|
|||
* Since there is no support for separate rx/tx vlan accel
|
||||
* enable/disable make sure tx flag is always in same state as rx.
|
||||
*/
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
features |= NETIF_F_HW_VLAN_TX;
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
features |= NETIF_F_HW_VLAN_CTAG_TX;
|
||||
else
|
||||
features &= ~NETIF_F_HW_VLAN_TX;
|
||||
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
if (netdev->mtu > MAX_TSO_FRAME_SIZE)
|
||||
features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
|
||||
|
@ -510,7 +510,7 @@ static int atl1c_set_features(struct net_device *netdev,
|
|||
{
|
||||
netdev_features_t changed = netdev->features ^ features;
|
||||
|
||||
if (changed & NETIF_F_HW_VLAN_RX)
|
||||
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
atl1c_vlan_mode(netdev, features);
|
||||
|
||||
return 0;
|
||||
|
@ -1809,7 +1809,7 @@ static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
|
|||
|
||||
AT_TAG_TO_VLAN(rrs->vlan_tag, vlan);
|
||||
vlan = le16_to_cpu(vlan);
|
||||
__vlan_hwaccel_put_tag(skb, vlan);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
|
||||
}
|
||||
netif_receive_skb(skb);
|
||||
|
||||
|
@ -2475,13 +2475,13 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
|
|||
atl1c_set_ethtool_ops(netdev);
|
||||
|
||||
/* TODO: add when ready */
|
||||
netdev->hw_features = NETIF_F_SG |
|
||||
NETIF_F_HW_CSUM |
|
||||
NETIF_F_HW_VLAN_RX |
|
||||
NETIF_F_TSO |
|
||||
netdev->hw_features = NETIF_F_SG |
|
||||
NETIF_F_HW_CSUM |
|
||||
NETIF_F_HW_VLAN_CTAG_RX |
|
||||
NETIF_F_TSO |
|
||||
NETIF_F_TSO6;
|
||||
netdev->features = netdev->hw_features |
|
||||
NETIF_F_HW_VLAN_TX;
|
||||
netdev->features = netdev->hw_features |
|
||||
NETIF_F_HW_VLAN_CTAG_TX;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -315,7 +315,7 @@ static void atl1e_set_multi(struct net_device *netdev)
|
|||
|
||||
static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
|
||||
{
|
||||
if (features & NETIF_F_HW_VLAN_RX) {
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX) {
|
||||
/* enable VLAN tag insert/strip */
|
||||
*mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
|
||||
} else {
|
||||
|
@ -378,10 +378,10 @@ static netdev_features_t atl1e_fix_features(struct net_device *netdev,
|
|||
* Since there is no support for separate rx/tx vlan accel
|
||||
* enable/disable make sure tx flag is always in same state as rx.
|
||||
*/
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
features |= NETIF_F_HW_VLAN_TX;
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
features |= NETIF_F_HW_VLAN_CTAG_TX;
|
||||
else
|
||||
features &= ~NETIF_F_HW_VLAN_TX;
|
||||
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
return features;
|
||||
}
|
||||
|
@ -391,7 +391,7 @@ static int atl1e_set_features(struct net_device *netdev,
|
|||
{
|
||||
netdev_features_t changed = netdev->features ^ features;
|
||||
|
||||
if (changed & NETIF_F_HW_VLAN_RX)
|
||||
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
atl1e_vlan_mode(netdev, features);
|
||||
|
||||
return 0;
|
||||
|
@ -1435,7 +1435,7 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
|
|||
netdev_dbg(netdev,
|
||||
"RXD VLAN TAG<RRD>=0x%04x\n",
|
||||
prrs->vtag);
|
||||
__vlan_hwaccel_put_tag(skb, vlan_tag);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
|
||||
}
|
||||
netif_receive_skb(skb);
|
||||
|
||||
|
@ -2198,9 +2198,9 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
|
|||
atl1e_set_ethtool_ops(netdev);
|
||||
|
||||
netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
|
||||
NETIF_F_HW_VLAN_RX;
|
||||
NETIF_F_HW_VLAN_CTAG_RX;
|
||||
netdev->features = netdev->hw_features | NETIF_F_LLTX |
|
||||
NETIF_F_HW_VLAN_TX;
|
||||
NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -2024,7 +2024,7 @@ static int atl1_intr_rx(struct atl1_adapter *adapter, int budget)
|
|||
((rrd->vlan_tag & 7) << 13) |
|
||||
((rrd->vlan_tag & 8) << 9);
|
||||
|
||||
__vlan_hwaccel_put_tag(skb, vlan_tag);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
|
||||
}
|
||||
netif_receive_skb(skb);
|
||||
|
||||
|
@ -3018,10 +3018,10 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
|
||||
netdev->features = NETIF_F_HW_CSUM;
|
||||
netdev->features |= NETIF_F_SG;
|
||||
netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
|
||||
netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
|
||||
|
||||
netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO |
|
||||
NETIF_F_HW_VLAN_RX;
|
||||
NETIF_F_HW_VLAN_CTAG_RX;
|
||||
|
||||
/* is this valid? see atl1_setup_mac_ctrl() */
|
||||
netdev->features |= NETIF_F_RXCSUM;
|
||||
|
|
|
@ -363,7 +363,7 @@ static inline void atl2_irq_disable(struct atl2_adapter *adapter)
|
|||
|
||||
static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl)
|
||||
{
|
||||
if (features & NETIF_F_HW_VLAN_RX) {
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX) {
|
||||
/* enable VLAN tag insert/strip */
|
||||
*ctrl |= MAC_CTRL_RMV_VLAN;
|
||||
} else {
|
||||
|
@ -399,10 +399,10 @@ static netdev_features_t atl2_fix_features(struct net_device *netdev,
|
|||
* Since there is no support for separate rx/tx vlan accel
|
||||
* enable/disable make sure tx flag is always in same state as rx.
|
||||
*/
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
features |= NETIF_F_HW_VLAN_TX;
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
features |= NETIF_F_HW_VLAN_CTAG_TX;
|
||||
else
|
||||
features &= ~NETIF_F_HW_VLAN_TX;
|
||||
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
return features;
|
||||
}
|
||||
|
@ -412,7 +412,7 @@ static int atl2_set_features(struct net_device *netdev,
|
|||
{
|
||||
netdev_features_t changed = netdev->features ^ features;
|
||||
|
||||
if (changed & NETIF_F_HW_VLAN_RX)
|
||||
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
atl2_vlan_mode(netdev, features);
|
||||
|
||||
return 0;
|
||||
|
@ -452,7 +452,7 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
|
|||
((rxd->status.vtag&7) << 13) |
|
||||
((rxd->status.vtag&8) << 9);
|
||||
|
||||
__vlan_hwaccel_put_tag(skb, vlan_tag);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
|
||||
}
|
||||
netif_rx(skb);
|
||||
netdev->stats.rx_bytes += rx_size;
|
||||
|
@ -887,7 +887,7 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
|
|||
skb->len-copy_len);
|
||||
offset = ((u32)(skb->len-copy_len + 3) & ~3);
|
||||
}
|
||||
#ifdef NETIF_F_HW_VLAN_TX
|
||||
#ifdef NETIF_F_HW_VLAN_CTAG_TX
|
||||
if (vlan_tx_tag_present(skb)) {
|
||||
u16 vlan_tag = vlan_tx_tag_get(skb);
|
||||
vlan_tag = (vlan_tag << 4) |
|
||||
|
@ -1413,8 +1413,8 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
|
||||
err = -EIO;
|
||||
|
||||
netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_RX;
|
||||
netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
|
||||
netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
|
||||
|
||||
/* Init PHY as early as possible due to power saving issue */
|
||||
atl2_phy_init(&adapter->hw);
|
||||
|
|
|
@ -220,7 +220,7 @@ static void atlx_link_chg_task(struct work_struct *work)
|
|||
|
||||
static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl)
|
||||
{
|
||||
if (features & NETIF_F_HW_VLAN_RX) {
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX) {
|
||||
/* enable VLAN tag insert/strip */
|
||||
*ctrl |= MAC_CTRL_RMV_VLAN;
|
||||
} else {
|
||||
|
@ -257,10 +257,10 @@ static netdev_features_t atlx_fix_features(struct net_device *netdev,
|
|||
* Since there is no support for separate rx/tx vlan accel
|
||||
* enable/disable make sure tx flag is always in same state as rx.
|
||||
*/
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
features |= NETIF_F_HW_VLAN_TX;
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
features |= NETIF_F_HW_VLAN_CTAG_TX;
|
||||
else
|
||||
features &= ~NETIF_F_HW_VLAN_TX;
|
||||
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
return features;
|
||||
}
|
||||
|
@ -270,7 +270,7 @@ static int atlx_set_features(struct net_device *netdev,
|
|||
{
|
||||
netdev_features_t changed = netdev->features ^ features;
|
||||
|
||||
if (changed & NETIF_F_HW_VLAN_RX)
|
||||
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
atlx_vlan_mode(netdev, features);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -3211,7 +3211,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
|
|||
}
|
||||
if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
|
||||
!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
|
||||
__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
|
||||
|
||||
skb->protocol = eth_type_trans(skb, bp->dev);
|
||||
|
||||
|
@ -3553,7 +3553,7 @@ bnx2_set_rx_mode(struct net_device *dev)
|
|||
rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
|
||||
BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
|
||||
sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
|
||||
if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
|
||||
if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
|
||||
(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
|
||||
rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
|
||||
if (dev->flags & IFF_PROMISC) {
|
||||
|
@ -7695,7 +7695,7 @@ bnx2_fix_features(struct net_device *dev, netdev_features_t features)
|
|||
struct bnx2 *bp = netdev_priv(dev);
|
||||
|
||||
if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
|
||||
features |= NETIF_F_HW_VLAN_RX;
|
||||
features |= NETIF_F_HW_VLAN_CTAG_RX;
|
||||
|
||||
return features;
|
||||
}
|
||||
|
@ -7706,12 +7706,12 @@ bnx2_set_features(struct net_device *dev, netdev_features_t features)
|
|||
struct bnx2 *bp = netdev_priv(dev);
|
||||
|
||||
/* TSO with VLAN tag won't work with current firmware */
|
||||
if (features & NETIF_F_HW_VLAN_TX)
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_TX)
|
||||
dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
|
||||
else
|
||||
dev->vlan_features &= ~NETIF_F_ALL_TSO;
|
||||
|
||||
if ((!!(features & NETIF_F_HW_VLAN_RX) !=
|
||||
if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
|
||||
!!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
|
||||
netif_running(dev)) {
|
||||
bnx2_netif_stop(bp, false);
|
||||
|
@ -8551,7 +8551,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
|
||||
|
||||
dev->vlan_features = dev->hw_features;
|
||||
dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
|
||||
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
dev->features |= dev->hw_features;
|
||||
dev->priv_flags |= IFF_UNICAST_FLT;
|
||||
|
||||
|
|
|
@ -719,7 +719,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
|
|||
if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
|
||||
skb, cqe, cqe_idx)) {
|
||||
if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
|
||||
__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
|
||||
bnx2x_gro_receive(bp, fp, skb);
|
||||
} else {
|
||||
DP(NETIF_MSG_RX_STATUS,
|
||||
|
@ -994,7 +994,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
|
|||
|
||||
if (le16_to_cpu(cqe_fp->pars_flags.flags) &
|
||||
PARSING_FLAGS_VLAN)
|
||||
__vlan_hwaccel_put_tag(skb,
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
|
||||
le16_to_cpu(cqe_fp->vlan_tag));
|
||||
napi_gro_receive(&fp->napi, skb);
|
||||
|
||||
|
|
|
@ -12027,7 +12027,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
|
|||
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
||||
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
|
||||
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
|
||||
NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
|
||||
NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
|
||||
if (!CHIP_IS_E1x(bp)) {
|
||||
dev->hw_features |= NETIF_F_GSO_GRE;
|
||||
dev->hw_enc_features =
|
||||
|
@ -12039,7 +12039,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
|
|||
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
||||
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
|
||||
|
||||
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
|
||||
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
if (bp->flags & USING_DAC_FLAG)
|
||||
dev->features |= NETIF_F_HIGHDMA;
|
||||
|
||||
|
|
|
@ -6715,7 +6715,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
|
|||
|
||||
if (desc->type_flags & RXD_FLAG_VLAN &&
|
||||
!(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
|
||||
__vlan_hwaccel_put_tag(skb,
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
|
||||
desc->err_vlan & RXD_VLAN_MASK);
|
||||
|
||||
napi_gro_receive(&tnapi->napi, skb);
|
||||
|
@ -17197,7 +17197,7 @@ static int tg3_init_one(struct pci_dev *pdev,
|
|||
|
||||
tg3_init_bufmgr_config(tp);
|
||||
|
||||
features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
|
||||
features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
|
||||
/* 5700 B0 chips do not support checksumming correctly due
|
||||
* to hardware bugs.
|
||||
|
|
|
@ -610,7 +610,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
|
|||
rcb->rxq->rx_bytes += length;
|
||||
|
||||
if (flags & BNA_CQ_EF_VLAN)
|
||||
__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
|
||||
|
||||
if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
|
||||
napi_gro_frags(&rx_ctrl->napi);
|
||||
|
@ -3068,8 +3068,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
|
|||
}
|
||||
|
||||
static int
|
||||
bnad_vlan_rx_add_vid(struct net_device *netdev,
|
||||
unsigned short vid)
|
||||
bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct bnad *bnad = netdev_priv(netdev);
|
||||
unsigned long flags;
|
||||
|
@ -3090,8 +3089,7 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
|
|||
}
|
||||
|
||||
static int
|
||||
bnad_vlan_rx_kill_vid(struct net_device *netdev,
|
||||
unsigned short vid)
|
||||
bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct bnad *bnad = netdev_priv(netdev);
|
||||
unsigned long flags;
|
||||
|
@ -3170,14 +3168,14 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
|
|||
|
||||
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
|
||||
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
||||
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
|
||||
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
|
||||
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
||||
NETIF_F_TSO | NETIF_F_TSO6;
|
||||
|
||||
netdev->features |= netdev->hw_features |
|
||||
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
|
||||
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
|
||||
if (using_dac)
|
||||
netdev->features |= NETIF_F_HIGHDMA;
|
||||
|
|
|
@ -856,10 +856,10 @@ static netdev_features_t t1_fix_features(struct net_device *dev,
|
|||
* Since there is no support for separate rx/tx vlan accel
|
||||
* enable/disable make sure tx flag is always in same state as rx.
|
||||
*/
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
features |= NETIF_F_HW_VLAN_TX;
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
features |= NETIF_F_HW_VLAN_CTAG_TX;
|
||||
else
|
||||
features &= ~NETIF_F_HW_VLAN_TX;
|
||||
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
return features;
|
||||
}
|
||||
|
@ -869,7 +869,7 @@ static int t1_set_features(struct net_device *dev, netdev_features_t features)
|
|||
netdev_features_t changed = dev->features ^ features;
|
||||
struct adapter *adapter = dev->ml_priv;
|
||||
|
||||
if (changed & NETIF_F_HW_VLAN_RX)
|
||||
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
t1_vlan_mode(adapter, features);
|
||||
|
||||
return 0;
|
||||
|
@ -1085,8 +1085,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
netdev->features |= NETIF_F_HIGHDMA;
|
||||
if (vlan_tso_capable(adapter)) {
|
||||
netdev->features |=
|
||||
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
|
||||
netdev->hw_features |= NETIF_F_HW_VLAN_RX;
|
||||
NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX;
|
||||
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
|
||||
|
||||
/* T204: disable TSO */
|
||||
if (!(is_T2(adapter)) || bi->port_number != 4) {
|
||||
|
|
|
@ -734,7 +734,7 @@ void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
|
|||
{
|
||||
struct sge *sge = adapter->sge;
|
||||
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
sge->sge_control |= F_VLAN_XTRACT;
|
||||
else
|
||||
sge->sge_control &= ~F_VLAN_XTRACT;
|
||||
|
@ -1386,7 +1386,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
|
|||
|
||||
if (p->vlan_valid) {
|
||||
st->vlan_xtract++;
|
||||
__vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
|
||||
}
|
||||
netif_receive_skb(skb);
|
||||
}
|
||||
|
|
|
@ -1181,14 +1181,15 @@ static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
|
|||
|
||||
if (adapter->params.rev > 0) {
|
||||
t3_set_vlan_accel(adapter, 1 << pi->port_id,
|
||||
features & NETIF_F_HW_VLAN_RX);
|
||||
features & NETIF_F_HW_VLAN_CTAG_RX);
|
||||
} else {
|
||||
/* single control for all ports */
|
||||
unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX;
|
||||
unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;
|
||||
|
||||
for_each_port(adapter, i)
|
||||
have_vlans |=
|
||||
adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
|
||||
adapter->port[i]->features &
|
||||
NETIF_F_HW_VLAN_CTAG_RX;
|
||||
|
||||
t3_set_vlan_accel(adapter, 1, have_vlans);
|
||||
}
|
||||
|
@ -2563,10 +2564,10 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev,
|
|||
* Since there is no support for separate rx/tx vlan accel
|
||||
* enable/disable make sure tx flag is always in same state as rx.
|
||||
*/
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
features |= NETIF_F_HW_VLAN_TX;
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
features |= NETIF_F_HW_VLAN_CTAG_TX;
|
||||
else
|
||||
features &= ~NETIF_F_HW_VLAN_TX;
|
||||
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
return features;
|
||||
}
|
||||
|
@ -2575,7 +2576,7 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
|
|||
{
|
||||
netdev_features_t changed = dev->features ^ features;
|
||||
|
||||
if (changed & NETIF_F_HW_VLAN_RX)
|
||||
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
cxgb_vlan_mode(dev, features);
|
||||
|
||||
return 0;
|
||||
|
@ -3288,8 +3289,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
netdev->mem_start = mmio_start;
|
||||
netdev->mem_end = mmio_start + mmio_len - 1;
|
||||
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
|
||||
NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
|
||||
netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX;
|
||||
NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
netdev->features |= netdev->hw_features |
|
||||
NETIF_F_HW_VLAN_CTAG_TX;
|
||||
netdev->vlan_features |= netdev->features & VLAN_FEAT;
|
||||
if (pci_using_dac)
|
||||
netdev->features |= NETIF_F_HIGHDMA;
|
||||
|
|
|
@ -185,7 +185,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
|
|||
if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
|
||||
rcu_read_lock();
|
||||
if (vlan && vlan != VLAN_VID_MASK) {
|
||||
dev = __vlan_find_dev_deep(dev, vlan);
|
||||
dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan);
|
||||
} else if (netif_is_bond_slave(dev)) {
|
||||
struct net_device *upper_dev;
|
||||
|
||||
|
|
|
@ -2030,7 +2030,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
|
|||
|
||||
if (p->vlan_valid) {
|
||||
qs->port_stats[SGE_PSTAT_VLANEX]++;
|
||||
__vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
|
||||
}
|
||||
if (rq->polling) {
|
||||
if (lro)
|
||||
|
@ -2132,7 +2132,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
|
|||
|
||||
if (cpl->vlan_valid) {
|
||||
qs->port_stats[SGE_PSTAT_VLANEX]++;
|
||||
__vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan));
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
|
||||
}
|
||||
napi_gro_frags(&qs->napi);
|
||||
}
|
||||
|
|
|
@ -559,7 +559,7 @@ static int link_start(struct net_device *dev)
|
|||
* that step explicitly.
|
||||
*/
|
||||
ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
|
||||
!!(dev->features & NETIF_F_HW_VLAN_RX), true);
|
||||
!!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
|
||||
if (ret == 0) {
|
||||
ret = t4_change_mac(pi->adapter, mb, pi->viid,
|
||||
pi->xact_addr_filt, dev->dev_addr, true,
|
||||
|
@ -2722,14 +2722,14 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
|
|||
netdev_features_t changed = dev->features ^ features;
|
||||
int err;
|
||||
|
||||
if (!(changed & NETIF_F_HW_VLAN_RX))
|
||||
if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
|
||||
return 0;
|
||||
|
||||
err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
|
||||
-1, -1, -1,
|
||||
!!(features & NETIF_F_HW_VLAN_RX), true);
|
||||
!!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
|
||||
if (unlikely(err))
|
||||
dev->features = features ^ NETIF_F_HW_VLAN_RX;
|
||||
dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -5628,7 +5628,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
|
||||
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
||||
NETIF_F_RXCSUM | NETIF_F_RXHASH |
|
||||
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
|
||||
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
if (highdma)
|
||||
netdev->hw_features |= NETIF_F_HIGHDMA;
|
||||
netdev->features |= netdev->hw_features;
|
||||
|
|
|
@ -1633,7 +1633,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
|
|||
skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
|
||||
|
||||
if (unlikely(pkt->vlan_ex)) {
|
||||
__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
|
||||
rxq->stats.vlan_ex++;
|
||||
}
|
||||
ret = napi_gro_frags(&rxq->rspq.napi);
|
||||
|
@ -1705,7 +1705,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
|
|||
skb_checksum_none_assert(skb);
|
||||
|
||||
if (unlikely(pkt->vlan_ex)) {
|
||||
__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
|
||||
rxq->stats.vlan_ex++;
|
||||
}
|
||||
netif_receive_skb(skb);
|
||||
|
|
|
@ -1100,10 +1100,10 @@ static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
|
|||
* Since there is no support for separate rx/tx vlan accel
|
||||
* enable/disable make sure tx flag is always in same state as rx.
|
||||
*/
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
features |= NETIF_F_HW_VLAN_TX;
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
features |= NETIF_F_HW_VLAN_CTAG_TX;
|
||||
else
|
||||
features &= ~NETIF_F_HW_VLAN_TX;
|
||||
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
return features;
|
||||
}
|
||||
|
@ -1114,9 +1114,9 @@ static int cxgb4vf_set_features(struct net_device *dev,
|
|||
struct port_info *pi = netdev_priv(dev);
|
||||
netdev_features_t changed = dev->features ^ features;
|
||||
|
||||
if (changed & NETIF_F_HW_VLAN_RX)
|
||||
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
|
||||
features & NETIF_F_HW_VLAN_TX, 0);
|
||||
features & NETIF_F_HW_VLAN_CTAG_TX, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2623,11 +2623,12 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
|
|||
|
||||
netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
|
||||
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
||||
NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM;
|
||||
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
|
||||
netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
|
||||
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
||||
NETIF_F_HIGHDMA;
|
||||
netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_TX;
|
||||
netdev->features = netdev->hw_features |
|
||||
NETIF_F_HW_VLAN_CTAG_TX;
|
||||
if (pci_using_dac)
|
||||
netdev->features |= NETIF_F_HIGHDMA;
|
||||
|
||||
|
|
|
@ -1482,7 +1482,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
|
|||
skb_record_rx_queue(skb, rxq->rspq.idx);
|
||||
|
||||
if (pkt->vlan_ex) {
|
||||
__vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
|
||||
__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
|
||||
be16_to_cpu(pkt->vlan));
|
||||
rxq->stats.vlan_ex++;
|
||||
}
|
||||
ret = napi_gro_frags(&rxq->rspq.napi);
|
||||
|
@ -1551,7 +1552,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
|
|||
|
||||
if (pkt->vlan_ex) {
|
||||
rxq->stats.vlan_ex++;
|
||||
__vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
|
||||
}
|
||||
|
||||
netif_receive_skb(skb);
|
||||
|
|
|
@ -212,7 +212,7 @@ int enic_dev_deinit_done(struct enic *enic, int *status)
|
|||
}
|
||||
|
||||
/* rtnl lock is held */
|
||||
int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
|
||||
int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct enic *enic = netdev_priv(netdev);
|
||||
int err;
|
||||
|
@ -225,7 +225,7 @@ int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
|
|||
}
|
||||
|
||||
/* rtnl lock is held */
|
||||
int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
|
||||
int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct enic *enic = netdev_priv(netdev);
|
||||
int err;
|
||||
|
|
|
@ -46,8 +46,8 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
|
|||
int broadcast, int promisc, int allmulti);
|
||||
int enic_dev_add_addr(struct enic *enic, u8 *addr);
|
||||
int enic_dev_del_addr(struct enic *enic, u8 *addr);
|
||||
int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
|
||||
int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
|
||||
int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
|
||||
int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
|
||||
int enic_dev_notify_unset(struct enic *enic);
|
||||
int enic_dev_hang_notify(struct enic *enic);
|
||||
int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
|
||||
|
|
|
@ -1300,7 +1300,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
|
|||
}
|
||||
|
||||
if (vlan_stripped)
|
||||
__vlan_hwaccel_put_tag(skb, vlan_tci);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
|
||||
|
||||
if (netdev->features & NETIF_F_GRO)
|
||||
napi_gro_receive(&enic->napi[q_number], skb);
|
||||
|
@ -2496,9 +2496,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
netdev->watchdog_timeo = 2 * HZ;
|
||||
netdev->ethtool_ops = &enic_ethtool_ops;
|
||||
|
||||
netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
|
||||
netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
if (ENIC_SETTING(enic, LOOP)) {
|
||||
netdev->features &= ~NETIF_F_HW_VLAN_TX;
|
||||
netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
|
||||
enic->loop_enable = 1;
|
||||
enic->loop_tag = enic->config.loop_tag;
|
||||
dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
|
||||
|
|
|
@ -771,7 +771,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
|
|||
|
||||
if (vlan_tx_tag_present(skb)) {
|
||||
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
|
||||
__vlan_put_tag(skb, vlan_tag);
|
||||
__vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
|
||||
skb->vlan_tci = 0;
|
||||
}
|
||||
|
||||
|
@ -902,7 +902,7 @@ static int be_vid_config(struct be_adapter *adapter)
|
|||
return status;
|
||||
}
|
||||
|
||||
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
|
||||
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct be_adapter *adapter = netdev_priv(netdev);
|
||||
int status = 0;
|
||||
|
@ -928,7 +928,7 @@ static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
|
|||
return status;
|
||||
}
|
||||
|
||||
static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
|
||||
static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct be_adapter *adapter = netdev_priv(netdev);
|
||||
int status = 0;
|
||||
|
@ -1383,7 +1383,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,
|
|||
|
||||
|
||||
if (rxcp->vlanf)
|
||||
__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
|
||||
|
||||
netif_receive_skb(skb);
|
||||
}
|
||||
|
@ -1439,7 +1439,7 @@ void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
|
|||
skb->rxhash = rxcp->rss_hash;
|
||||
|
||||
if (rxcp->vlanf)
|
||||
__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
|
||||
|
||||
napi_gro_frags(napi);
|
||||
}
|
||||
|
@ -3663,12 +3663,12 @@ static void be_netdev_init(struct net_device *netdev)
|
|||
|
||||
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
|
||||
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
|
||||
NETIF_F_HW_VLAN_TX;
|
||||
NETIF_F_HW_VLAN_CTAG_TX;
|
||||
if (be_multi_rxq(adapter))
|
||||
netdev->hw_features |= NETIF_F_RXHASH;
|
||||
|
||||
netdev->features |= netdev->hw_features |
|
||||
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
|
||||
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
|
||||
netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
|
||||
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
|
||||
|
|
|
@ -386,7 +386,7 @@ static void gfar_init_mac(struct net_device *ndev)
|
|||
priv->uses_rxfcb = 1;
|
||||
}
|
||||
|
||||
if (ndev->features & NETIF_F_HW_VLAN_RX) {
|
||||
if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
|
||||
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
|
||||
priv->uses_rxfcb = 1;
|
||||
}
|
||||
|
@ -1050,8 +1050,9 @@ static int gfar_probe(struct platform_device *ofdev)
|
|||
}
|
||||
|
||||
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
|
||||
dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
|
||||
dev->features |= NETIF_F_HW_VLAN_RX;
|
||||
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX;
|
||||
dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
|
||||
}
|
||||
|
||||
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
|
||||
|
@ -2348,7 +2349,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
|
|||
local_irq_save(flags);
|
||||
lock_rx_qs(priv);
|
||||
|
||||
if (features & NETIF_F_HW_VLAN_TX) {
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_TX) {
|
||||
/* Enable VLAN tag insertion */
|
||||
tempval = gfar_read(®s->tctrl);
|
||||
tempval |= TCTRL_VLINS;
|
||||
|
@ -2360,7 +2361,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
|
|||
gfar_write(®s->tctrl, tempval);
|
||||
}
|
||||
|
||||
if (features & NETIF_F_HW_VLAN_RX) {
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX) {
|
||||
/* Enable VLAN tag extraction */
|
||||
tempval = gfar_read(®s->rctrl);
|
||||
tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
|
||||
|
@ -2724,11 +2725,11 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
|
|||
/* Tell the skb what kind of packet this is */
|
||||
skb->protocol = eth_type_trans(skb, dev);
|
||||
|
||||
/* There's need to check for NETIF_F_HW_VLAN_RX here.
|
||||
/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
|
||||
* Even if vlan rx accel is disabled, on some chips
|
||||
* RXFCB_VLN is pseudo randomly set.
|
||||
*/
|
||||
if (dev->features & NETIF_F_HW_VLAN_RX &&
|
||||
if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
|
||||
fcb->flags & RXFCB_VLN)
|
||||
__vlan_hwaccel_put_tag(skb, fcb->vlctl);
|
||||
|
||||
|
|
|
@ -542,7 +542,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
|
|||
int err = 0, i = 0;
|
||||
netdev_features_t changed = dev->features ^ features;
|
||||
|
||||
if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
|
||||
if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
|
||||
gfar_vlan_mode(dev, features);
|
||||
|
||||
if (!(changed & NETIF_F_RXCSUM))
|
||||
|
|
|
@ -2110,7 +2110,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
|
||||
static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct ehea_port *port = netdev_priv(dev);
|
||||
struct ehea_adapter *adapter = port->adapter;
|
||||
|
@ -2148,7 +2148,7 @@ static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
|
|||
return err;
|
||||
}
|
||||
|
||||
static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
|
||||
static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct ehea_port *port = netdev_priv(dev);
|
||||
struct ehea_adapter *adapter = port->adapter;
|
||||
|
@ -3021,11 +3021,11 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
|
|||
ehea_set_ethtool_ops(dev);
|
||||
|
||||
dev->hw_features = NETIF_F_SG | NETIF_F_TSO
|
||||
| NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX;
|
||||
| NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
|
||||
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
|
||||
| NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
|
||||
| NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
|
||||
| NETIF_F_RXCSUM;
|
||||
| NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
|
||||
| NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
|
||||
| NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
|
||||
dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
|
||||
NETIF_F_IP_CSUM;
|
||||
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
|
||||
|
|
|
@ -166,8 +166,10 @@ static void e1000_vlan_mode(struct net_device *netdev,
|
|||
netdev_features_t features);
|
||||
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
|
||||
bool filter_on);
|
||||
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
|
||||
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
|
||||
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
|
||||
__be16 proto, u16 vid);
|
||||
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
|
||||
__be16 proto, u16 vid);
|
||||
static void e1000_restore_vlan(struct e1000_adapter *adapter);
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
@ -333,7 +335,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
|
|||
if (!test_bit(vid, adapter->active_vlans)) {
|
||||
if (hw->mng_cookie.status &
|
||||
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
|
||||
e1000_vlan_rx_add_vid(netdev, vid);
|
||||
e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
|
||||
adapter->mng_vlan_id = vid;
|
||||
} else {
|
||||
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
|
||||
|
@ -341,7 +343,8 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
|
|||
if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
|
||||
(vid != old_vid) &&
|
||||
!test_bit(old_vid, adapter->active_vlans))
|
||||
e1000_vlan_rx_kill_vid(netdev, old_vid);
|
||||
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
|
||||
old_vid);
|
||||
} else {
|
||||
adapter->mng_vlan_id = vid;
|
||||
}
|
||||
|
@ -809,10 +812,10 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
|
|||
/* Since there is no support for separate Rx/Tx vlan accel
|
||||
* enable/disable make sure Tx flag is always in same state as Rx.
|
||||
*/
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
features |= NETIF_F_HW_VLAN_TX;
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
features |= NETIF_F_HW_VLAN_CTAG_TX;
|
||||
else
|
||||
features &= ~NETIF_F_HW_VLAN_TX;
|
||||
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
return features;
|
||||
}
|
||||
|
@ -823,7 +826,7 @@ static int e1000_set_features(struct net_device *netdev,
|
|||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
netdev_features_t changed = features ^ netdev->features;
|
||||
|
||||
if (changed & NETIF_F_HW_VLAN_RX)
|
||||
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
e1000_vlan_mode(netdev, features);
|
||||
|
||||
if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
|
||||
|
@ -1058,9 +1061,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
if (hw->mac_type >= e1000_82543) {
|
||||
netdev->hw_features = NETIF_F_SG |
|
||||
NETIF_F_HW_CSUM |
|
||||
NETIF_F_HW_VLAN_RX;
|
||||
netdev->features = NETIF_F_HW_VLAN_TX |
|
||||
NETIF_F_HW_VLAN_FILTER;
|
||||
NETIF_F_HW_VLAN_CTAG_RX;
|
||||
netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
}
|
||||
|
||||
if ((hw->mac_type >= e1000_82544) &&
|
||||
|
@ -1457,7 +1460,8 @@ static int e1000_close(struct net_device *netdev)
|
|||
if ((hw->mng_cookie.status &
|
||||
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
|
||||
!test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
|
||||
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
|
||||
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
|
||||
adapter->mng_vlan_id);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -3999,7 +4003,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
|
|||
if (status & E1000_RXD_STAT_VP) {
|
||||
u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
|
||||
|
||||
__vlan_hwaccel_put_tag(skb, vid);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
|
||||
}
|
||||
napi_gro_receive(&adapter->napi, skb);
|
||||
}
|
||||
|
@ -4785,7 +4789,7 @@ static void __e1000_vlan_mode(struct e1000_adapter *adapter,
|
|||
u32 ctrl;
|
||||
|
||||
ctrl = er32(CTRL);
|
||||
if (features & NETIF_F_HW_VLAN_RX) {
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX) {
|
||||
/* enable VLAN tag insert/strip */
|
||||
ctrl |= E1000_CTRL_VME;
|
||||
} else {
|
||||
|
@ -4837,7 +4841,8 @@ static void e1000_vlan_mode(struct net_device *netdev,
|
|||
e1000_irq_enable(adapter);
|
||||
}
|
||||
|
||||
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
|
||||
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
|
@ -4862,7 +4867,8 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
|
||||
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
|
@ -4896,7 +4902,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
|
|||
|
||||
e1000_vlan_filter_on_off(adapter, true);
|
||||
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
|
||||
e1000_vlan_rx_add_vid(adapter->netdev, vid);
|
||||
e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
|
||||
}
|
||||
|
||||
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
|
||||
|
|
|
@ -554,7 +554,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
|
|||
skb->protocol = eth_type_trans(skb, netdev);
|
||||
|
||||
if (staterr & E1000_RXD_STAT_VP)
|
||||
__vlan_hwaccel_put_tag(skb, tag);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
|
||||
|
||||
napi_gro_receive(&adapter->napi, skb);
|
||||
}
|
||||
|
@ -2672,7 +2672,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
|
|||
return work_done;
|
||||
}
|
||||
|
||||
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
|
||||
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
|
@ -2697,7 +2698,8 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
|
||||
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
|
@ -2741,7 +2743,8 @@ static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
|
|||
ew32(RCTL, rctl);
|
||||
|
||||
if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
|
||||
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
|
||||
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
|
||||
adapter->mng_vlan_id);
|
||||
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
|
||||
}
|
||||
}
|
||||
|
@ -2802,22 +2805,22 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
|
|||
u16 old_vid = adapter->mng_vlan_id;
|
||||
|
||||
if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
|
||||
e1000_vlan_rx_add_vid(netdev, vid);
|
||||
e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
|
||||
adapter->mng_vlan_id = vid;
|
||||
}
|
||||
|
||||
if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
|
||||
e1000_vlan_rx_kill_vid(netdev, old_vid);
|
||||
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
|
||||
}
|
||||
|
||||
static void e1000_restore_vlan(struct e1000_adapter *adapter)
|
||||
{
|
||||
u16 vid;
|
||||
|
||||
e1000_vlan_rx_add_vid(adapter->netdev, 0);
|
||||
e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
|
||||
|
||||
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
|
||||
e1000_vlan_rx_add_vid(adapter->netdev, vid);
|
||||
e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
|
||||
}
|
||||
|
||||
static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
|
||||
|
@ -3373,7 +3376,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
|
|||
|
||||
ew32(RCTL, rctl);
|
||||
|
||||
if (netdev->features & NETIF_F_HW_VLAN_RX)
|
||||
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
e1000e_vlan_strip_enable(adapter);
|
||||
else
|
||||
e1000e_vlan_strip_disable(adapter);
|
||||
|
@ -4384,7 +4387,8 @@ static int e1000_close(struct net_device *netdev)
|
|||
* the same ID is registered on the host OS (let 8021q kill it)
|
||||
*/
|
||||
if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
|
||||
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
|
||||
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
|
||||
adapter->mng_vlan_id);
|
||||
|
||||
/* If AMT is enabled, let the firmware know that the network
|
||||
* interface is now closed
|
||||
|
@ -6418,7 +6422,7 @@ static int e1000_set_features(struct net_device *netdev,
|
|||
if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
|
||||
adapter->flags |= FLAG_TSO_FORCE;
|
||||
|
||||
if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
|
||||
if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
|
||||
NETIF_F_RXALL)))
|
||||
return 0;
|
||||
|
@ -6629,8 +6633,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
|
||||
/* Set initial default active device features */
|
||||
netdev->features = (NETIF_F_SG |
|
||||
NETIF_F_HW_VLAN_RX |
|
||||
NETIF_F_HW_VLAN_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX |
|
||||
NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_TSO |
|
||||
NETIF_F_TSO6 |
|
||||
NETIF_F_RXHASH |
|
||||
|
@ -6644,7 +6648,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
netdev->hw_features |= NETIF_F_RXALL;
|
||||
|
||||
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
|
||||
netdev->features |= NETIF_F_HW_VLAN_FILTER;
|
||||
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
|
||||
netdev->vlan_features |= (NETIF_F_SG |
|
||||
NETIF_F_TSO |
|
||||
|
|
|
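Every rx_add_vid/rx_kill_vid conversion in the Intel hunks above and below follows the same shape: the net_device_ops callbacks gain a __be16 protocol argument ahead of the VID. As a rough sketch only (struct foo_adapter, the foo_* names and the EPROTONOSUPPORT check are illustrative assumptions, not code from this patchset), a converted CTAG-only driver callback looks roughly like this:

#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

/* Hypothetical driver state; stands in for e1000_adapter, igb_adapter, etc. */
struct foo_adapter {
	DECLARE_BITMAP(active_vlans, VLAN_N_VID);
};

static int foo_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct foo_adapter *adapter = netdev_priv(netdev);

	/* this sketch only offloads 802.1Q (CTAG) filtering */
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* ... program the hardware VLAN filter table here ... */
	set_bit(vid, adapter->active_vlans);
	return 0;
}

static int foo_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct foo_adapter *adapter = netdev_priv(netdev);

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* ... and clear the filter entry again here ... */
	clear_bit(vid, adapter->active_vlans);
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_vlan_rx_add_vid	= foo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= foo_vlan_rx_kill_vid,
};

Since the VLAN core only invokes the hardware filter hooks for a protocol the device advertises filtering for, the explicit protocol check above is defensive rather than strictly required.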
@ -159,8 +159,8 @@ static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
|
|||
static void igb_tx_timeout(struct net_device *);
|
||||
static void igb_reset_task(struct work_struct *);
|
||||
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
|
||||
static int igb_vlan_rx_add_vid(struct net_device *, u16);
|
||||
static int igb_vlan_rx_kill_vid(struct net_device *, u16);
|
||||
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
|
||||
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
|
||||
static void igb_restore_vlan(struct igb_adapter *);
|
||||
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
|
||||
static void igb_ping_all_vfs(struct igb_adapter *);
|
||||
|
@ -1860,10 +1860,10 @@ static netdev_features_t igb_fix_features(struct net_device *netdev,
|
|||
/* Since there is no support for separate Rx/Tx vlan accel
|
||||
* enable/disable make sure Tx flag is always in same state as Rx.
|
||||
*/
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
features |= NETIF_F_HW_VLAN_TX;
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
features |= NETIF_F_HW_VLAN_CTAG_TX;
|
||||
else
|
||||
features &= ~NETIF_F_HW_VLAN_TX;
|
||||
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
return features;
|
||||
}
|
||||
|
@ -1874,7 +1874,7 @@ static int igb_set_features(struct net_device *netdev,
|
|||
netdev_features_t changed = netdev->features ^ features;
|
||||
struct igb_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
if (changed & NETIF_F_HW_VLAN_RX)
|
||||
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
igb_vlan_mode(netdev, features);
|
||||
|
||||
if (!(changed & NETIF_F_RXALL))
|
||||
|
@ -2127,15 +2127,15 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
NETIF_F_TSO6 |
|
||||
NETIF_F_RXHASH |
|
||||
NETIF_F_RXCSUM |
|
||||
NETIF_F_HW_VLAN_RX |
|
||||
NETIF_F_HW_VLAN_TX;
|
||||
NETIF_F_HW_VLAN_CTAG_RX |
|
||||
NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
/* copy netdev features into list of user selectable features */
|
||||
netdev->hw_features |= netdev->features;
|
||||
netdev->hw_features |= NETIF_F_RXALL;
|
||||
|
||||
/* set this bit last since it cannot be part of hw_features */
|
||||
netdev->features |= NETIF_F_HW_VLAN_FILTER;
|
||||
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
|
||||
netdev->vlan_features |= NETIF_F_TSO |
|
||||
NETIF_F_TSO6 |
|
||||
|
@ -6674,7 +6674,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
|
|||
|
||||
igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
|
||||
|
||||
if ((dev->features & NETIF_F_HW_VLAN_RX) &&
|
||||
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
|
||||
igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
|
||||
u16 vid;
|
||||
if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
|
||||
|
@ -6683,7 +6683,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
|
|||
else
|
||||
vid = le16_to_cpu(rx_desc->wb.upper.vlan);
|
||||
|
||||
__vlan_hwaccel_put_tag(skb, vid);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
|
||||
}
|
||||
|
||||
skb_record_rx_queue(skb, rx_ring->queue_index);
|
||||
|
@ -6954,7 +6954,7 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
|
|||
struct igb_adapter *adapter = netdev_priv(netdev);
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
u32 ctrl, rctl;
|
||||
bool enable = !!(features & NETIF_F_HW_VLAN_RX);
|
||||
bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
|
||||
|
||||
if (enable) {
|
||||
/* enable VLAN tag insert/strip */
|
||||
|
@ -6976,7 +6976,8 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
|
|||
igb_rlpml_set(adapter);
|
||||
}
|
||||
|
||||
static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
|
||||
static int igb_vlan_rx_add_vid(struct net_device *netdev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct igb_adapter *adapter = netdev_priv(netdev);
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
|
@ -6993,7 +6994,8 @@ static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
|
||||
static int igb_vlan_rx_kill_vid(struct net_device *netdev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct igb_adapter *adapter = netdev_priv(netdev);
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
|
@ -7019,7 +7021,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
|
|||
igb_vlan_mode(adapter->netdev, adapter->netdev->features);
|
||||
|
||||
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
|
||||
igb_vlan_rx_add_vid(adapter->netdev, vid);
|
||||
igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
|
||||
}
|
||||
|
||||
int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
|
||||
|
|
|
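igb's fix_features change above, like the qlge one further down, exists because the hardware cannot toggle Rx VLAN stripping and Tx VLAN insertion independently, so the hook keeps the two renamed CTAG flags in lock-step. A minimal sketch of the pattern, with a hypothetical foo_fix_features name:

#include <linux/netdevice.h>

/* Keep NETIF_F_HW_VLAN_CTAG_TX in the same state as NETIF_F_HW_VLAN_CTAG_RX,
 * for hardware that cannot enable Rx stripping and Tx insertion separately.
 */
static netdev_features_t foo_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

Wired up through .ndo_fix_features, this runs before .ndo_set_features, so the driver never sees the two flags out of sync.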
@ -116,7 +116,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
|
|||
else
|
||||
vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
|
||||
if (test_bit(vid, adapter->active_vlans))
|
||||
__vlan_hwaccel_put_tag(skb, vid);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
|
||||
}
|
||||
|
||||
napi_gro_receive(&adapter->rx_ring->napi, skb);
|
||||
|
@ -1230,7 +1230,8 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
|
|||
e1000_rlpml_set_vf(hw, max_frame_size);
|
||||
}
|
||||
|
||||
static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
|
||||
static int igbvf_vlan_rx_add_vid(struct net_device *netdev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct igbvf_adapter *adapter = netdev_priv(netdev);
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
|
@ -1243,7 +1244,8 @@ static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
|
||||
static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct igbvf_adapter *adapter = netdev_priv(netdev);
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
|
@ -1262,7 +1264,7 @@ static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
|
|||
u16 vid;
|
||||
|
||||
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
|
||||
igbvf_vlan_rx_add_vid(adapter->netdev, vid);
|
||||
igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2722,9 +2724,9 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
NETIF_F_RXCSUM;
|
||||
|
||||
netdev->features = netdev->hw_features |
|
||||
NETIF_F_HW_VLAN_TX |
|
||||
NETIF_F_HW_VLAN_RX |
|
||||
NETIF_F_HW_VLAN_FILTER;
|
||||
NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX |
|
||||
NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
|
||||
if (pci_using_dac)
|
||||
netdev->features |= NETIF_F_HIGHDMA;
|
||||
|
|
|
@ -101,8 +101,10 @@ static void ixgb_tx_timeout_task(struct work_struct *work);
|
|||
|
||||
static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
|
||||
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
|
||||
static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
|
||||
static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
|
||||
static int ixgb_vlan_rx_add_vid(struct net_device *netdev,
|
||||
__be16 proto, u16 vid);
|
||||
static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
|
||||
__be16 proto, u16 vid);
|
||||
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
|
||||
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
|
@ -332,8 +334,8 @@ ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
|
|||
* Tx VLAN insertion does not work per HW design when Rx stripping is
|
||||
* disabled.
|
||||
*/
|
||||
if (!(features & NETIF_F_HW_VLAN_RX))
|
||||
features &= ~NETIF_F_HW_VLAN_TX;
|
||||
if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
|
||||
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
return features;
|
||||
}
|
||||
|
@ -344,7 +346,7 @@ ixgb_set_features(struct net_device *netdev, netdev_features_t features)
|
|||
struct ixgb_adapter *adapter = netdev_priv(netdev);
|
||||
netdev_features_t changed = features ^ netdev->features;
|
||||
|
||||
if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX)))
|
||||
if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
|
||||
return 0;
|
||||
|
||||
adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
|
||||
|
@ -479,10 +481,10 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
netdev->hw_features = NETIF_F_SG |
|
||||
NETIF_F_TSO |
|
||||
NETIF_F_HW_CSUM |
|
||||
NETIF_F_HW_VLAN_TX |
|
||||
NETIF_F_HW_VLAN_RX;
|
||||
NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX;
|
||||
netdev->features = netdev->hw_features |
|
||||
NETIF_F_HW_VLAN_FILTER;
|
||||
NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
netdev->hw_features |= NETIF_F_RXCSUM;
|
||||
|
||||
if (pci_using_dac) {
|
||||
|
@ -1140,7 +1142,7 @@ ixgb_set_multi(struct net_device *netdev)
|
|||
}
|
||||
|
||||
alloc_failed:
|
||||
if (netdev->features & NETIF_F_HW_VLAN_RX)
|
||||
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
ixgb_vlan_strip_enable(adapter);
|
||||
else
|
||||
ixgb_vlan_strip_disable(adapter);
|
||||
|
@ -2080,8 +2082,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
|
|||
|
||||
skb->protocol = eth_type_trans(skb, netdev);
|
||||
if (status & IXGB_RX_DESC_STATUS_VP)
|
||||
__vlan_hwaccel_put_tag(skb,
|
||||
le16_to_cpu(rx_desc->special));
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
|
||||
le16_to_cpu(rx_desc->special));
|
||||
|
||||
netif_receive_skb(skb);
|
||||
|
||||
|
@ -2209,7 +2211,7 @@ ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
|
|||
}
|
||||
|
||||
static int
|
||||
ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
|
||||
ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct ixgb_adapter *adapter = netdev_priv(netdev);
|
||||
u32 vfta, index;
|
||||
|
@ -2226,7 +2228,7 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
|
|||
}
|
||||
|
||||
static int
|
||||
ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
|
||||
ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct ixgb_adapter *adapter = netdev_priv(netdev);
|
||||
u32 vfta, index;
|
||||
|
@ -2248,7 +2250,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
|
|||
u16 vid;
|
||||
|
||||
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
|
||||
ixgb_vlan_rx_add_vid(adapter->netdev, vid);
|
||||
ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
|
|
|
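The restore_vlan helpers in the Intel drivers above (and ixgbe's just below) re-register the saved VIDs after a device reset through the now protocol-aware add_vid callback, spelling out htons(ETH_P_8021Q) because these NICs only accelerate CTAGs. A hypothetical sketch of that loop (the foo_* names and the separate VID 0 registration are assumptions for illustration, not code from this patchset):

#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

struct foo_adapter {
	struct net_device *netdev;
	DECLARE_BITMAP(active_vlans, VLAN_N_VID);
};

/* Defined elsewhere, e.g. shaped like the add_vid callback sketched earlier */
int foo_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);

/* Re-program the hardware VLAN filter after a reset */
static void foo_restore_vlan(struct foo_adapter *adapter)
{
	u16 vid;

	/* VID 0 so priority-tagged frames keep passing the filter */
	foo_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		foo_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}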
@ -1488,10 +1488,10 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
|
|||
|
||||
ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
|
||||
|
||||
if ((dev->features & NETIF_F_HW_VLAN_RX) &&
|
||||
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
|
||||
ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
|
||||
u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
|
||||
__vlan_hwaccel_put_tag(skb, vid);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
|
||||
}
|
||||
|
||||
skb_record_rx_queue(skb, rx_ring->queue_index);
|
||||
|
@ -3467,7 +3467,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
|
|||
hw->mac.ops.enable_rx_dma(hw, rxctrl);
|
||||
}
|
||||
|
||||
static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
|
||||
static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct ixgbe_adapter *adapter = netdev_priv(netdev);
|
||||
struct ixgbe_hw *hw = &adapter->hw;
|
||||
|
@ -3479,7 +3480,8 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
|
||||
static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct ixgbe_adapter *adapter = netdev_priv(netdev);
|
||||
struct ixgbe_hw *hw = &adapter->hw;
|
||||
|
@ -3584,10 +3586,10 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
|
|||
{
|
||||
u16 vid;
|
||||
|
||||
ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
|
||||
ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
|
||||
|
||||
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
|
||||
ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
|
||||
ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -3722,7 +3724,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
|
|||
|
||||
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
|
||||
|
||||
if (netdev->features & NETIF_F_HW_VLAN_RX)
|
||||
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
ixgbe_vlan_strip_enable(adapter);
|
||||
else
|
||||
ixgbe_vlan_strip_disable(adapter);
|
||||
|
@ -7024,7 +7026,7 @@ static int ixgbe_set_features(struct net_device *netdev,
|
|||
break;
|
||||
}
|
||||
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
ixgbe_vlan_strip_enable(adapter);
|
||||
else
|
||||
ixgbe_vlan_strip_disable(adapter);
|
||||
|
@ -7431,9 +7433,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
netdev->features = NETIF_F_SG |
|
||||
NETIF_F_IP_CSUM |
|
||||
NETIF_F_IPV6_CSUM |
|
||||
NETIF_F_HW_VLAN_TX |
|
||||
NETIF_F_HW_VLAN_RX |
|
||||
NETIF_F_HW_VLAN_FILTER |
|
||||
NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX |
|
||||
NETIF_F_HW_VLAN_CTAG_FILTER |
|
||||
NETIF_F_TSO |
|
||||
NETIF_F_TSO6 |
|
||||
NETIF_F_RXHASH |
|
||||
|
|
|
@ -35,7 +35,7 @@
|
|||
#include <linux/ip.h>
|
||||
#include <linux/tcp.h>
|
||||
#include <linux/ipv6.h>
|
||||
#ifdef NETIF_F_HW_VLAN_TX
|
||||
#ifdef NETIF_F_HW_VLAN_CTAG_TX
|
||||
#include <linux/if_vlan.h>
|
||||
#endif
|
||||
|
||||
|
|
|
@ -291,7 +291,7 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
|
|||
u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
|
||||
|
||||
if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
|
||||
__vlan_hwaccel_put_tag(skb, tag);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
|
||||
|
||||
if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
|
||||
napi_gro_receive(&q_vector->napi, skb);
|
||||
|
@ -1179,7 +1179,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
|
|||
}
|
||||
}
|
||||
|
||||
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
|
||||
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
|
||||
struct ixgbe_hw *hw = &adapter->hw;
|
||||
|
@ -1204,7 +1205,8 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
|
|||
return err;
|
||||
}
|
||||
|
||||
static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
|
||||
static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
|
||||
struct ixgbe_hw *hw = &adapter->hw;
|
||||
|
@ -1227,7 +1229,8 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
|
|||
u16 vid;
|
||||
|
||||
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
|
||||
ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
|
||||
ixgbevf_vlan_rx_add_vid(adapter->netdev,
|
||||
htons(ETH_P_8021Q), vid);
|
||||
}
|
||||
|
||||
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
|
||||
|
@ -3410,9 +3413,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
NETIF_F_RXCSUM;
|
||||
|
||||
netdev->features = netdev->hw_features |
|
||||
NETIF_F_HW_VLAN_TX |
|
||||
NETIF_F_HW_VLAN_RX |
|
||||
NETIF_F_HW_VLAN_FILTER;
|
||||
NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX |
|
||||
NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
|
||||
netdev->vlan_features |= NETIF_F_TSO;
|
||||
netdev->vlan_features |= NETIF_F_TSO6;
|
||||
|
|
|
@ -1059,7 +1059,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
|
|||
if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
|
||||
u16 vid = le16_to_cpu(rxdesc->descwb.vlan);
|
||||
|
||||
__vlan_hwaccel_put_tag(skb, vid);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
|
||||
NET_STAT(jme).rx_bytes += 4;
|
||||
}
|
||||
jme->jme_rx(skb);
|
||||
|
@ -3030,8 +3030,8 @@ jme_init_one(struct pci_dev *pdev,
|
|||
NETIF_F_SG |
|
||||
NETIF_F_TSO |
|
||||
NETIF_F_TSO6 |
|
||||
NETIF_F_HW_VLAN_TX |
|
||||
NETIF_F_HW_VLAN_RX;
|
||||
NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX;
|
||||
if (using_dac)
|
||||
netdev->features |= NETIF_F_HIGHDMA;
|
||||
|
||||
|
|
|
@ -1421,14 +1421,14 @@ static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features)
|
|||
struct sky2_hw *hw = sky2->hw;
|
||||
u16 port = sky2->port;
|
||||
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
|
||||
RX_VLAN_STRIP_ON);
|
||||
else
|
||||
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
|
||||
RX_VLAN_STRIP_OFF);
|
||||
|
||||
if (features & NETIF_F_HW_VLAN_TX) {
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_TX) {
|
||||
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
|
||||
TX_VLAN_TAG_ON);
|
||||
|
||||
|
@ -2713,7 +2713,7 @@ static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
|
|||
struct sk_buff *skb;
|
||||
|
||||
skb = sky2->rx_ring[sky2->rx_next].skb;
|
||||
__vlan_hwaccel_put_tag(skb, be16_to_cpu(length));
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(length));
|
||||
}
|
||||
|
||||
static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
|
||||
|
@ -4406,7 +4406,7 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
|
|||
if (changed & NETIF_F_RXHASH)
|
||||
rx_set_rss(dev, features);
|
||||
|
||||
if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
|
||||
if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
|
||||
sky2_vlan_mode(dev, features);
|
||||
|
||||
return 0;
|
||||
|
@ -4793,7 +4793,8 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
|
|||
dev->hw_features |= NETIF_F_RXHASH;
|
||||
|
||||
if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) {
|
||||
dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
|
||||
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX;
|
||||
dev->vlan_features |= SKY2_VLAN_OFFLOADS;
|
||||
}
|
||||
|
||||
|
|
|
@ -356,7 +356,8 @@ static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
|
|||
}
|
||||
#endif
|
||||
|
||||
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
|
||||
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
struct mlx4_en_dev *mdev = priv->mdev;
|
||||
|
@ -381,7 +382,8 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
|
||||
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
struct mlx4_en_dev *mdev = priv->mdev;
|
||||
|
@ -2082,8 +2084,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
|
|||
|
||||
dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
|
||||
dev->features = dev->hw_features | NETIF_F_HIGHDMA |
|
||||
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
|
||||
NETIF_F_HW_VLAN_FILTER;
|
||||
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
|
||||
NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
dev->hw_features |= NETIF_F_LOOPBACK;
|
||||
|
||||
if (mdev->dev->caps.steering_mode ==
|
||||
|
|
|
@ -673,7 +673,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
|
|||
cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) {
|
||||
u16 vid = be16_to_cpu(cqe->sl_vid);
|
||||
|
||||
__vlan_hwaccel_put_tag(gro_skb, vid);
|
||||
__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
|
||||
}
|
||||
|
||||
if (dev->features & NETIF_F_RXHASH)
|
||||
|
@ -716,7 +716,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
|
|||
|
||||
if (be32_to_cpu(cqe->vlan_my_qpn) &
|
||||
MLX4_CQE_VLAN_PRESENT_MASK)
|
||||
__vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid));
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
|
||||
|
||||
/* Push it up the stack */
|
||||
netif_receive_skb(skb);
|
||||
|
|
|
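On the receive path the change repeated in the hunks above is equally mechanical: __vlan_hwaccel_put_tag() now takes the tag's protocol ahead of the TCI, and every driver here passes htons(ETH_P_8021Q) because the hardware only ever strips 802.1Q tags. A hedged sketch with hypothetical names (foo_receive_skb, the vlan_stripped flag and vlan_tci argument stand in for driver-specific descriptor fields):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

/* Hand a received frame to the stack, recording any hardware-stripped
 * 802.1Q tag so the VLAN code can demux it later.
 */
static void foo_receive_skb(struct napi_struct *napi, struct net_device *netdev,
			    struct sk_buff *skb, bool vlan_stripped, u16 vlan_tci)
{
	skb->protocol = eth_type_trans(skb, netdev);

	if (vlan_stripped)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

	napi_gro_receive(napi, skb);
}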
@ -1281,7 +1281,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
|
|||
va = addr;
|
||||
va += MXGEFW_PAD;
|
||||
veh = (struct vlan_ethhdr *)va;
|
||||
if ((dev->features & NETIF_F_HW_VLAN_RX) == NETIF_F_HW_VLAN_RX &&
|
||||
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
|
||||
NETIF_F_HW_VLAN_CTAG_RX &&
|
||||
veh->h_vlan_proto == htons(ETH_P_8021Q)) {
|
||||
/* fixup csum if needed */
|
||||
if (skb->ip_summed == CHECKSUM_COMPLETE) {
|
||||
|
@ -1289,7 +1290,7 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
|
|||
skb->csum = csum_sub(skb->csum, vsum);
|
||||
}
|
||||
/* pop tag */
|
||||
__vlan_hwaccel_put_tag(skb, ntohs(veh->h_vlan_TCI));
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(veh->h_vlan_TCI));
|
||||
memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN);
|
||||
skb->len -= VLAN_HLEN;
|
||||
skb->data_len -= VLAN_HLEN;
|
||||
|
@ -3887,8 +3888,8 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
netdev->mtu = myri10ge_initial_mtu;
|
||||
netdev->hw_features = mgp->features | NETIF_F_RXCSUM;
|
||||
|
||||
/* fake NETIF_F_HW_VLAN_RX for good GRO performance */
|
||||
netdev->hw_features |= NETIF_F_HW_VLAN_RX;
|
||||
/* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
|
||||
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
|
||||
|
||||
netdev->features = netdev->hw_features;
|
||||
|
||||
|
|
|
@ -911,7 +911,7 @@ static void rx_irq(struct net_device *ndev)
|
|||
unsigned short tag;
|
||||
|
||||
tag = ntohs(extsts & EXTSTS_VTG_MASK);
|
||||
__vlan_hwaccel_put_tag(skb, tag);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
|
||||
}
|
||||
#endif
|
||||
rx_rc = netif_rx(skb);
|
||||
|
@ -2193,7 +2193,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
|
|||
|
||||
#ifdef NS83820_VLAN_ACCEL_SUPPORT
|
||||
/* We also support hardware vlan acceleration */
|
||||
ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
|
||||
ndev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
#endif
|
||||
|
||||
if (using_dac) {
|
||||
|
|
|
@ -7920,7 +7920,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
|
|||
NETIF_F_TSO | NETIF_F_TSO6 |
|
||||
NETIF_F_RXCSUM | NETIF_F_LRO;
|
||||
dev->features |= dev->hw_features |
|
||||
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
|
||||
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
if (sp->device_type & XFRAME_II_DEVICE) {
|
||||
dev->hw_features |= NETIF_F_UFO;
|
||||
if (ufo)
|
||||
|
@ -8555,7 +8555,7 @@ static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
|
|||
|
||||
skb->protocol = eth_type_trans(skb, dev);
|
||||
if (vlan_tag && sp->vlan_strip_flag)
|
||||
__vlan_hwaccel_put_tag(skb, vlan_tag);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
|
||||
if (sp->config.napi)
|
||||
netif_receive_skb(skb);
|
||||
else
|
||||
|
|
|
@ -312,7 +312,7 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
|
|||
|
||||
if (ext_info->vlan &&
|
||||
ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
|
||||
__vlan_hwaccel_put_tag(skb, ext_info->vlan);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ext_info->vlan);
|
||||
napi_gro_receive(ring->napi_p, skb);
|
||||
|
||||
vxge_debug_entryexit(VXGE_TRACE,
|
||||
|
@ -3300,12 +3300,13 @@ static void vxge_tx_watchdog(struct net_device *dev)
|
|||
/**
|
||||
* vxge_vlan_rx_add_vid
|
||||
* @dev: net device pointer.
|
||||
* @proto: vlan protocol
|
||||
* @vid: vid
|
||||
*
|
||||
* Add the vlan id to the devices vlan id table
|
||||
*/
|
||||
static int
|
||||
vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
|
||||
vxge_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct vxgedev *vdev = netdev_priv(dev);
|
||||
struct vxge_vpath *vpath;
|
||||
|
@ -3323,14 +3324,15 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
|
|||
}
|
||||
|
||||
/**
|
||||
* vxge_vlan_rx_add_vid
|
||||
* vxge_vlan_rx_kill_vid
|
||||
* @dev: net device pointer.
|
||||
* @proto: vlan protocol
|
||||
* @vid: vid
|
||||
*
|
||||
* Remove the vlan id from the device's vlan id table
|
||||
*/
|
||||
static int
|
||||
vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
|
||||
vxge_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct vxgedev *vdev = netdev_priv(dev);
|
||||
struct vxge_vpath *vpath;
|
||||
|
@ -3415,12 +3417,12 @@ static int vxge_device_register(struct __vxge_hw_device *hldev,
|
|||
ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
|
||||
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
||||
NETIF_F_TSO | NETIF_F_TSO6 |
|
||||
NETIF_F_HW_VLAN_TX;
|
||||
NETIF_F_HW_VLAN_CTAG_TX;
|
||||
if (vdev->config.rth_steering != NO_STEERING)
|
||||
ndev->hw_features |= NETIF_F_RXHASH;
|
||||
|
||||
ndev->features |= ndev->hw_features |
|
||||
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
|
||||
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
|
||||
|
||||
ndev->netdev_ops = &vxge_netdev_ops;
|
||||
|
|
|
@ -2961,15 +2961,15 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
|
|||
vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
|
||||
|
||||
/*
|
||||
* There's need to check for NETIF_F_HW_VLAN_RX here.
|
||||
* Even if vlan rx accel is disabled,
|
||||
* There's need to check for NETIF_F_HW_VLAN_CTAG_RX
|
||||
* here. Even if vlan rx accel is disabled,
|
||||
* NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set.
|
||||
*/
|
||||
if (dev->features & NETIF_F_HW_VLAN_RX &&
|
||||
if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
|
||||
vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
|
||||
u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
|
||||
|
||||
__vlan_hwaccel_put_tag(skb, vid);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
|
||||
}
|
||||
napi_gro_receive(&np->napi, skb);
|
||||
u64_stats_update_begin(&np->swstats_rx_syncp);
|
||||
|
@ -4816,7 +4816,7 @@ static netdev_features_t nv_fix_features(struct net_device *dev,
|
|||
netdev_features_t features)
|
||||
{
|
||||
/* vlan is dependent on rx checksum offload */
|
||||
if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
|
||||
if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
|
||||
features |= NETIF_F_RXCSUM;
|
||||
|
||||
return features;
|
||||
|
@ -4828,12 +4828,12 @@ static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
|
|||
|
||||
spin_lock_irq(&np->lock);
|
||||
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
|
||||
else
|
||||
np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
|
||||
|
||||
if (features & NETIF_F_HW_VLAN_TX)
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_TX)
|
||||
np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
|
||||
else
|
||||
np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
|
||||
|
@ -4870,7 +4870,7 @@ static int nv_set_features(struct net_device *dev, netdev_features_t features)
|
|||
spin_unlock_irq(&np->lock);
|
||||
}
|
||||
|
||||
if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
|
||||
if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX))
|
||||
nv_vlan_mode(dev, features);
|
||||
|
||||
return 0;
|
||||
|
@ -5705,7 +5705,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
|
|||
np->vlanctl_bits = 0;
|
||||
if (id->driver_data & DEV_HAS_VLAN) {
|
||||
np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
|
||||
dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
|
||||
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
|
||||
NETIF_F_HW_VLAN_CTAG_TX;
|
||||
}
|
||||
|
||||
dev->features |= dev->hw_features;
|
||||
|
@ -5996,7 +5997,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
|
|||
dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
|
||||
dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
|
||||
"csum " : "",
|
||||
dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
|
||||
dev->features & (NETIF_F_HW_VLAN_CTAG_RX |
|
||||
NETIF_F_HW_VLAN_CTAG_TX) ?
|
||||
"vlan " : "",
|
||||
dev->features & (NETIF_F_LOOPBACK) ?
|
||||
"loopback " : "",
|
||||
|
|
|
@ -1345,7 +1345,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
|
|||
}
|
||||
|
||||
if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
|
||||
netdev->hw_features |= NETIF_F_HW_VLAN_TX;
|
||||
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
|
||||
netdev->hw_features |= NETIF_F_LRO;
|
||||
|
|
|
@ -1050,7 +1050,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
|
|||
skb->protocol = eth_type_trans(skb, netdev);
|
||||
|
||||
if (vid != 0xffff)
|
||||
__vlan_hwaccel_put_tag(skb, vid);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
|
||||
|
||||
napi_gro_receive(&sds_ring->napi, skb);
|
||||
|
||||
|
@ -1153,7 +1153,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
|
|||
}
|
||||
|
||||
if (vid != 0xffff)
|
||||
__vlan_hwaccel_put_tag(skb, vid);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
|
||||
netif_receive_skb(skb);
|
||||
|
||||
adapter->stats.lro_pkts++;
|
||||
|
@ -1518,7 +1518,7 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
|
|||
skb->protocol = eth_type_trans(skb, netdev);
|
||||
|
||||
if (vid != 0xffff)
|
||||
__vlan_hwaccel_put_tag(skb, vid);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
|
||||
|
||||
napi_gro_receive(&sds_ring->napi, skb);
|
||||
|
||||
|
@ -1615,7 +1615,7 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
|
|||
}
|
||||
|
||||
if (vid != 0xffff)
|
||||
__vlan_hwaccel_put_tag(skb, vid);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
|
||||
|
||||
netif_receive_skb(skb);
|
||||
|
||||
|
|
|
@ -86,8 +86,8 @@ static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
|
|||
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
|
||||
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
|
||||
struct qlcnic_esw_func_cfg *);
|
||||
static int qlcnic_vlan_rx_add(struct net_device *, u16);
|
||||
static int qlcnic_vlan_rx_del(struct net_device *, u16);
|
||||
static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
|
||||
static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
|
||||
|
||||
#define QLCNIC_IS_TSO_CAPABLE(adapter) \
|
||||
((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
|
||||
|
@ -902,7 +902,7 @@ void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
|
|||
}
|
||||
|
||||
static int
|
||||
qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
|
||||
qlcnic_vlan_rx_add(struct net_device *netdev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct qlcnic_adapter *adapter = netdev_priv(netdev);
|
||||
set_bit(vid, adapter->vlans);
|
||||
|
@ -910,7 +910,7 @@ qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
|
|||
}
|
||||
|
||||
static int
|
||||
qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
|
||||
qlcnic_vlan_rx_del(struct net_device *netdev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct qlcnic_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
|
@ -1714,7 +1714,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
|
|||
|
||||
netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
|
||||
NETIF_F_IPV6_CSUM | NETIF_F_GRO |
|
||||
NETIF_F_HW_VLAN_RX);
|
||||
NETIF_F_HW_VLAN_CTAG_RX);
|
||||
netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
|
||||
NETIF_F_IPV6_CSUM);
|
||||
|
||||
|
@ -1729,7 +1729,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
|
|||
}
|
||||
|
||||
if (qlcnic_vlan_tx_check(adapter))
|
||||
netdev->features |= (NETIF_F_HW_VLAN_TX);
|
||||
netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX);
|
||||
|
||||
if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
|
||||
netdev->features |= NETIF_F_LRO;
|
||||
|
@ -3346,7 +3346,7 @@ void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
|
|||
|
||||
rcu_read_lock();
|
||||
for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
|
||||
dev = __vlan_find_dev_deep(netdev, vid);
|
||||
dev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), vid);
|
||||
if (!dev)
|
||||
continue;
|
||||
qlcnic_config_indev_addr(adapter, dev, event);
|
||||
|
|
|
@ -409,7 +409,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
|
|||
(qdev->
|
||||
func << CAM_OUT_FUNC_SHIFT) |
|
||||
(0 << CAM_OUT_CQ_ID_SHIFT));
|
||||
if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
|
||||
if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
cam_output |= CAM_OUT_RV;
|
||||
/* route to NIC core */
|
||||
ql_write32(qdev, MAC_ADDR_DATA, cam_output);
|
||||
|
@ -1498,7 +1498,7 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
|
|||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
skb_record_rx_queue(skb, rx_ring->cq_id);
|
||||
if (vlan_id != 0xffff)
|
||||
__vlan_hwaccel_put_tag(skb, vlan_id);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
|
||||
napi_gro_frags(napi);
|
||||
}
|
||||
|
||||
|
@ -1574,7 +1574,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
|
|||
|
||||
skb_record_rx_queue(skb, rx_ring->cq_id);
|
||||
if (vlan_id != 0xffff)
|
||||
__vlan_hwaccel_put_tag(skb, vlan_id);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
|
||||
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
|
||||
napi_gro_receive(napi, skb);
|
||||
else
|
||||
|
@ -1670,7 +1670,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
|
|||
|
||||
skb_record_rx_queue(skb, rx_ring->cq_id);
|
||||
if (vlan_id != 0xffff)
|
||||
__vlan_hwaccel_put_tag(skb, vlan_id);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
|
||||
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
|
||||
napi_gro_receive(&rx_ring->napi, skb);
|
||||
else
|
||||
|
@ -1975,7 +1975,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
|
|||
rx_ring->rx_bytes += skb->len;
|
||||
skb_record_rx_queue(skb, rx_ring->cq_id);
|
||||
if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
|
||||
__vlan_hwaccel_put_tag(skb, vlan_id);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
|
||||
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
|
||||
napi_gro_receive(&rx_ring->napi, skb);
|
||||
else
|
||||
|
@ -2279,7 +2279,7 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
|
|||
{
|
||||
struct ql_adapter *qdev = netdev_priv(ndev);
|
||||
|
||||
if (features & NETIF_F_HW_VLAN_RX) {
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX) {
|
||||
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
|
||||
NIC_RCV_CFG_VLAN_MATCH_AND_NON);
|
||||
} else {
|
||||
|
@ -2294,10 +2294,10 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
|
|||
* Since there is no support for separate rx/tx vlan accel
|
||||
* enable/disable make sure tx flag is always in same state as rx.
|
||||
*/
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
features |= NETIF_F_HW_VLAN_TX;
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
features |= NETIF_F_HW_VLAN_CTAG_TX;
|
||||
else
|
||||
features &= ~NETIF_F_HW_VLAN_TX;
|
||||
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
return features;
|
||||
}
|
||||
|
@ -2307,7 +2307,7 @@ static int qlge_set_features(struct net_device *ndev,
|
|||
{
|
||||
netdev_features_t changed = ndev->features ^ features;
|
||||
|
||||
if (changed & NETIF_F_HW_VLAN_RX)
|
||||
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
qlge_vlan_mode(ndev, features);
|
||||
|
||||
return 0;
|
||||
|
@ -2326,7 +2326,7 @@ static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
|
|||
return err;
|
||||
}
|
||||
|
||||
static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
|
||||
static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct ql_adapter *qdev = netdev_priv(ndev);
|
||||
int status;
|
||||
|
@ -2357,7 +2357,7 @@ static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
|
|||
return err;
|
||||
}
|
||||
|
||||
static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
|
||||
static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct ql_adapter *qdev = netdev_priv(ndev);
|
||||
int status;
|
||||
|
@ -4665,9 +4665,9 @@ static int qlge_probe(struct pci_dev *pdev,
|
|||
SET_NETDEV_DEV(ndev, &pdev->dev);
|
||||
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
|
||||
NETIF_F_TSO | NETIF_F_TSO_ECN |
|
||||
NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
|
||||
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
|
||||
ndev->features = ndev->hw_features |
|
||||
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
|
||||
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
ndev->vlan_features = ndev->hw_features;
|
||||
|
||||
if (test_bit(QL_DMA64, &qdev->flags))
|
||||
|
|
|
@ -431,7 +431,7 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
|
|||
cp->dev->stats.rx_bytes += skb->len;
|
||||
|
||||
if (opts2 & RxVlanTagged)
|
||||
__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
|
||||
|
||||
napi_gro_receive(&cp->napi, skb);
|
||||
}
|
||||
|
@ -1438,7 +1438,7 @@ static int cp_set_features(struct net_device *dev, netdev_features_t features)
|
|||
else
|
||||
cp->cpcmd &= ~RxChkSum;
|
||||
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
cp->cpcmd |= RxVlanOn;
|
||||
else
|
||||
cp->cpcmd &= ~RxVlanOn;
|
||||
|
@ -1955,14 +1955,14 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
dev->ethtool_ops = &cp_ethtool_ops;
|
||||
dev->watchdog_timeo = TX_TIMEOUT;
|
||||
|
||||
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
|
||||
dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
|
||||
if (pci_using_dac)
|
||||
dev->features |= NETIF_F_HIGHDMA;
|
||||
|
||||
/* disabled by default until verified */
|
||||
dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
|
||||
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
|
||||
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
|
||||
NETIF_F_HIGHDMA;
|
||||
|
||||
|
|
|
@ -1793,16 +1793,17 @@ static void __rtl8169_set_features(struct net_device *dev,
|
|||
netdev_features_t changed = features ^ dev->features;
|
||||
void __iomem *ioaddr = tp->mmio_addr;
|
||||
|
||||
if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
|
||||
if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
|
||||
NETIF_F_HW_VLAN_CTAG_RX)))
|
||||
return;
|
||||
|
||||
if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
|
||||
if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
|
||||
if (features & NETIF_F_RXCSUM)
|
||||
tp->cp_cmd |= RxChkSum;
|
||||
else
|
||||
tp->cp_cmd &= ~RxChkSum;
|
||||
|
||||
if (dev->features & NETIF_F_HW_VLAN_RX)
|
||||
if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
tp->cp_cmd |= RxVlan;
|
||||
else
|
||||
tp->cp_cmd &= ~RxVlan;
|
||||
|
@ -1842,7 +1843,7 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
|
|||
u32 opts2 = le32_to_cpu(desc->opts2);
|
||||
|
||||
if (opts2 & RxVlanTag)
|
||||
__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
|
||||
}
|
||||
|
||||
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
|
||||
|
@ -7036,16 +7037,17 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
/* don't enable SG, IP_CSUM and TSO by default - it might not work
|
||||
* properly for all devices */
|
||||
dev->features |= NETIF_F_RXCSUM |
|
||||
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
|
||||
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
|
||||
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
|
||||
NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
|
||||
NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX;
|
||||
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
|
||||
NETIF_F_HIGHDMA;
|
||||
|
||||
if (tp->mac_version == RTL_GIGA_MAC_VER_05)
|
||||
/* 8110SCd requires hardware Rx VLAN - disallow toggling */
|
||||
dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
|
||||
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
|
||||
|
||||
dev->hw_features |= NETIF_F_RXALL;
|
||||
dev->hw_features |= NETIF_F_RXFCS;
|
||||
|
|
|
@ -2448,7 +2448,8 @@ static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
|
|||
return TSU_VTAG1;
|
||||
}
|
||||
|
||||
static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
|
||||
static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct sh_eth_private *mdp = netdev_priv(ndev);
|
||||
int vtag_reg_index = sh_eth_get_vtag_index(mdp);
|
||||
|
@ -2478,7 +2479,8 @@ static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
|
||||
static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct sh_eth_private *mdp = netdev_priv(ndev);
|
||||
int vtag_reg_index = sh_eth_get_vtag_index(mdp);
|
||||
|
@ -2749,7 +2751,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
|
|||
goto out_release;
|
||||
}
|
||||
mdp->port = devno % 2;
|
||||
ndev->features = NETIF_F_HW_VLAN_FILTER;
|
||||
ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
}
|
||||
|
||||
/* initialize first or needed device */
|
||||
|
|
|
@ -2679,7 +2679,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
|
|||
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
|
||||
#ifdef STMMAC_VLAN_TAG_USED
|
||||
/* Both mac100 and gmac support receive VLAN tag detection */
|
||||
ndev->features |= NETIF_F_HW_VLAN_RX;
|
||||
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
|
||||
#endif
|
||||
priv->msg_enable = netif_msg_init(debug, default_msg_level);
|
||||
|
||||
|
|
|
@ -733,7 +733,7 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
|
|||
* @ndev: network device
|
||||
* @vid: VLAN vid to add
|
||||
*/
|
||||
static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
|
||||
static int bdx_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
|
||||
{
|
||||
__bdx_vlan_rx_vid(ndev, vid, 1);
|
||||
return 0;
|
||||
|
@ -744,7 +744,7 @@ static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
|
|||
* @ndev: network device
|
||||
* @vid: VLAN vid to kill
|
||||
*/
|
||||
static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
|
||||
static int bdx_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
|
||||
{
|
||||
__bdx_vlan_rx_vid(ndev, vid, 0);
|
||||
return 0;
|
||||
|
@ -1148,7 +1148,7 @@ NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan,
|
|||
priv->ndev->name,
|
||||
GET_RXD_VLAN_ID(rxd_vlan),
|
||||
GET_RXD_VTAG(rxd_val1));
|
||||
__vlan_hwaccel_put_tag(skb, GET_RXD_VLAN_TCI(rxd_vlan));
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), GET_RXD_VLAN_TCI(rxd_vlan));
|
||||
}
|
||||
netif_receive_skb(skb);
|
||||
}
|
||||
|
@ -2017,12 +2017,12 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
* so we can have them same for all ports of the board */
|
||||
ndev->if_port = port;
|
||||
ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
|
||||
| NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
|
||||
NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
|
||||
| NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
|
||||
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM
|
||||
/*| NETIF_F_FRAGLIST */
|
||||
;
|
||||
ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
|
||||
NETIF_F_TSO | NETIF_F_HW_VLAN_TX;
|
||||
NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
if (pci_using_dac)
|
||||
ndev->features |= NETIF_F_HIGHDMA;
|
||||
|
|
|
@ -1251,7 +1251,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
|
|||
}
|
||||
|
||||
static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
|
||||
unsigned short vid)
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct cpsw_priv *priv = netdev_priv(ndev);
|
||||
|
||||
|
@ -1263,7 +1263,7 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
|
|||
}
|
||||
|
||||
static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
|
||||
unsigned short vid)
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct cpsw_priv *priv = netdev_priv(ndev);
|
||||
int ret;
|
||||
|
@ -1599,7 +1599,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
|
|||
priv_sl2->num_irqs = priv->num_irqs;
|
||||
}
|
||||
|
||||
ndev->features |= NETIF_F_HW_VLAN_FILTER;
|
||||
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
|
||||
ndev->netdev_ops = &cpsw_netdev_ops;
|
||||
SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
|
||||
|
@ -1837,7 +1837,7 @@ static int cpsw_probe(struct platform_device *pdev)
|
|||
k++;
|
||||
}
|
||||
|
||||
ndev->features |= NETIF_F_HW_VLAN_FILTER;
|
||||
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
|
||||
ndev->netdev_ops = &cpsw_netdev_ops;
|
||||
SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
|
||||
|
|
|
@ -2329,8 +2329,8 @@ spider_net_setup_netdev(struct spider_net_card *card)
|
|||
if (SPIDER_NET_RX_CSUM_DEFAULT)
|
||||
netdev->features |= NETIF_F_RXCSUM;
|
||||
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX;
|
||||
/* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
|
||||
* NETIF_F_HW_VLAN_FILTER */
|
||||
/* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
|
||||
* NETIF_F_HW_VLAN_CTAG_FILTER */
|
||||
|
||||
netdev->irq = card->pdev->irq;
|
||||
card->num_rx_ints = 0;
|
||||
|
|
|
@ -508,8 +508,10 @@ static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
|
|||
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
|
||||
static const struct ethtool_ops netdev_ethtool_ops;
|
||||
static int rhine_close(struct net_device *dev);
|
||||
static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
|
||||
static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
|
||||
static int rhine_vlan_rx_add_vid(struct net_device *dev,
|
||||
__be16 proto, u16 vid);
|
||||
static int rhine_vlan_rx_kill_vid(struct net_device *dev,
|
||||
__be16 proto, u16 vid);
|
||||
static void rhine_restart_tx(struct net_device *dev);
|
||||
|
||||
static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
|
||||
|
@ -1026,8 +1028,9 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
|
||||
|
||||
if (pdev->revision >= VT6105M)
|
||||
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
|
||||
NETIF_F_HW_VLAN_FILTER;
|
||||
dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX |
|
||||
NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
|
||||
/* dev->name not defined before register_netdev()! */
|
||||
rc = register_netdev(dev);
|
||||
|
@ -1414,7 +1417,7 @@ static void rhine_update_vcam(struct net_device *dev)
|
|||
rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
|
||||
}
|
||||
|
||||
static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
|
||||
static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct rhine_private *rp = netdev_priv(dev);
|
||||
|
||||
|
@ -1425,7 +1428,7 @@ static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
|
||||
static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct rhine_private *rp = netdev_priv(dev);
|
||||
|
||||
|
@ -1933,7 +1936,7 @@ static int rhine_rx(struct net_device *dev, int limit)
|
|||
skb->protocol = eth_type_trans(skb, dev);
|
||||
|
||||
if (unlikely(desc_length & DescTag))
|
||||
__vlan_hwaccel_put_tag(skb, vlan_tci);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
|
||||
netif_receive_skb(skb);
|
||||
|
||||
u64_stats_update_begin(&rp->rx_stats.syncp);
|
||||
|
|
|
@ -525,7 +525,8 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
|
|||
mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
|
||||
}
|
||||
|
||||
static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
|
||||
static int velocity_vlan_rx_add_vid(struct net_device *dev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct velocity_info *vptr = netdev_priv(dev);
|
||||
|
||||
|
@ -536,7 +537,8 @@ static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
|
||||
static int velocity_vlan_rx_kill_vid(struct net_device *dev,
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct velocity_info *vptr = netdev_priv(dev);
|
||||
|
||||
|
@ -2078,7 +2080,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
|
|||
if (rd->rdesc0.RSR & RSR_DETAG) {
|
||||
u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
|
||||
|
||||
__vlan_hwaccel_put_tag(skb, vid);
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
|
||||
}
|
||||
netif_rx(skb);
|
||||
|
||||
|
@ -2810,9 +2812,10 @@ static int velocity_found1(struct pci_dev *pdev,
|
|||
dev->ethtool_ops = &velocity_ethtool_ops;
|
||||
netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
|
||||
|
||||
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_TX;
|
||||
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
|
||||
NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
|
||||
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
|
||||
NETIF_F_HW_VLAN_CTAG_TX;
|
||||
dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER |
|
||||
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_IP_CSUM;
|
||||
|
||||
ret = register_netdev(dev);
|
||||
if (ret < 0)
|
||||
|
|
|
@ -1018,9 +1018,9 @@ static int temac_of_probe(struct platform_device *op)
|
|||
ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
|
||||
ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
|
||||
ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
|
||||
ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */
|
||||
ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */
|
||||
ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */
|
||||
ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
|
||||
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */
|
||||
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
|
||||
ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
|
||||
ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
|
||||
ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
|
||||
|
|
|
@ -431,7 +431,7 @@ static int netvsc_probe(struct hv_device *dev,
|
|||
|
||||
/* TODO: Add GSO and Checksum offload */
|
||||
net->hw_features = NETIF_F_SG;
|
||||
net->features = NETIF_F_SG | NETIF_F_HW_VLAN_TX;
|
||||
net->features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
SET_ETHTOOL_OPS(net, ðtool_ops);
|
||||
SET_NETDEV_DEV(net, &dev->device);
|
||||
|
|
|
@ -166,7 +166,8 @@ static const struct net_device_ops ifb_netdev_ops = {
|
|||
|
||||
#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
|
||||
NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
|
||||
NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX)
|
||||
NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
|
||||
NETIF_F_HW_VLAN_STAG_TX)
|
||||
|
||||
static void ifb_setup(struct net_device *dev)
|
||||
{
|
||||
|
|
|
@ -471,7 +471,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
|
|||
(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
|
||||
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
|
||||
NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
|
||||
NETIF_F_HW_VLAN_FILTER)
|
||||
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
|
||||
|
||||
#define MACVLAN_STATE_MASK \
|
||||
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
|
||||
|
@ -567,21 +567,21 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
|
|||
}
|
||||
|
||||
static int macvlan_vlan_rx_add_vid(struct net_device *dev,
|
||||
unsigned short vid)
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct macvlan_dev *vlan = netdev_priv(dev);
|
||||
struct net_device *lowerdev = vlan->lowerdev;
|
||||
|
||||
return vlan_vid_add(lowerdev, vid);
|
||||
return vlan_vid_add(lowerdev, proto, vid);
|
||||
}
|
||||
|
||||
static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
|
||||
unsigned short vid)
|
||||
__be16 proto, u16 vid)
|
||||
{
|
||||
struct macvlan_dev *vlan = netdev_priv(dev);
|
||||
struct net_device *lowerdev = vlan->lowerdev;
|
||||
|
||||
vlan_vid_del(lowerdev, vid);
|
||||
vlan_vid_del(lowerdev, proto, vid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
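For stacked software devices such as macvlan above and team just below, the conversion is a pass-through: vlan_vid_add() and vlan_vid_del() also carry the protocol now, so the upper device simply forwards whatever protocol the VLAN core handed it to its lower device. A minimal sketch, assuming a hypothetical foo_upper private structure:

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

/* Hypothetical upper-device private data holding the lower device */
struct foo_upper {
	struct net_device *lowerdev;
};

static int foo_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct foo_upper *upper = netdev_priv(dev);

	/* Propagate the registration, protocol included, to the real device */
	return vlan_vid_add(upper->lowerdev, proto, vid);
}

static int foo_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct foo_upper *upper = netdev_priv(dev);

	vlan_vid_del(upper->lowerdev, proto, vid);
	return 0;
}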
@ -1598,7 +1598,7 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
|
|||
return stats;
|
||||
}
|
||||
|
||||
static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
|
||||
static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct team *team = netdev_priv(dev);
|
||||
struct team_port *port;
|
||||
|
@ -1610,7 +1610,7 @@ static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
|
|||
*/
|
||||
mutex_lock(&team->lock);
|
||||
list_for_each_entry(port, &team->port_list, list) {
|
||||
err = vlan_vid_add(port->dev, vid);
|
||||
err = vlan_vid_add(port->dev, proto, vid);
|
||||
if (err)
|
||||
goto unwind;
|
||||
}
|
||||
|
@ -1620,20 +1620,20 @@ static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
|
|||
|
||||
unwind:
|
||||
list_for_each_entry_continue_reverse(port, &team->port_list, list)
|
||||
vlan_vid_del(port->dev, vid);
|
||||
vlan_vid_del(port->dev, proto, vid);
|
||||
mutex_unlock(&team->lock);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
|
||||
static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct team *team = netdev_priv(dev);
|
||||
struct team_port *port;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(port, &team->port_list, list)
|
||||
vlan_vid_del(port->dev, vid);
|
||||
vlan_vid_del(port->dev, proto, vid);
|
||||
rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
|
@ -1841,9 +1841,9 @@ static void team_setup(struct net_device *dev)
|
|||
dev->features |= NETIF_F_LLTX;
|
||||
dev->features |= NETIF_F_GRO;
|
||||
dev->hw_features = TEAM_VLAN_FEATURES |
|
||||
NETIF_F_HW_VLAN_TX |
|
||||
NETIF_F_HW_VLAN_RX |
|
||||
NETIF_F_HW_VLAN_FILTER;
|
||||
NETIF_F_HW_VLAN_CTAG_TX |
|
||||
NETIF_F_HW_VLAN_CTAG_RX |
|
||||
NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
|
||||
dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
|
||||
dev->features |= dev->hw_features;
|
||||
|
|
|
@ -101,7 +101,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
|
|||
dev->net->flags |= IFF_NOARP;
|
||||
|
||||
/* no need to put the VLAN tci in the packet headers */
|
||||
dev->net->features |= NETIF_F_HW_VLAN_TX;
|
||||
dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX;
|
||||
err:
|
||||
return ret;
|
||||
}
|
||||
|
@ -221,7 +221,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
|
|||
|
||||
/* map MBIM session to VLAN */
|
||||
if (tci)
|
||||
vlan_put_tag(skb, tci);
|
||||
vlan_put_tag(skb, htons(ETH_P_8021Q), tci);
|
||||
err:
|
||||
return skb;
|
||||
}
|
||||
|
|
|
@ -255,7 +255,8 @@ static const struct net_device_ops veth_netdev_ops = {
|
|||
|
||||
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
|
||||
NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
|
||||
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)
|
||||
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
|
||||
NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
|
||||
|
||||
static void veth_setup(struct net_device *dev)
|
||||
{
|
||||
|
|
|
@@ -1006,7 +1006,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	kfree(buf);
 }
 
-static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_add_vid(struct net_device *dev,
+				   __be16 proto, u16 vid)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct scatterlist sg;

@@ -1019,7 +1020,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
 	return 0;
 }
 
-static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
+				    __be16 proto, u16 vid)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct scatterlist sg;

@@ -1376,7 +1378,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
 	if (vi->has_cvq) {
 		vi->cvq = vqs[total_vqs - 1];
 		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
-			vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
+			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 	}
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {

@@ -1293,7 +1293,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		skb->protocol = eth_type_trans(skb, adapter->netdev);
 
 		if (unlikely(rcd->ts))
-			__vlan_hwaccel_put_tag(skb, rcd->tci);
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
 
 		if (adapter->netdev->features & NETIF_F_LRO)
 			netif_receive_skb(skb);

@@ -1931,7 +1931,7 @@ vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
 
 
 static int
-vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 

@@ -1953,7 +1953,7 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 
 
 static int
-vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 

@@ -2107,7 +2107,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 		devRead->misc.uptFeatures |= UPT1_F_LRO;
 		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
 	}
-	if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
+	if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
 		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
 
 	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);

@@ -2669,14 +2669,15 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
 	struct net_device *netdev = adapter->netdev;
 
 	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
-		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
-		NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 |
+		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
 		NETIF_F_LRO;
 	if (dma64)
 		netdev->hw_features |= NETIF_F_HIGHDMA;
 	netdev->vlan_features = netdev->hw_features &
-				~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
-	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;
+				~(NETIF_F_HW_VLAN_CTAG_TX |
+				  NETIF_F_HW_VLAN_CTAG_RX);
+	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
 }
 
 

@@ -263,7 +263,8 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
 	unsigned long flags;
 	netdev_features_t changed = features ^ netdev->features;
 
-	if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_RX)) {
+	if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO |
+		       NETIF_F_HW_VLAN_CTAG_RX)) {
 		if (features & NETIF_F_RXCSUM)
 			adapter->shared->devRead.misc.uptFeatures |=
 			UPT1_F_RXCSUM;

@@ -279,7 +280,7 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
 			adapter->shared->devRead.misc.uptFeatures &=
 			~UPT1_F_LRO;
 
-		if (features & NETIF_F_HW_VLAN_RX)
+		if (features & NETIF_F_HW_VLAN_CTAG_RX)
 			adapter->shared->devRead.misc.uptFeatures |=
 				UPT1_F_RXVLAN;
 		else
@@ -302,7 +302,8 @@ static void qeth_l2_process_vlans(struct qeth_card *card)
 	spin_unlock_bh(&card->vlanlock);
 }
 
-static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
+				   __be16 proto, u16 vid)
 {
 	struct qeth_card *card = dev->ml_priv;
 	struct qeth_vlan_vid *id;

@@ -331,7 +332,8 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 	return 0;
 }
 
-static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
+				    __be16 proto, u16 vid)
 {
 	struct qeth_vlan_vid *id, *tmpid = NULL;
 	struct qeth_card *card = dev->ml_priv;

@@ -959,7 +961,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
 		SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
 	else
 		SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops);
-	card->dev->features |= NETIF_F_HW_VLAN_FILTER;
+	card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 	card->info.broadcast_capable = 1;
 	qeth_l2_request_initial_mac(card);
 	SET_NETDEV_DEV(card->dev, &card->gdev->dev);

@@ -1824,7 +1824,8 @@ static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
 	rcu_read_unlock();
 }
 
-static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
+				   __be16 proto, u16 vid)
 {
 	struct qeth_card *card = dev->ml_priv;
 

@@ -1832,7 +1833,8 @@ static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 	return 0;
 }
 
-static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
+				    __be16 proto, u16 vid)
 {
 	struct qeth_card *card = dev->ml_priv;
 	unsigned long flags;

@@ -3294,9 +3296,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
 	card->dev->mtu = card->info.initial_mtu;
 	SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
-	card->dev->features |=	NETIF_F_HW_VLAN_TX |
-				NETIF_F_HW_VLAN_RX |
-				NETIF_F_HW_VLAN_FILTER;
+	card->dev->features |=	NETIF_F_HW_VLAN_CTAG_TX |
+				NETIF_F_HW_VLAN_CTAG_RX |
+				NETIF_F_HW_VLAN_CTAG_FILTER;
 	card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
 	card->dev->gso_max_size = 15 * PAGE_SIZE;
 

@@ -1655,7 +1655,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	skb->priority = fcoe->priority;
 
 	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
-	    fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
+	    fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) {
 		skb->vlan_tci = VLAN_TAG_PRESENT |
 				vlan_dev_vlan_id(fcoe->netdev);
 		skb->dev = fcoe->realdev;
@@ -86,15 +86,15 @@ static inline int is_vlan_dev(struct net_device *dev)
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 
 extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
-					       u16 vlan_id);
+					       __be16 vlan_proto, u16 vlan_id);
 extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
 extern u16 vlan_dev_vlan_id(const struct net_device *dev);
 
 extern bool vlan_do_receive(struct sk_buff **skb);
 extern struct sk_buff *vlan_untag(struct sk_buff *skb);
 
-extern int vlan_vid_add(struct net_device *dev, unsigned short vid);
-extern void vlan_vid_del(struct net_device *dev, unsigned short vid);
+extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
+extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);
 
 extern int vlan_vids_add_by_dev(struct net_device *dev,
 				const struct net_device *by_dev);

@@ -157,9 +157,20 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
 }
 #endif
 
+static inline bool vlan_hw_offload_capable(netdev_features_t features,
+					   __be16 proto)
+{
+	if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX)
+		return true;
+	if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX)
+		return true;
+	return false;
+}
+
 /**
  * vlan_insert_tag - regular VLAN tag inserting
  * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
  * @vlan_tci: VLAN TCI to insert
  *
  * Inserts the VLAN tag into @skb as part of the payload

@@ -170,7 +181,8 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
  *
  * Does not change skb->protocol so this function can be used during receive.
  */
-static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci)
+static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
+					      __be16 vlan_proto, u16 vlan_tci)
 {
 	struct vlan_ethhdr *veth;
 

@@ -185,7 +197,7 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci)
 	skb->mac_header -= VLAN_HLEN;
 
 	/* first, the ethernet type */
-	veth->h_vlan_proto = htons(ETH_P_8021Q);
+	veth->h_vlan_proto = vlan_proto;
 
 	/* now, the TCI */
 	veth->h_vlan_TCI = htons(vlan_tci);

@@ -204,24 +216,28 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci)
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
-static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
+static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb,
+					     __be16 vlan_proto, u16 vlan_tci)
 {
-	skb = vlan_insert_tag(skb, vlan_tci);
+	skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
 	if (skb)
-		skb->protocol = htons(ETH_P_8021Q);
+		skb->protocol = vlan_proto;
 	return skb;
 }
 
 /**
 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
 * @skb: skbuff to tag
+* @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
 */
 static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
+						     __be16 vlan_proto,
 						     u16 vlan_tci)
 {
+	skb->vlan_proto = vlan_proto;
 	skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
 	return skb;
 }

@@ -236,12 +252,13 @@ static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
 * Assumes skb->dev is the target that will xmit this frame.
 * Returns a VLAN tagged skb.
 */
-static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
+static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb,
+					   __be16 vlan_proto, u16 vlan_tci)
 {
-	if (skb->dev->features & NETIF_F_HW_VLAN_TX) {
-		return __vlan_hwaccel_put_tag(skb, vlan_tci);
+	if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) {
+		return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
 	} else {
-		return __vlan_put_tag(skb, vlan_tci);
+		return __vlan_put_tag(skb, vlan_proto, vlan_tci);
 	}
 }
 

@@ -256,9 +273,9 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 {
 	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
 
-	if (veth->h_vlan_proto != htons(ETH_P_8021Q)) {
+	if (veth->h_vlan_proto != htons(ETH_P_8021Q) &&
+	    veth->h_vlan_proto != htons(ETH_P_8021AD))
 		return -EINVAL;
-	}
 
 	*vlan_tci = ntohs(veth->h_vlan_TCI);
 	return 0;

@@ -294,7 +311,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
 */
 static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 {
-	if (skb->dev->features & NETIF_F_HW_VLAN_TX) {
+	if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
 		return __vlan_hwaccel_get_tag(skb, vlan_tci);
 	} else {
 		return __vlan_get_tag(skb, vlan_tci);
@@ -22,9 +22,12 @@ enum {
 	NETIF_F_IPV6_CSUM_BIT,		/* Can checksum TCP/UDP over IPV6 */
 	NETIF_F_HIGHDMA_BIT,		/* Can DMA to high memory. */
 	NETIF_F_FRAGLIST_BIT,		/* Scatter/gather IO. */
-	NETIF_F_HW_VLAN_TX_BIT,		/* Transmit VLAN hw acceleration */
-	NETIF_F_HW_VLAN_RX_BIT,		/* Receive VLAN hw acceleration */
-	NETIF_F_HW_VLAN_FILTER_BIT,	/* Receive filtering on VLAN */
+	NETIF_F_HW_VLAN_CTAG_TX_BIT,	/* Transmit VLAN CTAG HW acceleration */
+	NETIF_F_HW_VLAN_CTAG_RX_BIT,	/* Receive VLAN CTAG HW acceleration */
+	NETIF_F_HW_VLAN_CTAG_FILTER_BIT,/* Receive filtering on VLAN CTAGs */
+	NETIF_F_HW_VLAN_STAG_TX_BIT,	/* Transmit VLAN STAG HW acceleration */
+	NETIF_F_HW_VLAN_STAG_RX_BIT,	/* Receive VLAN STAG HW acceleration */
+	NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
 	NETIF_F_VLAN_CHALLENGED_BIT,	/* Device cannot handle VLAN packets */
 	NETIF_F_GSO_BIT,		/* Enable software GSO. */
 	NETIF_F_LLTX_BIT,		/* LockLess TX - deprecated. Please */

@@ -80,9 +83,12 @@ enum {
 #define NETIF_F_GSO_ROBUST	__NETIF_F(GSO_ROBUST)
 #define NETIF_F_HIGHDMA		__NETIF_F(HIGHDMA)
 #define NETIF_F_HW_CSUM		__NETIF_F(HW_CSUM)
-#define NETIF_F_HW_VLAN_FILTER	__NETIF_F(HW_VLAN_FILTER)
-#define NETIF_F_HW_VLAN_RX	__NETIF_F(HW_VLAN_RX)
-#define NETIF_F_HW_VLAN_TX	__NETIF_F(HW_VLAN_TX)
+#define NETIF_F_HW_VLAN_CTAG_FILTER __NETIF_F(HW_VLAN_CTAG_FILTER)
+#define NETIF_F_HW_VLAN_CTAG_RX	__NETIF_F(HW_VLAN_CTAG_RX)
+#define NETIF_F_HW_VLAN_CTAG_TX	__NETIF_F(HW_VLAN_CTAG_TX)
+#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
+#define NETIF_F_HW_VLAN_STAG_RX	__NETIF_F(HW_VLAN_STAG_RX)
+#define NETIF_F_HW_VLAN_STAG_TX	__NETIF_F(HW_VLAN_STAG_TX)
 #define NETIF_F_IP_CSUM		__NETIF_F(IP_CSUM)
 #define NETIF_F_IPV6_CSUM	__NETIF_F(IPV6_CSUM)
 #define NETIF_F_LLTX		__NETIF_F(LLTX)

@@ -784,13 +784,13 @@ struct netdev_fcoe_hbainfo {
 * 3. Update dev->stats asynchronously and atomically, and define
 *    neither operation.
 *
- * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
- *	If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
- *	this function is called when a VLAN id is registered.
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid);
+ *	If device support VLAN filtering this function is called when a
+ *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
- *	If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
- *	this function is called when a VLAN id is unregistered.
+ *	If device support VLAN filtering this function is called when a
+ *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *

@@ -934,9 +934,9 @@ struct net_device_ops {
 	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 
 	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
-						       unsigned short vid);
+						       __be16 proto, u16 vid);
 	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
-						        unsigned short vid);
+						        __be16 proto, u16 vid);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	void			(*ndo_poll_controller)(struct net_device *dev);
 	int			(*ndo_netpoll_setup)(struct net_device *dev,

@@ -387,6 +387,7 @@ typedef unsigned char *sk_buff_data_t;
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@dropcount: total number of sk_receive_queue overflows
+*	@vlan_proto: vlan encapsulation protocol
 *	@vlan_tci: vlan tag control information
 *	@inner_transport_header: Inner transport layer header (encapsulation)
 *	@inner_network_header: Network layer header (encapsulation)

@@ -465,6 +466,7 @@ struct sk_buff {
 
 	__u32			rxhash;
 
+	__be16			vlan_proto;
 	__u16			vlan_tci;
 
 #ifdef CONFIG_NET_SCHED

@@ -250,6 +250,7 @@ enum {
 	IFLA_VLAN_FLAGS,
 	IFLA_VLAN_EGRESS_QOS,
 	IFLA_VLAN_INGRESS_QOS,
+	IFLA_VLAN_PROTOCOL,
 	__IFLA_VLAN_MAX,
 };
 

@@ -3,7 +3,7 @@
 #
 
 config VLAN_8021Q
-	tristate "802.1Q VLAN Support"
+	tristate "802.1Q/802.1ad VLAN Support"
 	---help---
 	  Select this and you will be able to create 802.1Q VLAN interfaces
 	  on your ethernet interfaces. 802.1Q VLAN supports almost
@@ -51,14 +51,18 @@ const char vlan_version[] = DRV_VERSION;
 
 /* End of global variables definitions. */
 
-static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
+static int vlan_group_prealloc_vid(struct vlan_group *vg,
+				   __be16 vlan_proto, u16 vlan_id)
 {
 	struct net_device **array;
+	unsigned int pidx, vidx;
 	unsigned int size;
 
 	ASSERT_RTNL();
 
-	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+	pidx  = vlan_proto_idx(vlan_proto);
+	vidx  = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
+	array = vg->vlan_devices_arrays[pidx][vidx];
 	if (array != NULL)
 		return 0;
 

@@ -67,7 +71,7 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
 	if (array == NULL)
 		return -ENOBUFS;
 
-	vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN] = array;
+	vg->vlan_devices_arrays[pidx][vidx] = array;
 	return 0;
 }
 

@@ -93,7 +97,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	if (vlan->flags & VLAN_FLAG_GVRP)
 		vlan_gvrp_request_leave(dev);
 
-	vlan_group_set_device(grp, vlan_id, NULL);
+	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);
 	/* Because unregister_netdevice_queue() makes sure at least one rcu
 	 * grace period is respected before device freeing,
 	 * we dont need to call synchronize_net() here.

@@ -112,13 +116,14 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	 * VLAN is not 0 (leave it there for 802.1p).
 	 */
 	if (vlan_id)
-		vlan_vid_del(real_dev, vlan_id);
+		vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
 
 	/* Get rid of the vlan's reference to real_dev */
 	dev_put(real_dev);
 }
 
-int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
+int vlan_check_real_dev(struct net_device *real_dev,
+			__be16 protocol, u16 vlan_id)
 {
 	const char *name = real_dev->name;
 

@@ -127,7 +132,7 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
 		return -EOPNOTSUPP;
 	}
 
-	if (vlan_find_dev(real_dev, vlan_id) != NULL)
+	if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL)
 		return -EEXIST;
 
 	return 0;

@@ -142,7 +147,7 @@ int register_vlan_dev(struct net_device *dev)
 	struct vlan_group *grp;
 	int err;
 
-	err = vlan_vid_add(real_dev, vlan_id);
+	err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
 	if (err)
 		return err;
 

@@ -160,7 +165,7 @@ int register_vlan_dev(struct net_device *dev)
 			goto out_uninit_gvrp;
 	}
 
-	err = vlan_group_prealloc_vid(grp, vlan_id);
+	err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
 	if (err < 0)
 		goto out_uninit_mvrp;
 

@@ -181,7 +186,7 @@ int register_vlan_dev(struct net_device *dev)
 	/* So, got the sucker initialized, now lets place
 	 * it into our local structure.
 	 */
-	vlan_group_set_device(grp, vlan_id, dev);
+	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
 	grp->nr_vlan_devs++;
 
 	return 0;

@@ -195,7 +200,7 @@ int register_vlan_dev(struct net_device *dev)
 	if (grp->nr_vlan_devs == 0)
 		vlan_gvrp_uninit_applicant(real_dev);
 out_vid_del:
-	vlan_vid_del(real_dev, vlan_id);
+	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
 	return err;
 }
 

@@ -213,7 +218,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 	if (vlan_id >= VLAN_VID_MASK)
 		return -ERANGE;
 
-	err = vlan_check_real_dev(real_dev, vlan_id);
+	err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id);
 	if (err < 0)
 		return err;
 

@@ -255,6 +260,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 	new_dev->mtu = real_dev->mtu;
 	new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT);
 
+	vlan_dev_priv(new_dev)->vlan_proto = htons(ETH_P_8021Q);
 	vlan_dev_priv(new_dev)->vlan_id = vlan_id;
 	vlan_dev_priv(new_dev)->real_dev = real_dev;
 	vlan_dev_priv(new_dev)->dent = NULL;

@@ -301,7 +307,7 @@ static void vlan_transfer_features(struct net_device *dev,
 {
 	vlandev->gso_max_size = dev->gso_max_size;
 
-	if (dev->features & NETIF_F_HW_VLAN_TX)
+	if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
 		vlandev->hard_header_len = dev->hard_header_len;
 	else
 		vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;

@@ -341,16 +347,17 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	int i, flgs;
 	struct net_device *vlandev;
 	struct vlan_dev_priv *vlan;
+	bool last = false;
 	LIST_HEAD(list);
 
 	if (is_vlan_dev(dev))
 		__vlan_device_event(dev, event);
 
 	if ((event == NETDEV_UP) &&
-	    (dev->features & NETIF_F_HW_VLAN_FILTER)) {
+	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
 		pr_info("adding VLAN 0 to HW filter on device %s\n",
 			dev->name);
-		vlan_vid_add(dev, 0);
+		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
 	}
 
 	vlan_info = rtnl_dereference(dev->vlan_info);

@@ -365,22 +372,13 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	switch (event) {
 	case NETDEV_CHANGE:
 		/* Propagate real device state to vlan devices */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev)
 			netif_stacked_transfer_operstate(dev, vlandev);
-		}
 		break;
 
 	case NETDEV_CHANGEADDR:
 		/* Adjust unicast filters on underlying device */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev) {
 			flgs = vlandev->flags;
 			if (!(flgs & IFF_UP))
 				continue;

@@ -390,11 +388,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		break;
 
 	case NETDEV_CHANGEMTU:
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev) {
 			if (vlandev->mtu <= dev->mtu)
 				continue;
 

@@ -404,26 +398,16 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 
 	case NETDEV_FEAT_CHANGE:
 		/* Propagate device features to underlying device */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev)
 			vlan_transfer_features(dev, vlandev);
-		}
-
 		break;
 
 	case NETDEV_DOWN:
-		if (dev->features & NETIF_F_HW_VLAN_FILTER)
-			vlan_vid_del(dev, 0);
+		if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+			vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
 
 		/* Put all VLANs for this dev in the down state too. */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev) {
 			flgs = vlandev->flags;
 			if (!(flgs & IFF_UP))
 				continue;

@@ -437,11 +421,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 
 	case NETDEV_UP:
 		/* Put all VLANs for this dev in the up state too. */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev) {
 			flgs = vlandev->flags;
 			if (flgs & IFF_UP)
 				continue;

@@ -458,17 +438,15 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		if (dev->reg_state != NETREG_UNREGISTERING)
 			break;
 
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev) {
 			/* removal of last vid destroys vlan_info, abort
 			 * afterwards */
 			if (vlan_info->nr_vids == 1)
-				i = VLAN_N_VID;
+				last = true;
 
 			unregister_vlan_dev(vlandev, &list);
+			if (last)
+				break;
 		}
 		unregister_netdevice_many(&list);
 		break;

@@ -482,13 +460,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	case NETDEV_NOTIFY_PEERS:
 	case NETDEV_BONDING_FAILOVER:
 		/* Propagate to vlan devices */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
+		vlan_group_for_each_dev(grp, i, vlandev)
 			call_netdevice_notifiers(event, vlandev);
-		}
 		break;
 	}
 
@@ -49,6 +49,7 @@ struct netpoll;
 * @ingress_priority_map: ingress priority mappings
 * @nr_egress_mappings: number of egress priority mappings
 * @egress_priority_map: hash of egress priority mappings
+* @vlan_proto: VLAN encapsulation protocol
 * @vlan_id: VLAN identifier
 * @flags: device flags
 * @real_dev: underlying netdevice

@@ -62,6 +63,7 @@ struct vlan_dev_priv {
 	unsigned int				nr_egress_mappings;
 	struct vlan_priority_tci_mapping	*egress_priority_map[16];
 
+	__be16					vlan_proto;
 	u16					vlan_id;
 	u16					flags;
 

@@ -87,10 +89,17 @@ static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
 #define VLAN_GROUP_ARRAY_SPLIT_PARTS  8
 #define VLAN_GROUP_ARRAY_PART_LEN     (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
 
+enum vlan_protos {
+	VLAN_PROTO_8021Q	= 0,
+	VLAN_PROTO_8021AD,
+	VLAN_PROTO_NUM,
+};
+
 struct vlan_group {
 	unsigned int		nr_vlan_devs;
 	struct hlist_node	hlist;	/* linked list */
-	struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
+	struct net_device **vlan_devices_arrays[VLAN_PROTO_NUM]
+					       [VLAN_GROUP_ARRAY_SPLIT_PARTS];
 };
 
 struct vlan_info {

@@ -103,37 +112,66 @@ struct vlan_info {
 	struct rcu_head		rcu;
 };
 
-static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
-						       u16 vlan_id)
+static inline unsigned int vlan_proto_idx(__be16 proto)
 {
+	switch (proto) {
+	case __constant_htons(ETH_P_8021Q):
+		return VLAN_PROTO_8021Q;
+	case __constant_htons(ETH_P_8021AD):
+		return VLAN_PROTO_8021AD;
+	default:
+		BUG();
+	}
+}
+
+static inline struct net_device *__vlan_group_get_device(struct vlan_group *vg,
+							 unsigned int pidx,
+							 u16 vlan_id)
+{
 	struct net_device **array;
-	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+
+	array = vg->vlan_devices_arrays[pidx]
+				       [vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
 	return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
 }
 
+static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
+							__be16 vlan_proto,
+							u16 vlan_id)
+{
+	return __vlan_group_get_device(vg, vlan_proto_idx(vlan_proto), vlan_id);
+}
+
 static inline void vlan_group_set_device(struct vlan_group *vg,
-					 u16 vlan_id,
+					 __be16 vlan_proto, u16 vlan_id,
 					 struct net_device *dev)
 {
 	struct net_device **array;
 	if (!vg)
 		return;
-	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+	array = vg->vlan_devices_arrays[vlan_proto_idx(vlan_proto)]
+				       [vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
 	array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
 }
 
 /* Must be invoked with rcu_read_lock or with RTNL. */
 static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
-					       u16 vlan_id)
+					       __be16 vlan_proto, u16 vlan_id)
 {
 	struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
 
 	if (vlan_info)
-		return vlan_group_get_device(&vlan_info->grp, vlan_id);
+		return vlan_group_get_device(&vlan_info->grp,
+					     vlan_proto, vlan_id);
 
 	return NULL;
 }
 
+#define vlan_group_for_each_dev(grp, i, dev) \
+	for ((i) = 0; i < VLAN_PROTO_NUM * VLAN_N_VID; i++) \
+		if (((dev) = __vlan_group_get_device((grp), (i) / VLAN_N_VID, \
+							    (i) % VLAN_N_VID)))
+
 /* found in vlan_dev.c */
 void vlan_dev_set_ingress_priority(const struct net_device *dev,
 				   u32 skb_prio, u16 vlan_prio);

@@ -142,7 +180,8 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
 int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask);
 void vlan_dev_get_realdev_name(const struct net_device *dev, char *result);
 
-int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id);
+int vlan_check_real_dev(struct net_device *real_dev,
+			__be16 protocol, u16 vlan_id);
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev);
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
@@ -8,11 +8,12 @@
 bool vlan_do_receive(struct sk_buff **skbp)
 {
 	struct sk_buff *skb = *skbp;
+	__be16 vlan_proto = skb->vlan_proto;
 	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
 	struct net_device *vlan_dev;
 	struct vlan_pcpu_stats *rx_stats;
 
-	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
+	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
 	if (!vlan_dev)
 		return false;
 

@@ -38,7 +39,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
 		 * original position later
 		 */
 		skb_push(skb, offset);
-		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
+		skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
+					      skb->vlan_tci);
 		if (!skb)
 			return false;
 		skb_pull(skb, offset + VLAN_HLEN);

@@ -62,12 +64,13 @@ bool vlan_do_receive(struct sk_buff **skbp)
 
 /* Must be invoked with rcu_read_lock. */
 struct net_device *__vlan_find_dev_deep(struct net_device *dev,
-					u16 vlan_id)
+					__be16 vlan_proto, u16 vlan_id)
 {
 	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
 
 	if (vlan_info) {
-		return vlan_group_get_device(&vlan_info->grp, vlan_id);
+		return vlan_group_get_device(&vlan_info->grp,
+					     vlan_proto, vlan_id);
 	} else {
 		/*
 		 * Lower devices of master uppers (bonding, team) do not have

@@ -78,7 +81,8 @@ struct net_device *__vlan_find_dev_deep(struct net_device *dev,
 
 		upper_dev = netdev_master_upper_dev_get_rcu(dev);
 		if (upper_dev)
-			return __vlan_find_dev_deep(upper_dev, vlan_id);
+			return __vlan_find_dev_deep(upper_dev,
+						    vlan_proto, vlan_id);
 	}
 
 	return NULL;

@@ -125,7 +129,7 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
 
 	vhdr = (struct vlan_hdr *) skb->data;
 	vlan_tci = ntohs(vhdr->h_vlan_TCI);
-	__vlan_hwaccel_put_tag(skb, vlan_tci);
+	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
 
 	skb_pull_rcsum(skb, VLAN_HLEN);
 	vlan_set_encap_proto(skb, vhdr);

@@ -185,35 +189,49 @@ static struct vlan_info *vlan_info_alloc(struct net_device *dev)
 
 struct vlan_vid_info {
 	struct list_head list;
-	unsigned short vid;
+	__be16 proto;
+	u16 vid;
 	int refcount;
 };
 
+static bool vlan_hw_filter_capable(const struct net_device *dev,
+				   const struct vlan_vid_info *vid_info)
+{
+	if (vid_info->proto == htons(ETH_P_8021Q) &&
+	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+		return true;
+	if (vid_info->proto == htons(ETH_P_8021AD) &&
+	    dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
+		return true;
+	return false;
+}
+
 static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
-					       unsigned short vid)
+					       __be16 proto, u16 vid)
 {
 	struct vlan_vid_info *vid_info;
 
 	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
-		if (vid_info->vid == vid)
+		if (vid_info->proto == proto && vid_info->vid == vid)
 			return vid_info;
 	}
 	return NULL;
 }
 
-static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid)
+static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
 {
 	struct vlan_vid_info *vid_info;
 
 	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
 	if (!vid_info)
 		return NULL;
+	vid_info->proto = proto;
 	vid_info->vid = vid;
 
 	return vid_info;
 }
 
-static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
+static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
 			  struct vlan_vid_info **pvid_info)
 {
 	struct net_device *dev = vlan_info->real_dev;

@@ -221,12 +239,12 @@ static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
 	struct vlan_vid_info *vid_info;
 	int err;
 
-	vid_info = vlan_vid_info_alloc(vid);
+	vid_info = vlan_vid_info_alloc(proto, vid);
 	if (!vid_info)
 		return -ENOMEM;
 
-	if (dev->features & NETIF_F_HW_VLAN_FILTER) {
-		err = ops->ndo_vlan_rx_add_vid(dev, vid);
+	if (vlan_hw_filter_capable(dev, vid_info)) {
+		err = ops->ndo_vlan_rx_add_vid(dev, proto, vid);
 		if (err) {
 			kfree(vid_info);
 			return err;

@@ -238,7 +256,7 @@ static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
 	return 0;
 }
 
-int vlan_vid_add(struct net_device *dev, unsigned short vid)
+int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
 {
 	struct vlan_info *vlan_info;
 	struct vlan_vid_info *vid_info;

@@ -254,9 +272,9 @@ int vlan_vid_add(struct net_device *dev, unsigned short vid)
 			return -ENOMEM;
 		vlan_info_created = true;
 	}
-	vid_info = vlan_vid_info_get(vlan_info, vid);
+	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
 	if (!vid_info) {
-		err = __vlan_vid_add(vlan_info, vid, &vid_info);
+		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
 		if (err)
 			goto out_free_vlan_info;
 	}

@@ -279,14 +297,15 @@ static void __vlan_vid_del(struct vlan_info *vlan_info,
 {
 	struct net_device *dev = vlan_info->real_dev;
 	const struct net_device_ops *ops = dev->netdev_ops;
-	unsigned short vid = vid_info->vid;
+	__be16 proto = vid_info->proto;
+	u16 vid = vid_info->vid;
 	int err;
 
-	if (dev->features & NETIF_F_HW_VLAN_FILTER) {
-		err = ops->ndo_vlan_rx_kill_vid(dev, vid);
+	if (vlan_hw_filter_capable(dev, vid_info)) {
+		err = ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
 		if (err) {
-			pr_warn("failed to kill vid %d for device %s\n",
-				vid, dev->name);
+			pr_warn("failed to kill vid %04x/%d for device %s\n",
+				proto, vid, dev->name);
 		}
 	}
 	list_del(&vid_info->list);

@@ -294,7 +313,7 @@ static void __vlan_vid_del(struct vlan_info *vlan_info,
 	vlan_info->nr_vids--;
 }
 
-void vlan_vid_del(struct net_device *dev, unsigned short vid)
+void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
 {
 	struct vlan_info *vlan_info;
 	struct vlan_vid_info *vid_info;

@@ -305,7 +324,7 @@ void vlan_vid_del(struct net_device *dev, unsigned short vid)
 	if (!vlan_info)
 		return;
 
-	vid_info = vlan_vid_info_get(vlan_info, vid);
+	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
 	if (!vid_info)
 		return;
 	vid_info->refcount--;

@@ -333,7 +352,7 @@ int vlan_vids_add_by_dev(struct net_device *dev,
 		return 0;
 
 	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
-		err = vlan_vid_add(dev, vid_info->vid);
+		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
 		if (err)
 			goto unwind;
 	}

@@ -343,7 +362,7 @@ int vlan_vids_add_by_dev(struct net_device *dev,
 	list_for_each_entry_continue_reverse(vid_info,
 					     &vlan_info->vid_list,
 					     list) {
-		vlan_vid_del(dev, vid_info->vid);
+		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
 	}
 
 	return err;

@@ -363,7 +382,7 @@ void vlan_vids_del_by_dev(struct net_device *dev,
 		return;
 
 	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
-		vlan_vid_del(dev, vid_info->vid);
+		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
 }
 EXPORT_SYMBOL(vlan_vids_del_by_dev);
 
@@ -99,6 +99,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 				const void *daddr, const void *saddr,
 				unsigned int len)
 {
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct vlan_hdr *vhdr;
 	unsigned int vhdrlen = 0;
 	u16 vlan_tci = 0;

@@ -120,8 +121,8 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 		else
 			vhdr->h_vlan_encapsulated_proto = htons(len);
 
-		skb->protocol = htons(ETH_P_8021Q);
-		type = ETH_P_8021Q;
+		skb->protocol = vlan->vlan_proto;
+		type = ntohs(vlan->vlan_proto);
 		vhdrlen = VLAN_HLEN;
 	}
 

@@ -161,12 +162,12 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
 	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
 	 */
-	if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
+	if (veth->h_vlan_proto != vlan->vlan_proto ||
 	    vlan->flags & VLAN_FLAG_REORDER_HDR) {
 		u16 vlan_tci;
 		vlan_tci = vlan->vlan_id;
 		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
-		skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
+		skb = __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
 	}
 
 	skb->dev = vlan->real_dev;

@@ -583,7 +584,7 @@ static int vlan_dev_init(struct net_device *dev)
 #endif
 
 	dev->needed_headroom = real_dev->needed_headroom;
-	if (real_dev->features & NETIF_F_HW_VLAN_TX) {
+	if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
 		dev->header_ops      = real_dev->header_ops;
 		dev->hard_header_len = real_dev->hard_header_len;
 	} else {

@@ -32,6 +32,8 @@ int vlan_gvrp_request_join(const struct net_device *dev)
 	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	__be16 vlan_id = htons(vlan->vlan_id);
 
+	if (vlan->vlan_proto != htons(ETH_P_8021Q))
+		return 0;
 	return garp_request_join(vlan->real_dev, &vlan_gvrp_app,
 				 &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
 }

@@ -41,6 +43,8 @@ void vlan_gvrp_request_leave(const struct net_device *dev)
 	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	__be16 vlan_id = htons(vlan->vlan_id);
 
+	if (vlan->vlan_proto != htons(ETH_P_8021Q))
+		return;
 	garp_request_leave(vlan->real_dev, &vlan_gvrp_app,
 			   &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
 }

@@ -38,6 +38,8 @@ int vlan_mvrp_request_join(const struct net_device *dev)
 	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	__be16 vlan_id = htons(vlan->vlan_id);
 
+	if (vlan->vlan_proto != htons(ETH_P_8021Q))
+		return 0;
 	return mrp_request_join(vlan->real_dev, &vlan_mrp_app,
 				&vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
 }

@@ -47,6 +49,8 @@ void vlan_mvrp_request_leave(const struct net_device *dev)
 	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	__be16 vlan_id = htons(vlan->vlan_id);
 
+	if (vlan->vlan_proto != htons(ETH_P_8021Q))
+		return;
 	mrp_request_leave(vlan->real_dev, &vlan_mrp_app,
 			  &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
 }
@@ -23,6 +23,7 @@ static const struct nla_policy vlan_policy[IFLA_VLAN_MAX + 1] = {
 	[IFLA_VLAN_FLAGS]	= { .len = sizeof(struct ifla_vlan_flags) },
 	[IFLA_VLAN_EGRESS_QOS]	= { .type = NLA_NESTED },
 	[IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED },
+	[IFLA_VLAN_PROTOCOL]	= { .type = NLA_U16 },
 };
 
 static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = {

@@ -53,6 +54,16 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
 	if (!data)
 		return -EINVAL;
 
+	if (data[IFLA_VLAN_PROTOCOL]) {
+		switch (nla_get_be16(data[IFLA_VLAN_PROTOCOL])) {
+		case __constant_htons(ETH_P_8021Q):
+		case __constant_htons(ETH_P_8021AD):
+			break;
+		default:
+			return -EPROTONOSUPPORT;
+		}
+	}
+
 	if (data[IFLA_VLAN_ID]) {
 		id = nla_get_u16(data[IFLA_VLAN_ID]);
 		if (id >= VLAN_VID_MASK)

@@ -107,6 +118,7 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
 {
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct net_device *real_dev;
+	__be16 proto;
 	int err;
 
 	if (!data[IFLA_VLAN_ID])

@@ -118,11 +130,17 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
 	if (!real_dev)
 		return -ENODEV;
 
-	vlan->vlan_id  = nla_get_u16(data[IFLA_VLAN_ID]);
-	vlan->real_dev = real_dev;
-	vlan->flags    = VLAN_FLAG_REORDER_HDR;
+	if (data[IFLA_VLAN_PROTOCOL])
+		proto = nla_get_be16(data[IFLA_VLAN_PROTOCOL]);
+	else
+		proto = htons(ETH_P_8021Q);
+
+	vlan->vlan_proto = proto;
+	vlan->vlan_id	 = nla_get_u16(data[IFLA_VLAN_ID]);
+	vlan->real_dev	 = real_dev;
+	vlan->flags	 = VLAN_FLAG_REORDER_HDR;
 
-	err = vlan_check_real_dev(real_dev, vlan->vlan_id);
+	err = vlan_check_real_dev(real_dev, vlan->vlan_proto, vlan->vlan_id);
 	if (err < 0)
 		return err;
 

@@ -151,7 +169,8 @@ static size_t vlan_get_size(const struct net_device *dev)
 {
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 
-	return nla_total_size(2) +	/* IFLA_VLAN_ID */
+	return nla_total_size(2) +	/* IFLA_VLAN_PROTOCOL */
+	       nla_total_size(2) +	/* IFLA_VLAN_ID */
 	       sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
 	       vlan_qos_map_size(vlan->nr_ingress_mappings) +
 	       vlan_qos_map_size(vlan->nr_egress_mappings);

@@ -166,7 +185,8 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	struct nlattr *nest;
 	unsigned int i;
 
-	if (nla_put_u16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id))
+	if (nla_put_be16(skb, IFLA_VLAN_PROTOCOL, vlan->vlan_proto) ||
+	    nla_put_u16(skb, IFLA_VLAN_ID, vlan->vlan_id))
 		goto nla_put_failure;
 	if (vlan->flags) {
 		f.flags = vlan->flags;
@@ -341,7 +341,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
 	}
 
 	if (vid != -1)
-		skb = vlan_insert_tag(skb, vid);
+		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vid);
 
 	skb_reset_mac_header(skb);
 	skb->protocol = eth_type_trans(skb, soft_iface);

@@ -348,10 +348,10 @@ void br_dev_setup(struct net_device *dev)
 
 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
 			NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_LLTX |
-			NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX;
+			NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_CTAG_TX;
 	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
 			   NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
-			   NETIF_F_HW_VLAN_TX;
+			   NETIF_F_HW_VLAN_CTAG_TX;
 
 	br->dev = dev;
 	spin_lock_init(&br->lock);

@@ -535,7 +535,8 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct
 	if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
 		return br;
 
-	vlan = __vlan_find_dev_deep(br, vlan_tx_tag_get(skb) & VLAN_VID_MASK);
+	vlan = __vlan_find_dev_deep(br, skb->vlan_proto,
+				    vlan_tx_tag_get(skb) & VLAN_VID_MASK);
 
 	return vlan ? vlan : br;
 }

@@ -34,6 +34,7 @@ static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags)
 
 static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
 {
+	const struct net_device_ops *ops;
 	struct net_bridge_port *p = NULL;
 	struct net_bridge *br;
 	struct net_device *dev;

@@ -53,15 +54,17 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
 		br = v->parent.br;
 		dev = br->dev;
 	}
+	ops = dev->netdev_ops;
 
-	if (p && (dev->features & NETIF_F_HW_VLAN_FILTER)) {
+	if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
 		/* Add VLAN to the device filter if it is supported.
 		 * Stricly speaking, this is not necessary now, since
 		 * devices are made promiscuous by the bridge, but if
 		 * that ever changes this code will allow tagged
 		 * traffic to enter the bridge.
 		 */
-		err = dev->netdev_ops->ndo_vlan_rx_add_vid(dev, vid);
+		err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q),
+					       vid);
 		if (err)
 			return err;
 	}

@@ -82,8 +85,8 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
 	return 0;
 
 out_filt:
-	if (p && (dev->features & NETIF_F_HW_VLAN_FILTER))
-		dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, vid);
+	if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+		ops->ndo_vlan_rx_kill_vid(dev, htons(ETH_P_8021Q), vid);
 	return err;
 }
 

@@ -97,9 +100,10 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
 
 	if (v->port_idx && vid) {
 		struct net_device *dev = v->parent.port->dev;
+		const struct net_device_ops *ops = dev->netdev_ops;
 
-		if (dev->features & NETIF_F_HW_VLAN_FILTER)
-			dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, vid);
+		if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+			ops->ndo_vlan_rx_kill_vid(dev, htons(ETH_P_8021Q), vid);
 	}
 
 	clear_bit(vid, v->vlan_bitmap);

@@ -171,7 +175,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
 	 * mac header.
 	 */
 	skb_push(skb, ETH_HLEN);
-	skb = __vlan_put_tag(skb, skb->vlan_tci);
+	skb = __vlan_put_tag(skb, skb->vlan_proto, skb->vlan_tci);
 	if (!skb)
 		goto out;
 	/* put skb->data back to where it was */

@@ -213,7 +217,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
 		/* PVID is set on this port.  Any untagged ingress
 		 * frame is considered to belong to this vlan.
 		 */
-		__vlan_hwaccel_put_tag(skb, pvid);
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid);
 		return true;
 	}
 
@@ -2212,7 +2212,7 @@ __be16 skb_network_protocol(struct sk_buff *skb)
 	__be16 type = skb->protocol;
 	int vlan_depth = ETH_HLEN;
 
-	while (type == htons(ETH_P_8021Q)) {
+	while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
 		struct vlan_hdr *vh;
 
 		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))

@@ -2428,20 +2428,22 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
 		features &= ~NETIF_F_GSO_MASK;
 
-	if (protocol == htons(ETH_P_8021Q)) {
+	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
 	} else if (!vlan_tx_tag_present(skb)) {
 		return harmonize_features(skb, protocol, features);
 	}
 
-	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
+	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+					       NETIF_F_HW_VLAN_STAG_TX);
 
-	if (protocol != htons(ETH_P_8021Q)) {
+	if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) {
 		return harmonize_features(skb, protocol, features);
 	} else {
 		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
-				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
+				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+				NETIF_F_HW_VLAN_STAG_TX;
 		return harmonize_features(skb, protocol, features);
 	}
 }

@@ -2482,8 +2484,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 		features = netif_skb_features(skb);
 
 		if (vlan_tx_tag_present(skb) &&
-		    !(features & NETIF_F_HW_VLAN_TX)) {
-			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
+		    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
+			skb = __vlan_put_tag(skb, skb->vlan_proto,
+					     vlan_tx_tag_get(skb));
 			if (unlikely(!skb))
 				goto out;
 

@@ -3359,6 +3362,7 @@ static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
 	case __constant_htons(ETH_P_IP):
 	case __constant_htons(ETH_P_IPV6):
 	case __constant_htons(ETH_P_8021Q):
+	case __constant_htons(ETH_P_8021AD):
 		return true;
 	default:
 		return false;

@@ -3399,7 +3403,8 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 
 	__this_cpu_inc(softnet_data.processed);
 
-	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
+	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
 		skb = vlan_untag(skb);
 		if (unlikely(!skb))
 			goto unlock;

@@ -5180,7 +5185,8 @@ int register_netdevice(struct net_device *dev)
 		}
 	}
 
-	if (((dev->hw_features | dev->features) & NETIF_F_HW_VLAN_FILTER) &&
+	if (((dev->hw_features | dev->features) &
+	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
 	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
 	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
 		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
@@ -60,10 +60,10 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
 	[NETIF_F_IPV6_CSUM_BIT] =        "tx-checksum-ipv6",
 	[NETIF_F_HIGHDMA_BIT] =          "highdma",
 	[NETIF_F_FRAGLIST_BIT] =         "tx-scatter-gather-fraglist",
-	[NETIF_F_HW_VLAN_TX_BIT] =       "tx-vlan-hw-insert",
+	[NETIF_F_HW_VLAN_CTAG_TX_BIT] =  "tx-vlan-ctag-hw-insert",
 
-	[NETIF_F_HW_VLAN_RX_BIT] =       "rx-vlan-hw-parse",
-	[NETIF_F_HW_VLAN_FILTER_BIT] =   "rx-vlan-filter",
+	[NETIF_F_HW_VLAN_CTAG_RX_BIT] =  "rx-vlan-ctag-hw-parse",
+	[NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-ctag-filter",
 	[NETIF_F_VLAN_CHALLENGED_BIT] =  "vlan-challenged",
 	[NETIF_F_GSO_BIT] =              "tx-generic-segmentation",
 	[NETIF_F_LLTX_BIT] =             "tx-lockless",

@@ -267,18 +267,19 @@ static int ethtool_set_one_feature(struct net_device *dev,
 
 #define ETH_ALL_FLAGS    (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \
 			  ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH)
-#define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_RX | \
-			  NETIF_F_HW_VLAN_TX | NETIF_F_NTUPLE | NETIF_F_RXHASH)
+#define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_CTAG_RX | \
+			  NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_NTUPLE | \
+			  NETIF_F_RXHASH)
 
 static u32 __ethtool_get_flags(struct net_device *dev)
 {
 	u32 flags = 0;
 
-	if (dev->features & NETIF_F_LRO)	flags |= ETH_FLAG_LRO;
-	if (dev->features & NETIF_F_HW_VLAN_RX)	flags |= ETH_FLAG_RXVLAN;
-	if (dev->features & NETIF_F_HW_VLAN_TX)	flags |= ETH_FLAG_TXVLAN;
-	if (dev->features & NETIF_F_NTUPLE)	flags |= ETH_FLAG_NTUPLE;
-	if (dev->features & NETIF_F_RXHASH)	flags |= ETH_FLAG_RXHASH;
+	if (dev->features & NETIF_F_LRO)	     flags |= ETH_FLAG_LRO;
+	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) flags |= ETH_FLAG_RXVLAN;
+	if (dev->features & NETIF_F_HW_VLAN_CTAG_TX) flags |= ETH_FLAG_TXVLAN;
+	if (dev->features & NETIF_F_NTUPLE)	     flags |= ETH_FLAG_NTUPLE;
+	if (dev->features & NETIF_F_RXHASH)	     flags |= ETH_FLAG_RXHASH;
 
 	return flags;
 }

@@ -291,8 +292,8 @@ static int __ethtool_set_flags(struct net_device *dev, u32 data)
 		return -EINVAL;
 
 	if (data & ETH_FLAG_LRO)	features |= NETIF_F_LRO;
-	if (data & ETH_FLAG_RXVLAN)	features |= NETIF_F_HW_VLAN_RX;
-	if (data & ETH_FLAG_TXVLAN)	features |= NETIF_F_HW_VLAN_TX;
+	if (data & ETH_FLAG_RXVLAN)	features |= NETIF_F_HW_VLAN_CTAG_RX;
+	if (data & ETH_FLAG_TXVLAN)	features |= NETIF_F_HW_VLAN_CTAG_TX;
 	if (data & ETH_FLAG_NTUPLE)	features |= NETIF_F_NTUPLE;
 	if (data & ETH_FLAG_RXHASH)	features |= NETIF_F_RXHASH;
 

@@ -383,8 +383,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 		if (__netif_tx_trylock(txq)) {
 			if (!netif_xmit_stopped(txq)) {
 				if (vlan_tx_tag_present(skb) &&
-				    !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) {
-					skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
+				    !vlan_hw_offload_capable(netif_skb_features(skb),
+							     skb->vlan_proto)) {
+					skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
 					if (unlikely(!skb))
 						break;
 					skb->vlan_tci = 0;
@@ -707,6 +707,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->tc_verd		= old->tc_verd;
 #endif
 #endif
+	new->vlan_proto		= old->vlan_proto;
 	new->vlan_tci		= old->vlan_tci;
 
 	skb_copy_secmark(new, old);

@@ -98,7 +98,7 @@ static int pop_vlan(struct sk_buff *skb)
 	if (unlikely(err))
 		return err;
 
-	__vlan_hwaccel_put_tag(skb, ntohs(tci));
+	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
 	return 0;
 }
 

@@ -110,7 +110,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla
 		/* push down current VLAN tag */
 		current_tag = vlan_tx_tag_get(skb);
 
-		if (!__vlan_put_tag(skb, current_tag))
+		if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
 			return -ENOMEM;
 
 		if (skb->ip_summed == CHECKSUM_COMPLETE)

@@ -118,7 +118,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla
 					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
 
 	}
-	__vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
+	__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
 	return 0;
 }
 

@@ -401,7 +401,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
 		if (!nskb)
 			return -ENOMEM;
 
-		nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
+		nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
 		if (!nskb)
 			return -ENOMEM;
 