qede: Use NETIF_F_GRO_HW.

Advertise NETIF_F_GRO_HW and set edev->gro_disable according to the
feature flag.  Add qede_fix_features() to drop NETIF_F_GRO_HW if XDP
is running, the MTU is too large for GRO_HW, or GRO is not enabled.
qede_change_mtu() likewise clears GRO_HW when the new MTU cannot
support it.

Cc: Ariel Elior <Ariel.Elior@cavium.com>
Cc: everest-linux-l2@cavium.com
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Acked-by: Manish Chopra <manish.chopra@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 18c602dee4
parent 3c3def5fc6
Author:    Michael Chan <michael.chan@broadcom.com>
Date:      2017-12-16 03:09:44 -05:00
Committer: David S. Miller <davem@davemloft.net>

4 changed files with 25 additions and 18 deletions
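The fixup rule described in the commit message can be looked at in isolation as a plain mask function. The sketch below is an illustrative userspace approximation, not driver code: the demo_* names and flag values are invented for the example, and only the decision logic follows qede_fix_features() as added in the diff.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in feature bits for the example only (not the kernel's NETIF_F_* values). */
#define DEMO_F_GRO	(1u << 0)
#define DEMO_F_GRO_HW	(1u << 1)
#define DEMO_PAGE_SIZE	4096

/* Mirrors the qede_fix_features() rule: hardware GRO is dropped when an
 * XDP program is attached, the MTU exceeds one page, or software GRO is off.
 */
static unsigned int demo_fix_features(bool xdp_attached, int mtu,
				      unsigned int features)
{
	if (xdp_attached || mtu > DEMO_PAGE_SIZE || !(features & DEMO_F_GRO))
		features &= ~DEMO_F_GRO_HW;

	return features;
}

int main(void)
{
	/* Requesting GRO_HW with XDP attached: the flag is masked off. */
	unsigned int f = demo_fix_features(true, 1500,
					   DEMO_F_GRO | DEMO_F_GRO_HW);

	printf("GRO_HW %s\n", (f & DEMO_F_GRO_HW) ? "kept" : "dropped");
	return 0;
}

With the real driver the same behavior is visible through ethtool -K, where the NETIF_F_GRO_HW bit is exposed as the rx-gro-hw feature.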

@@ -494,6 +494,8 @@ int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
 void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
 int qede_configure_vlan_filters(struct qede_dev *edev);
 
+netdev_features_t qede_fix_features(struct net_device *dev,
+				    netdev_features_t features);
 int qede_set_features(struct net_device *dev, netdev_features_t features);
 void qede_set_rx_mode(struct net_device *ndev);
 void qede_config_rx_mode(struct net_device *ndev);

@@ -940,6 +940,9 @@ int qede_change_mtu(struct net_device *ndev, int new_mtu)
 	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
 		   "Configuring MTU size of %d\n", new_mtu);
 
+	if (new_mtu > PAGE_SIZE)
+		ndev->features &= ~NETIF_F_GRO_HW;
+
 	/* Set the mtu field and re-start the interface if needed */
 	args.u.mtu = new_mtu;
 	args.func = &qede_update_mtu;

@@ -895,19 +895,26 @@ static void qede_set_features_reload(struct qede_dev *edev,
 	edev->ndev->features = args->u.features;
 }
 
+netdev_features_t qede_fix_features(struct net_device *dev,
+				    netdev_features_t features)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
+	    !(features & NETIF_F_GRO))
+		features &= ~NETIF_F_GRO_HW;
+
+	return features;
+}
+
 int qede_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct qede_dev *edev = netdev_priv(dev);
 	netdev_features_t changes = features ^ dev->features;
 	bool need_reload = false;
 
-	/* No action needed if hardware GRO is disabled during driver load */
-	if (changes & NETIF_F_GRO) {
-		if (dev->features & NETIF_F_GRO)
-			need_reload = !edev->gro_disable;
-		else
-			need_reload = edev->gro_disable;
-	}
+	if (changes & NETIF_F_GRO_HW)
+		need_reload = true;
 
 	if (need_reload) {
 		struct qede_reload_args args;

@@ -545,6 +545,7 @@ static const struct net_device_ops qede_netdev_ops = {
 #endif
 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+	.ndo_fix_features = qede_fix_features,
 	.ndo_set_features = qede_set_features,
 	.ndo_get_stats64 = qede_get_stats64,
 #ifdef CONFIG_QED_SRIOV
@@ -572,6 +573,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
 	.ndo_change_mtu = qede_change_mtu,
 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+	.ndo_fix_features = qede_fix_features,
 	.ndo_set_features = qede_set_features,
 	.ndo_get_stats64 = qede_get_stats64,
 	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
@@ -589,6 +591,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
 	.ndo_change_mtu = qede_change_mtu,
 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+	.ndo_fix_features = qede_fix_features,
 	.ndo_set_features = qede_set_features,
 	.ndo_get_stats64 = qede_get_stats64,
 	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
@@ -676,7 +679,7 @@ static void qede_init_ndev(struct qede_dev *edev)
 	ndev->priv_flags |= IFF_UNICAST_FLT;
 
 	/* user-changeble features */
-	hw_features = NETIF_F_GRO | NETIF_F_SG |
+	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
 		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		      NETIF_F_TSO | NETIF_F_TSO6;
 
@@ -1228,18 +1231,9 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
 	dma_addr_t mapping;
 	int i;
 
-	/* Don't perform FW aggregations in case of XDP */
-	if (edev->xdp_prog)
-		edev->gro_disable = 1;
-
 	if (edev->gro_disable)
 		return 0;
 
-	if (edev->ndev->mtu > PAGE_SIZE) {
-		edev->gro_disable = 1;
-		return 0;
-	}
-
 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
 		struct sw_rx_data *replace_buf = &tpa_info->buffer;
@@ -1269,6 +1263,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
 err:
 	qede_free_sge_mem(edev, rxq);
 	edev->gro_disable = 1;
+	edev->ndev->features &= ~NETIF_F_GRO_HW;
 	return -ENOMEM;
 }
 
@@ -1511,7 +1506,7 @@ static void qede_init_fp(struct qede_dev *edev)
 			 edev->ndev->name, queue_id);
 	}
 
-	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
+	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
 }
 
 static int qede_set_real_num_queues(struct qede_dev *edev)