Merge branch 'bnxt_en-Fixes-for-net'

Michael Chan says:

====================
bnxt_en: Fixes for net.

This bug fix series includes NULL pointer fixes in the ethtool -x code
path and in the error clean up path when freeing IRQs, a ring accounting
bug that missed rings used by the RDMA driver, and 3 bug fixes related to
TC Flower and VF-reps.

v2: Fixed commit message of patch 4. Changed the pound sign to $ sign in
front of the ip command.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9cf74f593a

@@ -6090,7 +6090,7 @@ static void bnxt_free_irq(struct bnxt *bp)
 	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
 	bp->dev->rx_cpu_rmap = NULL;
 #endif
-	if (!bp->irq_tbl)
+	if (!bp->irq_tbl || !bp->bnapi)
 		return;
 
 	for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -7686,6 +7686,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		rx_rings <<= 1;
 	cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
+	if (bp->flags & BNXT_FLAG_NEW_RM)
+		cp += bnxt_get_ulp_msix_num(bp);
 	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
 				     vnics);
 }
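
The two added lines make bnxt_check_rings() also count the MSI-X vectors reserved for the RDMA (ULP) driver when the new resource manager is active, so the firmware check covers every completion ring the host will actually request. Below is a stand-alone sketch of that arithmetic, with hypothetical ring counts and a plain parameter standing in for bnxt_get_ulp_msix_num(); it is an illustration, not driver code.

#include <stdio.h>

static int max_int(int a, int b)
{
	return a > b ? a : b;
}

static int completion_rings_needed(int tx, int rx, int shared, int ulp_msix)
{
	/* Shared mode: TX and RX share completion rings, so take the max;
	 * otherwise each TX and each RX ring needs its own completion ring.
	 */
	int cp = shared ? max_int(tx, rx) : tx + rx;

	/* The fix: also count the vectors held by the RDMA driver
	 * (in the driver this is done only when BNXT_FLAG_NEW_RM is set).
	 */
	cp += ulp_msix;
	return cp;
}

int main(void)
{
	/* e.g. 8 TX rings, 8 RX rings, non-shared, 2 RDMA vectors -> 18 */
	printf("cp = %d\n", completion_rings_needed(8, 8, 0, 2));
	return 0;
}
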
@@ -870,17 +870,22 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
 			 u8 *hfunc)
 {
 	struct bnxt *bp = netdev_priv(dev);
-	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+	struct bnxt_vnic_info *vnic;
 	int i = 0;
 
 	if (hfunc)
 		*hfunc = ETH_RSS_HASH_TOP;
 
-	if (indir)
+	if (!bp->vnic_info)
+		return 0;
+
+	vnic = &bp->vnic_info[0];
+	if (indir && vnic->rss_table) {
 		for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
 			indir[i] = le16_to_cpu(vnic->rss_table[i]);
+	}
 
-	if (key)
+	if (key && vnic->rss_hash_key)
 		memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
 
 	return 0;
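
bp->vnic_info is freed when the device is closed, so running ethtool -x on a downed interface used to dereference a NULL pointer; the hunk above returns early instead and also checks the per-VNIC RSS table and hash key before copying them out. A distilled user-space sketch of the same "check the array before taking an element pointer" pattern follows; every name in it is a hypothetical stand-in, not a driver structure.

#include <stdio.h>
#include <string.h>

struct rss_ctx {
	unsigned short *rss_table;    /* NULL until the device is opened */
	unsigned char  *rss_hash_key;
};

/* Copy out whatever RSS state exists; returning 0 even when nothing is
 * allocated mirrors the behaviour of the fixed bnxt_get_rxfh().
 */
static int get_rss(struct rss_ctx *ctxs, unsigned short *indir, size_t n,
		   unsigned char *key, size_t key_len)
{
	struct rss_ctx *ctx;

	if (!ctxs)                    /* device down: nothing allocated */
		return 0;

	ctx = &ctxs[0];               /* safe only after the check above */
	if (indir && ctx->rss_table)
		memcpy(indir, ctx->rss_table, n * sizeof(*indir));
	if (key && ctx->rss_hash_key)
		memcpy(key, ctx->rss_hash_key, key_len);
	return 0;
}

int main(void)
{
	unsigned short indir[4];
	unsigned char key[8];

	/* Device down: no contexts, no crash, just an empty answer. */
	printf("rc = %d\n", get_rss(NULL, indir, 4, key, sizeof(key)));
	return 0;
}
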
@@ -377,6 +377,30 @@ static bool is_wildcard(void *mask, int len)
 	return true;
 }
 
+static bool is_exactmatch(void *mask, int len)
+{
+	const u8 *p = mask;
+	int i;
+
+	for (i = 0; i < len; i++)
+		if (p[i] != 0xff)
+			return false;
+
+	return true;
+}
+
+static bool bits_set(void *key, int len)
+{
+	const u8 *p = key;
+	int i;
+
+	for (i = 0; i < len; i++)
+		if (p[i] != 0)
+			return true;
+
+	return false;
+}
+
 static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
 				    __le16 ref_flow_handle,
 				    __le32 tunnel_handle, __le16 *flow_handle)
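
The two helpers added above classify a mask as a full exact match (every byte 0xff) and a key as non-empty (any bit set); bnxt_tc_can_offload() combines them in the next hunk to reject partially wildcarded fields. A small user-space program exercising copies of the helpers on made-up MAC masks:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned char u8;

/* Copies of the helpers added in the hunk above. */
static bool is_exactmatch(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0xff)
			return false;

	return true;
}

static bool bits_set(void *key, int len)
{
	const u8 *p = key;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0)
			return true;

	return false;
}

int main(void)
{
	u8 full[6]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	u8 partial[6] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };
	u8 smac[6]    = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	printf("full mask exact?    %d\n", is_exactmatch(full, 6));    /* 1 */
	printf("partial mask exact? %d\n", is_exactmatch(partial, 6)); /* 0 */
	/* The condition bnxt_tc_can_offload() now rejects: key set, mask partial */
	printf("reject flow?        %d\n",
	       bits_set(smac, 6) && !is_exactmatch(partial, 6));       /* 1 */
	return 0;
}
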
@@ -764,6 +788,41 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
 		return false;
 	}
 
+	/* Currently source/dest MAC cannot be partial wildcard */
+	if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) &&
+	    !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) {
+		netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n");
+		return false;
+	}
+	if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) &&
+	    !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) {
+		netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n");
+		return false;
+	}
+
+	/* Currently VLAN fields cannot be partial wildcard */
+	if (bits_set(&flow->l2_key.inner_vlan_tci,
+		     sizeof(flow->l2_key.inner_vlan_tci)) &&
+	    !is_exactmatch(&flow->l2_mask.inner_vlan_tci,
+			   sizeof(flow->l2_mask.inner_vlan_tci))) {
+		netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n");
+		return false;
+	}
+	if (bits_set(&flow->l2_key.inner_vlan_tpid,
+		     sizeof(flow->l2_key.inner_vlan_tpid)) &&
+	    !is_exactmatch(&flow->l2_mask.inner_vlan_tpid,
+			   sizeof(flow->l2_mask.inner_vlan_tpid))) {
+		netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n");
+		return false;
+	}
+
+	/* Currently Ethertype must be set */
+	if (!is_exactmatch(&flow->l2_mask.ether_type,
+			   sizeof(flow->l2_mask.ether_type))) {
+		netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n");
+		return false;
+	}
+
 	return true;
 }
 
@@ -992,8 +1051,10 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
 
 	/* Check if there's another flow using the same tunnel decap.
 	 * If not, add this tunnel to the table and resolve the other
-	 * tunnel header fileds
+	 * tunnel header fileds. Ignore src_port in the tunnel_key,
+	 * since it is not required for decap filters.
 	 */
+	decap_key->tp_src = 0;
 	decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
 					     &tc_info->decap_ht_params,
 					     decap_key);
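
As the updated comment says, the outer UDP source port is not required for decap filters, so it is zeroed before the tunnel key is used for the decap-table lookup; otherwise flows that differ only in source port would not share a decap entry. A tiny sketch with a hypothetical two-field key and a field comparison standing in for the hash-table lookup:

#include <stdio.h>

/* Hypothetical, much-reduced tunnel key, just enough to show why the
 * source port must be normalised before the key is used for a lookup.
 */
struct tunnel_key {
	unsigned int   dst_ip;  /* tunnel endpoint */
	unsigned short tp_src;  /* outer UDP source port, irrelevant for decap */
};

static int same_decap_entry(const struct tunnel_key *a, const struct tunnel_key *b)
{
	/* stand-in for the lookup over the whole key */
	return a->dst_ip == b->dst_ip && a->tp_src == b->tp_src;
}

int main(void)
{
	struct tunnel_key k1 = { .dst_ip = 0x0a000001, .tp_src = 40001 };
	struct tunnel_key k2 = { .dst_ip = 0x0a000001, .tp_src = 51234 };

	printf("before: shared entry? %d\n", same_decap_entry(&k1, &k2)); /* 0 */

	/* The fix: ignore src_port for decap filters by zeroing it first. */
	k1.tp_src = 0;
	k2.tp_src = 0;
	printf("after:  shared entry? %d\n", same_decap_entry(&k1, &k2)); /* 1 */
	return 0;
}
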
@@ -64,6 +64,31 @@ static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
 	return rc;
 }
 
+static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
+			      u16 *max_mtu)
+{
+	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_qcfg_input req = {0};
+	u16 mtu;
+	int rc;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+	req.fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc) {
+		mtu = le16_to_cpu(resp->max_mtu_configured);
+		if (!mtu)
+			*max_mtu = BNXT_MAX_MTU;
+		else
+			*max_mtu = mtu;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
 static int bnxt_vf_rep_open(struct net_device *dev)
 {
 	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
@@ -365,6 +390,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
 				    struct net_device *dev)
 {
 	struct net_device *pf_dev = bp->dev;
+	u16 max_mtu;
 
 	dev->netdev_ops = &bnxt_vf_rep_netdev_ops;
 	dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops;
@@ -380,6 +406,10 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
 	bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx,
 				 dev->perm_addr);
 	ether_addr_copy(dev->dev_addr, dev->perm_addr);
+	/* Set VF-Rep's max-mtu to the corresponding VF's max-mtu */
+	if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu))
+		dev->max_mtu = max_mtu;
+	dev->min_mtu = ETH_ZLEN;
 }
 
 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])