Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2016-08-29

This series contains updates to fm10k only.

Jake provides all the changes in this series, starting with a fix for an
issue where VF devices may fail during an unbind/bind because the reference
counter for the pci_dev structure is never zeroed. Updated the hot path to
use SW counters instead of checking hardware Tx pending when looking for
possible transmit hangs, which improves performance. Fixed the NAPI budget
accounting so that fm10k_poll returns the actual work done, capped at
(budget - 1), instead of returning 0. Added a check to ensure that the
device is in the normal I/O state before continuing to probe, which allows
us to give a more descriptive message of what is wrong in the case of an
uncorrectable AER error. In preparation for adding Geneve Rx offload
support, refactored the current VXLAN offload flow to be a bit more
generic. Added support for receive offloads on one Geneve tunnel. Ensured
that other bits in the RXQCTL register do not get cleared, so that bits
related to queue ownership are maintained. Fixed an issue in queue
ownership assignment which caused a race condition between the PF and the
VF, where a VF could potentially cause FUM fault errors during normal
PF/VF driver behavior.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 3201a39ba8
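One change called out in the cover letter above, checking for Tx hangs with software
counters, comes down to computing the number of outstanding descriptors from the
ring's software indices (next_to_clean and next_to_use) instead of reading the TDH/TDT
registers. The standalone C sketch below illustrates only that wrap-around arithmetic;
the struct and function names are hypothetical stand-ins, not part of the driver, but
the expression matches the fm10k_get_tx_pending() hunk further down.

#include <stdio.h>

/* Minimal stand-in for the ring state the driver already tracks in software;
 * the real struct fm10k_ring carries many more fields.
 */
struct demo_ring {
        unsigned int count;         /* number of descriptors in the ring */
        unsigned int next_to_clean; /* software "head": next descriptor to reclaim */
        unsigned int next_to_use;   /* software "tail": next descriptor to fill */
};

/* Same wrap-around arithmetic as the software path of fm10k_get_tx_pending():
 * pending = tail - head, adjusted when the tail index has wrapped around the
 * end of the ring.
 */
static unsigned int demo_get_tx_pending(const struct demo_ring *ring)
{
        unsigned int head = ring->next_to_clean;
        unsigned int tail = ring->next_to_use;

        return ((head <= tail) ? tail : tail + ring->count) - head;
}

int main(void)
{
        struct demo_ring no_wrap = { .count = 256, .next_to_clean = 10, .next_to_use = 14 };
        struct demo_ring wrapped = { .count = 256, .next_to_clean = 250, .next_to_use = 4 };

        printf("pending (no wrap) = %u\n", demo_get_tx_pending(&no_wrap)); /* 4 */
        printf("pending (wrapped) = %u\n", demo_get_tx_pending(&wrapped)); /* 10 */
        return 0;
}

Built with any C compiler, the two cases report 4 and 10 pending descriptors; the
driver applies the same expression to either the software indices or the hardware
head/tail registers, depending on the in_sw argument added in this series.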
@@ -240,9 +240,7 @@ struct fm10k_iov_data {
         struct fm10k_vf_info vf_info[0];
 };
 
-#define fm10k_vxlan_port_for_each(vp, intfc) \
-        list_for_each_entry(vp, &(intfc)->vxlan_port, list)
-struct fm10k_vxlan_port {
+struct fm10k_udp_port {
         struct list_head list;
         sa_family_t sa_family;
         __be16 port;
@@ -335,8 +333,9 @@ struct fm10k_intfc {
         u32 reta[FM10K_RETA_SIZE];
         u32 rssrk[FM10K_RSSRK_SIZE];
 
-        /* VXLAN port tracking information */
+        /* UDP encapsulation port tracking information */
         struct list_head vxlan_port;
+        struct list_head geneve_port;
 
 #ifdef CONFIG_DEBUG_FS
         struct dentry *dbg_intfc;
@@ -458,7 +457,7 @@ __be16 fm10k_tx_encap_offload(struct sk_buff *skb);
 netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
                                   struct fm10k_ring *tx_ring);
 void fm10k_tx_timeout_reset(struct fm10k_intfc *interface);
-u64 fm10k_get_tx_pending(struct fm10k_ring *ring);
+u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw);
 bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring);
 void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count);
 
@@ -496,7 +495,6 @@ int fm10k_close(struct net_device *netdev);
 
 /* Ethtool */
 void fm10k_set_ethtool_ops(struct net_device *dev);
-u32 fm10k_get_reta_size(struct net_device *netdev);
 void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir);
 
 /* IOV */
@@ -207,6 +207,9 @@ s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt)
         /* clear tx_ready to prevent any false hits for reset */
         hw->mac.tx_ready = false;
 
+        if (FM10K_REMOVED(hw->hw_addr))
+                return 0;
+
         /* clear the enable bit for all rings */
         for (i = 0; i < q_cnt; i++) {
                 reg = fm10k_read_reg(hw, FM10K_TXDCTL(i));
@@ -34,7 +34,7 @@ u32 fm10k_read_reg(struct fm10k_hw *hw, int reg);
 /* write operations, indexed using DWORDS */
 #define fm10k_write_reg(hw, reg, val) \
 do { \
-        u32 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
+        u32 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
         if (!FM10K_REMOVED(hw_addr)) \
                 writel((val), &hw_addr[(reg)]); \
 } while (0)
@@ -42,7 +42,7 @@ do { \
 /* Switch register write operations, index using DWORDS */
 #define fm10k_write_sw_reg(hw, reg, val) \
 do { \
-        u32 __iomem *sw_addr = ACCESS_ONCE((hw)->sw_addr); \
+        u32 __iomem *sw_addr = READ_ONCE((hw)->sw_addr); \
         if (!FM10K_REMOVED(sw_addr)) \
                 writel((val), &sw_addr[(reg)]); \
 } while (0)
@@ -966,7 +966,7 @@ static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
         return 0;
 }
 
-u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
+static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
 {
         return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
 }
@@ -51,7 +51,7 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface)
         int i;
 
         /* if there is no iov_data then there is no mailbox to process */
-        if (!ACCESS_ONCE(interface->iov_data))
+        if (!READ_ONCE(interface->iov_data))
                 return 0;
 
         rcu_read_lock();
@@ -99,7 +99,7 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
         int i;
 
         /* if there is no iov_data then there is no mailbox to process */
-        if (!ACCESS_ONCE(interface->iov_data))
+        if (!READ_ONCE(interface->iov_data))
                 return 0;
 
         rcu_read_lock();
@@ -56,7 +56,7 @@ static int __init fm10k_init_module(void)
         pr_info("%s\n", fm10k_copyright);
 
         /* create driver workqueue */
-        fm10k_workqueue = alloc_workqueue("fm10k", WQ_MEM_RECLAIM, 0);
+        fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, fm10k_driver_name);
 
         fm10k_dbg_init();
 
@@ -651,11 +651,11 @@ static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
 static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
 {
         struct fm10k_intfc *interface = netdev_priv(skb->dev);
-        struct fm10k_vxlan_port *vxlan_port;
+        struct fm10k_udp_port *vxlan_port;
 
         /* we can only offload a vxlan if we recognize it as such */
         vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
-                                              struct fm10k_vxlan_port, list);
+                                              struct fm10k_udp_port, list);
 
         if (!vxlan_port)
                 return NULL;
@@ -1128,13 +1128,24 @@ static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
         return ring->stats.packets;
 }
 
-u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
+/**
+ * fm10k_get_tx_pending - how many Tx descriptors not processed
+ * @ring: the ring structure
+ * @in_sw: is tx_pending being checked in SW or in HW?
+ */
+u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw)
 {
         struct fm10k_intfc *interface = ring->q_vector->interface;
         struct fm10k_hw *hw = &interface->hw;
+        u32 head, tail;
 
-        u32 head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
-        u32 tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
+        if (likely(in_sw)) {
+                head = ring->next_to_clean;
+                tail = ring->next_to_use;
+        } else {
+                head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
+                tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
+        }
 
         return ((head <= tail) ? tail : tail + ring->count) - head;
 }
@@ -1143,7 +1154,7 @@ bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
 {
         u32 tx_done = fm10k_get_tx_completed(tx_ring);
         u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
-        u32 tx_pending = fm10k_get_tx_pending(tx_ring);
+        u32 tx_pending = fm10k_get_tx_pending(tx_ring, true);
 
         clear_check_for_tx_hang(tx_ring);
 
@@ -1397,7 +1408,7 @@ static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
          * that the calculation will never get below a 1. The bit shift
          * accounts for changes in the ITR due to PCIe link speed.
          */
-        itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8;
+        itr_round = READ_ONCE(ring_container->itr_scale) + 8;
         avg_wire_size += BIT(itr_round) - 1;
         avg_wire_size >>= itr_round;
 
@@ -1473,7 +1484,7 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
         /* re-enable the q_vector */
         fm10k_qv_enable(q_vector);
 
-        return 0;
+        return min(work_done, budget - 1);
 }
 
 /**
@@ -384,129 +384,171 @@ static void fm10k_request_glort_range(struct fm10k_intfc *interface)
 }
 
 /**
- * fm10k_del_vxlan_port_all
+ * fm10k_free_udp_port_info
  * @interface: board private structure
  *
- * This function frees the entire vxlan_port list
+ * This function frees both geneve_port and vxlan_port structures
  **/
-static void fm10k_del_vxlan_port_all(struct fm10k_intfc *interface)
+static void fm10k_free_udp_port_info(struct fm10k_intfc *interface)
 {
-        struct fm10k_vxlan_port *vxlan_port;
+        struct fm10k_udp_port *port;
 
-        /* flush all entries from list */
-        vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
-                                              struct fm10k_vxlan_port, list);
-        while (vxlan_port) {
-                list_del(&vxlan_port->list);
-                kfree(vxlan_port);
-                vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
-                                                      struct fm10k_vxlan_port,
-                                                      list);
+        /* flush all entries from vxlan list */
+        port = list_first_entry_or_null(&interface->vxlan_port,
+                                        struct fm10k_udp_port, list);
+        while (port) {
+                list_del(&port->list);
+                kfree(port);
+                port = list_first_entry_or_null(&interface->vxlan_port,
+                                                struct fm10k_udp_port,
+                                                list);
         }
+
+        /* flush all entries from geneve list */
+        port = list_first_entry_or_null(&interface->geneve_port,
+                                        struct fm10k_udp_port, list);
+        while (port) {
+                list_del(&port->list);
+                kfree(port);
+                port = list_first_entry_or_null(&interface->vxlan_port,
+                                                struct fm10k_udp_port,
+                                                list);
+        }
 }
 
 /**
- * fm10k_restore_vxlan_port
+ * fm10k_restore_udp_port_info
  * @interface: board private structure
 *
- * This function restores the value in the tunnel_cfg register after reset
+ * This function restores the value in the tunnel_cfg register(s) after reset
 **/
-static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
+static void fm10k_restore_udp_port_info(struct fm10k_intfc *interface)
 {
         struct fm10k_hw *hw = &interface->hw;
-        struct fm10k_vxlan_port *vxlan_port;
+        struct fm10k_udp_port *port;
 
         /* only the PF supports configuring tunnels */
         if (hw->mac.type != fm10k_mac_pf)
                 return;
 
-        vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
-                                              struct fm10k_vxlan_port, list);
+        port = list_first_entry_or_null(&interface->vxlan_port,
+                                        struct fm10k_udp_port, list);
 
         /* restore tunnel configuration register */
         fm10k_write_reg(hw, FM10K_TUNNEL_CFG,
-                        (vxlan_port ? ntohs(vxlan_port->port) : 0) |
+                        (port ? ntohs(port->port) : 0) |
                         (ETH_P_TEB << FM10K_TUNNEL_CFG_NVGRE_SHIFT));
+
+        port = list_first_entry_or_null(&interface->geneve_port,
+                                        struct fm10k_udp_port, list);
+
+        /* restore Geneve tunnel configuration register */
+        fm10k_write_reg(hw, FM10K_TUNNEL_CFG_GENEVE,
+                        (port ? ntohs(port->port) : 0));
 }
 
+static struct fm10k_udp_port *
+fm10k_remove_tunnel_port(struct list_head *ports,
+                         struct udp_tunnel_info *ti)
+{
+        struct fm10k_udp_port *port;
+
+        list_for_each_entry(port, ports, list) {
+                if ((port->port == ti->port) &&
+                    (port->sa_family == ti->sa_family)) {
+                        list_del(&port->list);
+                        return port;
+                }
+        }
+
+        return NULL;
+}
+
+static void fm10k_insert_tunnel_port(struct list_head *ports,
+                                     struct udp_tunnel_info *ti)
+{
+        struct fm10k_udp_port *port;
+
+        /* remove existing port entry from the list so that the newest items
+         * are always at the tail of the list.
+         */
+        port = fm10k_remove_tunnel_port(ports, ti);
+        if (!port) {
+                port = kmalloc(sizeof(*port), GFP_ATOMIC);
+                if (!port)
+                        return;
+                port->port = ti->port;
+                port->sa_family = ti->sa_family;
+        }
+
+        list_add_tail(&port->list, ports);
+}
+
 /**
- * fm10k_add_vxlan_port
+ * fm10k_udp_tunnel_add
 * @netdev: network interface device structure
 * @ti: Tunnel endpoint information
 *
- * This function is called when a new VXLAN interface has added a new port
- * number to the range that is currently in use for VXLAN. The new port
- * number is always added to the tail so that the port number list should
- * match the order in which the ports were allocated. The head of the list
- * is always used as the VXLAN port number for offloads.
+ * This function is called when a new UDP tunnel port has been added.
+ * Due to hardware restrictions, only one port per type can be offloaded at
+ * once.
 **/
-static void fm10k_add_vxlan_port(struct net_device *dev,
+static void fm10k_udp_tunnel_add(struct net_device *dev,
                                  struct udp_tunnel_info *ti)
 {
         struct fm10k_intfc *interface = netdev_priv(dev);
-        struct fm10k_vxlan_port *vxlan_port;
 
-        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-                return;
         /* only the PF supports configuring tunnels */
         if (interface->hw.mac.type != fm10k_mac_pf)
                 return;
 
-        /* existing ports are pulled out so our new entry is always last */
-        fm10k_vxlan_port_for_each(vxlan_port, interface) {
-                if ((vxlan_port->port == ti->port) &&
-                    (vxlan_port->sa_family == ti->sa_family)) {
-                        list_del(&vxlan_port->list);
-                        goto insert_tail;
-                }
+        switch (ti->type) {
+        case UDP_TUNNEL_TYPE_VXLAN:
+                fm10k_insert_tunnel_port(&interface->vxlan_port, ti);
+                break;
+        case UDP_TUNNEL_TYPE_GENEVE:
+                fm10k_insert_tunnel_port(&interface->geneve_port, ti);
+                break;
+        default:
+                return;
         }
 
-        /* allocate memory to track ports */
-        vxlan_port = kmalloc(sizeof(*vxlan_port), GFP_ATOMIC);
-        if (!vxlan_port)
-                return;
-        vxlan_port->port = ti->port;
-        vxlan_port->sa_family = ti->sa_family;
-
-insert_tail:
-        /* add new port value to list */
-        list_add_tail(&vxlan_port->list, &interface->vxlan_port);
-
-        fm10k_restore_vxlan_port(interface);
+        fm10k_restore_udp_port_info(interface);
 }
 
 /**
- * fm10k_del_vxlan_port
+ * fm10k_udp_tunnel_del
 * @netdev: network interface device structure
 * @ti: Tunnel endpoint information
 *
- * This function is called when a new VXLAN interface has freed a port
- * number from the range that is currently in use for VXLAN. The freed
- * port is removed from the list and the new head is used to determine
- * the port number for offloads.
+ * This function is called when a new UDP tunnel port is deleted. The freed
+ * port will be removed from the list, then we reprogram the offloaded port
+ * based on the head of the list.
 **/
-static void fm10k_del_vxlan_port(struct net_device *dev,
+static void fm10k_udp_tunnel_del(struct net_device *dev,
                                  struct udp_tunnel_info *ti)
 {
         struct fm10k_intfc *interface = netdev_priv(dev);
-        struct fm10k_vxlan_port *vxlan_port;
+        struct fm10k_udp_port *port = NULL;
 
-        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-                return;
         if (interface->hw.mac.type != fm10k_mac_pf)
                 return;
 
-        /* find the port in the list and free it */
-        fm10k_vxlan_port_for_each(vxlan_port, interface) {
-                if ((vxlan_port->port == ti->port) &&
-                    (vxlan_port->sa_family == ti->sa_family)) {
-                        list_del(&vxlan_port->list);
-                        kfree(vxlan_port);
-                        break;
-                }
+        switch (ti->type) {
+        case UDP_TUNNEL_TYPE_VXLAN:
+                port = fm10k_remove_tunnel_port(&interface->vxlan_port, ti);
+                break;
+        case UDP_TUNNEL_TYPE_GENEVE:
+                port = fm10k_remove_tunnel_port(&interface->geneve_port, ti);
+                break;
+        default:
+                return;
         }
 
-        fm10k_restore_vxlan_port(interface);
+        /* if we did remove a port we need to free its memory */
+        kfree(port);
+
+        fm10k_restore_udp_port_info(interface);
 }
 
 /**
@@ -555,7 +597,6 @@ int fm10k_open(struct net_device *netdev)
         if (err)
                 goto err_set_queues;
 
-        /* update VXLAN port configuration */
         udp_tunnel_get_rx_info(netdev);
 
         fm10k_up(interface);
@@ -591,7 +632,7 @@ int fm10k_close(struct net_device *netdev)
 
         fm10k_qv_free_irq(interface);
 
-        fm10k_del_vxlan_port_all(interface);
+        fm10k_free_udp_port_info(interface);
 
         fm10k_free_all_tx_resources(interface);
         fm10k_free_all_rx_resources(interface);
@@ -1055,7 +1096,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
         interface->xcast_mode = xcast_mode;
 
         /* Restore tunnel configuration */
-        fm10k_restore_vxlan_port(interface);
+        fm10k_restore_udp_port_info(interface);
 }
 
 void fm10k_reset_rx_state(struct fm10k_intfc *interface)
@@ -1098,7 +1139,7 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
         rcu_read_lock();
 
         for (i = 0; i < interface->num_rx_queues; i++) {
-                ring = ACCESS_ONCE(interface->rx_ring[i]);
+                ring = READ_ONCE(interface->rx_ring[i]);
 
                 if (!ring)
                         continue;
@@ -1114,7 +1155,7 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
         }
 
         for (i = 0; i < interface->num_tx_queues; i++) {
-                ring = ACCESS_ONCE(interface->tx_ring[i]);
+                ring = READ_ONCE(interface->tx_ring[i]);
 
                 if (!ring)
                         continue;
@@ -1299,7 +1340,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
 static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
 {
         struct fm10k_intfc *interface = netdev_priv(dev);
-        struct fm10k_l2_accel *l2_accel = ACCESS_ONCE(interface->l2_accel);
+        struct fm10k_l2_accel *l2_accel = READ_ONCE(interface->l2_accel);
         struct fm10k_dglort_cfg dglort = { 0 };
         struct fm10k_hw *hw = &interface->hw;
         struct net_device *sdev = priv;
@@ -1375,8 +1416,8 @@ static const struct net_device_ops fm10k_netdev_ops = {
         .ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan,
         .ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
         .ndo_get_vf_config = fm10k_ndo_get_vf_config,
-        .ndo_udp_tunnel_add = fm10k_add_vxlan_port,
-        .ndo_udp_tunnel_del = fm10k_del_vxlan_port,
+        .ndo_udp_tunnel_add = fm10k_udp_tunnel_add,
+        .ndo_udp_tunnel_del = fm10k_udp_tunnel_del,
         .ndo_dfwd_add_station = fm10k_dfwd_add_station,
         .ndo_dfwd_del_station = fm10k_dfwd_del_station,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -62,7 +62,7 @@ u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg)
 
 u32 fm10k_read_reg(struct fm10k_hw *hw, int reg)
 {
-        u32 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
+        u32 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
         u32 value = 0;
 
         if (FM10K_REMOVED(hw_addr))
@@ -133,7 +133,7 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
         /* check the real address space to see if we've recovered */
         hw_addr = READ_ONCE(interface->uc_addr);
         value = readl(hw_addr);
-        if ((~value)) {
+        if (~value) {
                 interface->hw.hw_addr = interface->uc_addr;
                 netif_device_attach(netdev);
                 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
@@ -734,15 +734,15 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
         u64 rdba = ring->dma;
         struct fm10k_hw *hw = &interface->hw;
         u32 size = ring->count * sizeof(union fm10k_rx_desc);
-        u32 rxqctl = FM10K_RXQCTL_ENABLE | FM10K_RXQCTL_PF;
-        u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
+        u32 rxqctl, rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
         u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN;
         u32 rxint = FM10K_INT_MAP_DISABLE;
         u8 rx_pause = interface->rx_pause;
         u8 reg_idx = ring->reg_idx;
 
         /* disable queue to avoid issues while updating state */
-        fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), 0);
+        rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
+        rxqctl &= ~FM10K_RXQCTL_ENABLE;
         fm10k_write_flush(hw);
 
         /* possible poll here to verify ring resources have been cleaned */
@@ -797,6 +797,8 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
         fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint);
 
         /* enable queue */
+        rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
+        rxqctl |= FM10K_RXQCTL_ENABLE;
         fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);
 
         /* place buffers on ring for receive data */
@@ -1699,7 +1701,7 @@ void fm10k_down(struct fm10k_intfc *interface)
 
                 /* start checking at the last ring to have pending Tx */
                 for (; i < interface->num_tx_queues; i++)
-                        if (fm10k_get_tx_pending(interface->tx_ring[i]))
+                        if (fm10k_get_tx_pending(interface->tx_ring[i], false))
                                 break;
 
                 /* if all the queues are drained, we can break now */
@@ -1835,8 +1837,9 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
         interface->tx_itr = FM10K_TX_ITR_DEFAULT;
         interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;
 
-        /* initialize vxlan_port list */
+        /* initialize udp port lists */
         INIT_LIST_HEAD(&interface->vxlan_port);
+        INIT_LIST_HEAD(&interface->geneve_port);
 
         netdev_rss_key_fill(rss_key, sizeof(rss_key));
         memcpy(interface->rssrk, rss_key, sizeof(rss_key));
@@ -1950,9 +1953,18 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         struct fm10k_intfc *interface;
         int err;
 
+        if (pdev->error_state != pci_channel_io_normal) {
+                dev_err(&pdev->dev,
+                        "PCI device still in an error state. Unable to load...\n");
+                return -EIO;
+        }
+
         err = pci_enable_device_mem(pdev);
-        if (err)
+        if (err) {
+                dev_err(&pdev->dev,
+                        "PCI enable device failed: %d\n", err);
                 return err;
+        }
 
         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
         if (err)
@@ -2275,7 +2287,7 @@ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
 {
         pci_ers_result_t result;
 
-        if (pci_enable_device_mem(pdev)) {
+        if (pci_reenable_device(pdev)) {
                 dev_err(&pdev->dev,
                         "Cannot re-enable PCI device after reset.\n");
                 result = PCI_ERS_RESULT_DISCONNECT;
@@ -867,10 +867,6 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
         vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
         qmap_idx = qmap_stride * vf_idx;
 
-        /* MAP Tx queue back to 0 temporarily, and disable it */
-        fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
-        fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
-
         /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
          * used here to indicate to the VF that it will not have privilege to
          * write VLAN_TABLE. All policy is enforced on the PF but this allows
@@ -886,9 +882,35 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
         fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
                                     vf_info->mac, vf_vid);
 
-        /* load onto outgoing mailbox, ignore any errors on enqueue */
-        if (vf_info->mbx.ops.enqueue_tx)
-                vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+        /* Configure Queue control register with new VLAN ID. The TXQCTL
+         * register is RO from the VF, so the PF must do this even in the
+         * case of notifying the VF of a new VID via the mailbox.
+         */
+        txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
+                 FM10K_TXQCTL_VID_MASK;
+        txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
+                  FM10K_TXQCTL_VF | vf_idx;
+
+        for (i = 0; i < queues_per_pool; i++)
+                fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
+
+        /* try loading a message onto outgoing mailbox first */
+        if (vf_info->mbx.ops.enqueue_tx) {
+                err = vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+                if (err != FM10K_MBX_ERR_NO_MBX)
+                        return err;
+                err = 0;
+        }
+
+        /* If we aren't connected to a mailbox, this is most likely because
+         * the VF driver is not running. It should thus be safe to re-map
+         * queues and use the registers to pass the MAC address so that the VF
+         * driver gets correct information during its initialization.
+         */
+
+        /* MAP Tx queue back to 0 temporarily, and disable it */
+        fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
+        fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
 
         /* verify ring has disabled before modifying base address registers */
         txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx));
@@ -927,16 +949,6 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
                                          FM10K_TDLEN_ITR_SCALE_SHIFT);
 
 err_out:
-        /* configure Queue control register */
-        txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
-                 FM10K_TXQCTL_VID_MASK;
-        txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
-                  FM10K_TXQCTL_VF | vf_idx;
-
-        /* assign VLAN ID */
-        for (i = 0; i < queues_per_pool; i++)
-                fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
-
         /* restore the queue back to VF ownership */
         fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
         return err;
@@ -154,6 +154,7 @@ struct fm10k_hw;
 #define FM10K_DGLORTDEC_INNERRSS_ENABLE 0x08000000
 #define FM10K_TUNNEL_CFG 0x0040
 #define FM10K_TUNNEL_CFG_NVGRE_SHIFT 16
+#define FM10K_TUNNEL_CFG_GENEVE 0x0041
 #define FM10K_SWPRI_MAP(_n) ((_n) + 0x0050)
 #define FM10K_SWPRI_MAX 16
 #define FM10K_RSSRK(_n, _m) (((_n) * 0x10) + (_m) + 0x0800)