/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2007 - 2018 Intel Corporation. */

/* Linux PRO/1000 Ethernet Driver main header file */

#ifndef _IGB_H_
#define _IGB_H_

#include "e1000_mac.h"
#include "e1000_82575.h"

#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/pci.h>
#include <linux/mdio.h>

struct igb_adapter;

#define E1000_PCS_CFG_IGN_SD 1

/* Interrupt defines */
#define IGB_START_ITR 648 /* ~6000 ints/sec */
#define IGB_4K_ITR 980
#define IGB_20K_ITR 196
#define IGB_70K_ITR 56
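
/* The names suggest the approximate interrupt rate each value yields:
 * IGB_4K_ITR ~4000, IGB_20K_ITR ~20000 and IGB_70K_ITR ~70000 ints/sec,
 * with IGB_START_ITR at ~6000 ints/sec as noted above. Smaller values mean
 * shorter throttle intervals and thus higher interrupt rates. (Informal
 * note only, not a statement of the exact EITR register encoding.)
 */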

/* TX/RX descriptor defines */
#define IGB_DEFAULT_TXD 256
#define IGB_DEFAULT_TX_WORK 128
#define IGB_MIN_TXD 80
#define IGB_MAX_TXD 4096

#define IGB_DEFAULT_RXD 256
#define IGB_MIN_RXD 80
#define IGB_MAX_RXD 4096

#define IGB_DEFAULT_ITR 3 /* dynamic */
#define IGB_MAX_ITR_USECS 10000
#define IGB_MIN_ITR_USECS 10
#define NON_Q_VECTORS 1
#define MAX_Q_VECTORS 8
#define MAX_MSIX_ENTRIES 10

/* Transmit and receive queues */
#define IGB_MAX_RX_QUEUES 8
#define IGB_MAX_RX_QUEUES_82575 4
#define IGB_MAX_RX_QUEUES_I211 2
#define IGB_MAX_TX_QUEUES 8
#define IGB_MAX_VF_MC_ENTRIES 30
#define IGB_MAX_VF_FUNCTIONS 8
#define IGB_MAX_VFTA_ENTRIES 128
#define IGB_82576_VF_DEV_ID 0x10CA
#define IGB_I350_VF_DEV_ID 0x1520

/* NVM version defines */
#define IGB_MAJOR_MASK 0xF000
#define IGB_MINOR_MASK 0x0FF0
#define IGB_BUILD_MASK 0x000F
#define IGB_COMB_VER_MASK 0x00FF
#define IGB_MAJOR_SHIFT 12
#define IGB_MINOR_SHIFT 4
#define IGB_COMB_VER_SHFT 8
#define IGB_NVM_VER_INVALID 0xFFFF
#define IGB_ETRACK_SHIFT 16
#define NVM_ETRACK_WORD 0x0042
#define NVM_COMB_VER_OFF 0x0083
#define NVM_COMB_VER_PTR 0x003d
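
/* Worked example (illustrative only): for an NVM version word of 0x3415,
 * major = (0x3415 & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT = 3,
 * minor = (0x3415 & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT = 0x41,
 * build = 0x3415 & IGB_BUILD_MASK = 5.
 */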

/* Transmit and receive latency (for PTP timestamps) */
#define IGB_I210_TX_LATENCY_10 9542
#define IGB_I210_TX_LATENCY_100 1024
#define IGB_I210_TX_LATENCY_1000 178
#define IGB_I210_RX_LATENCY_10 20662
#define IGB_I210_RX_LATENCY_100 2213
#define IGB_I210_RX_LATENCY_1000 448

struct vf_data_storage {
	unsigned char vf_mac_addresses[ETH_ALEN];
	u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
	u16 num_vf_mc_hashes;
	u32 flags;
	unsigned long last_nack;
	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
	u16 pf_qos;
	u16 tx_rate;
	bool spoofchk_enabled;
	bool trusted;
};

/* Number of unicast MAC filters reserved for the PF in the RAR registers */
#define IGB_PF_MAC_FILTERS_RESERVED 3

struct vf_mac_filter {
	struct list_head l;
	int vf;
	bool free;
	u8 vf_mac[ETH_ALEN];
};

#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */

/* RX descriptor control thresholds.
 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
 *	     descriptors available in its onboard memory.
 *	     Setting this to 0 disables RX descriptor prefetch.
 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
 *	     available in host memory.
 *	     If PTHRESH is 0, this should also be 0.
 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
 *	     descriptors until either it has this many to write back, or the
 *	     ITR timer expires.
 */
#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8)
#define IGB_RX_HTHRESH 8
#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_TX_HTHRESH 1
#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
			 (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4)
#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
			 (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16)
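
/* These thresholds are packed by the driver into the per-ring RXDCTL/TXDCTL
 * control registers, roughly as in the sketch below (field offsets come from
 * the hardware datasheet, not from this header):
 *
 *	txdctl |= IGB_TX_PTHRESH;
 *	txdctl |= IGB_TX_HTHRESH << 8;
 *	txdctl |= IGB_TX_WTHRESH << 16;
 */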

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522

/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_2048 2048
#define IGB_RXBUFFER_3072 3072
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
#define IGB_TS_HDR_LEN 16

#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#if (PAGE_SIZE < 8192)
#define IGB_MAX_FRAME_BUILD_SKB \
	(SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN)
#else
#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_2048 - IGB_TS_HDR_LEN)
#endif
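
/* Explanatory note: with small (4K) pages, Rx buffers are carved out of half
 * pages, so when build_skb is used the 2048-byte buffer must also hold the
 * skb_shared_info overhead (SKB_WITH_OVERHEAD), the head padding
 * (IGB_SKB_PAD) and the timestamp prefix (IGB_TS_HDR_LEN);
 * IGB_MAX_FRAME_BUILD_SKB is what remains for the frame itself. With larger
 * pages only the timestamp prefix has to be subtracted.
 */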

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */

#define IGB_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define AUTO_ALL_MODES 0
#define IGB_EEPROM_APME 0x0400

#ifndef IGB_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define IGB_MASTER_SLAVE e1000_ms_hw_default
#endif

#define IGB_MNG_VLAN_NONE -1

enum igb_tx_flags {
	/* cmd_type flags */
	IGB_TX_FLAGS_VLAN	= 0x01,
	IGB_TX_FLAGS_TSO	= 0x02,
	IGB_TX_FLAGS_TSTAMP	= 0x04,

	/* olinfo flags */
	IGB_TX_FLAGS_IPV4	= 0x10,
	IGB_TX_FLAGS_CSUM	= 0x20,
};

/* VLAN info */
#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
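
/* The VLAN tag travels in the upper 16 bits of tx_flags, e.g. (sketch):
 *
 *	tx_flags |= (u32)vlan_tci << IGB_TX_FLAGS_VLAN_SHIFT;
 *	vlan_tci  = (tx_flags & IGB_TX_FLAGS_VLAN_MASK) >>
 *		    IGB_TX_FLAGS_VLAN_SHIFT;
 */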

/* The largest size we can write to the descriptor is 65535. In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGB_MAX_TXD_PWR 15
#define IGB_MAX_DATA_PER_TXD (1u << IGB_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
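
/* Worked example: a 100 KiB chunk of linear data needs
 * TXD_USE_COUNT(102400) = DIV_ROUND_UP(102400, 32768) = 4 data descriptors.
 * DESC_NEEDED is the worst-case descriptor estimate for a single skb: one
 * per possible page fragment plus a small fixed reserve (head, context
 * descriptor, etc. - see the driver for the exact accounting).
 */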

/* EEPROM byte offsets */
#define IGB_SFF_8472_SWAP 0x5C
#define IGB_SFF_8472_COMP 0x5E

/* Bitmasks */
#define IGB_SFF_ADDRESSING_MODE 0x4
#define IGB_SFF_8472_UNSUP 0x00

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct igb_tx_buffer {
	union e1000_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	struct sk_buff *skb;
	unsigned int bytecount;
	u16 gso_segs;
	__be16 protocol;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct igb_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};
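
/* The width of page_offset mirrors the #if above: a 16-bit offset cannot
 * address pages of 64 KiB or more, and on 64-bit builds the wider field is
 * assumed not to grow the structure because of alignment padding.
 * (Best-effort explanation of the conditional, offered as an assumption.)
 */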

struct igb_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 restart_queue;
	u64 restart_queue2;
};

struct igb_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 csum_err;
	u64 alloc_failed;
};

struct igb_ring_container {
	struct igb_ring *ring;		/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

struct igb_ring {
	struct igb_q_vector *q_vector;	/* backlink to q_vector */
	struct net_device *netdev;	/* back pointer to net_device */
	struct device *dev;		/* device pointer for dma mapping */
	union {				/* array of buffer info structs */
		struct igb_tx_buffer *tx_buffer_info;
		struct igb_rx_buffer *rx_buffer_info;
	};
	void *desc;			/* descriptor ring memory */
	unsigned long flags;		/* ring specific flags */
	void __iomem *tail;		/* pointer to ring tail register */
	dma_addr_t dma;			/* phys address of the ring */
	unsigned int size;		/* length of desc. ring in bytes */

	u16 count;			/* number of desc. in the ring */
	u8 queue_index;			/* logical index of the ring */
	u8 reg_idx;			/* physical index of the ring */
	bool launchtime_enable;		/* true if LaunchTime is enabled */
	bool cbs_enable;		/* indicates if CBS is enabled */
	s32 idleslope;			/* idleSlope in kbps */
	s32 sendslope;			/* sendSlope in kbps */
	s32 hicredit;			/* hiCredit in bytes */
	s32 locredit;			/* loCredit in bytes */

	/* everything past this point is written often */
	u16 next_to_clean;
	u16 next_to_use;
	u16 next_to_alloc;

	union {
		/* TX */
		struct {
			struct igb_tx_queue_stats tx_stats;
			struct u64_stats_sync tx_syncp;
			struct u64_stats_sync tx_syncp2;
		};
		/* RX */
		struct {
			struct sk_buff *skb;
			struct igb_rx_queue_stats rx_stats;
			struct u64_stats_sync rx_syncp;
		};
	};
} ____cacheline_internodealigned_in_smp;

struct igb_q_vector {
	struct igb_adapter *adapter;	/* backlink */
	int cpu;			/* CPU for DCA */
	u32 eims_value;			/* EIMS mask value */

	u16 itr_val;
	u8 set_itr;
	void __iomem *itr_register;

	struct igb_ring_container rx, tx;

	struct napi_struct napi;
	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];

	/* for dynamic allocation of rings associated with this q_vector */
	struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
};

enum e1000_ring_flags_t {
	IGB_RING_FLAG_RX_3K_BUFFER,
	IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
	IGB_RING_FLAG_RX_SCTP_CSUM,
	IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
	IGB_RING_FLAG_TX_CTX_IDX,
	IGB_RING_FLAG_TX_DETECT_HANG
};

#define ring_uses_large_buffer(ring) \
	test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define set_ring_uses_large_buffer(ring) \
	set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define clear_ring_uses_large_buffer(ring) \
	clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)

#define ring_uses_build_skb(ring) \
	test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
#define set_ring_build_skb_enabled(ring) \
	set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
#define clear_ring_build_skb_enabled(ring) \
	clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)

static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return IGB_RXBUFFER_3072;

	if (ring_uses_build_skb(ring))
		return IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN;
#endif
	return IGB_RXBUFFER_2048;
}

static inline unsigned int igb_rx_pg_order(struct igb_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return 1;
#endif
	return 0;
}

#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring))
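
/* Arithmetic note: on 4K-page systems a ring flagged for 3K buffers uses
 * order-1 pages, so igb_rx_pg_size() = 4096 << 1 = 8192 bytes and each half
 * of the page backs one 3072-byte buffer; otherwise order-0 pages are split
 * into two 2048-byte buffers.
 */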

#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)

#define IGB_RX_DESC(R, i)	\
	(&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
#define IGB_TX_DESC(R, i)	\
	(&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
#define IGB_TX_CTXTDESC(R, i)	\
	(&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
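
/* Typical use (sketch): the i-th descriptor of a ring is obtained as
 *
 *	union e1000_adv_rx_desc *rx_desc = IGB_RX_DESC(rx_ring, i);
 *	union e1000_adv_tx_desc *tx_desc = IGB_TX_DESC(tx_ring, i);
 *
 * i.e. the macros simply cast (R)->desc and index into the array.
 */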

/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
				      const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
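
/* Example (sketch): testing the End Of Packet bit of a written-back
 * descriptor:
 *
 *	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))
 *		...
 *
 * The return value is non-zero if any of the requested bits are set; the
 * comparison is performed in the descriptor's little-endian byte order.
 */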

/* igb_desc_unused - calculate the number of unused descriptors */
static inline int igb_desc_unused(struct igb_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
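
/* Worked example: with count = 256, next_to_clean = 10 and next_to_use = 250
 * the function returns 256 + 10 - 250 - 1 = 15 free descriptors; one slot is
 * always left unused so that next_to_use never catches up with next_to_clean
 * on a full ring.
 */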

#ifdef CONFIG_IGB_HWMON

#define IGB_HWMON_TYPE_LOC 0
#define IGB_HWMON_TYPE_TEMP 1
#define IGB_HWMON_TYPE_CAUTION 2
#define IGB_HWMON_TYPE_MAX 3

struct hwmon_attr {
	struct device_attribute dev_attr;
	struct e1000_hw *hw;
	struct e1000_thermal_diode_data *sensor;
	char name[12];
};

struct hwmon_buff {
	struct attribute_group group;
	const struct attribute_group *groups[2];
	struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1];
	struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4];
	unsigned int n_hwmon;
};
#endif

/* The number of L2 ether-type filter registers. Index 3 is reserved
 * for the PTP 1588 timestamp.
 */
#define MAX_ETYPE_FILTER (4 - 1)
/* ETQF filter list: one static filter per filter consumer. This is
 * to avoid filter collisions later. Add new filters here!
 *
 * Current filters: Filter 3
 */
#define IGB_ETQF_FILTER_1588 3

#define IGB_N_EXTTS 2
#define IGB_N_PEROUT 2
#define IGB_N_SDP 4
#define IGB_RETA_SIZE 128

enum igb_filter_match_flags {
	IGB_FILTER_FLAG_ETHER_TYPE = 0x1,
	IGB_FILTER_FLAG_VLAN_TCI = 0x2,
	IGB_FILTER_FLAG_SRC_MAC_ADDR = 0x4,
	IGB_FILTER_FLAG_DST_MAC_ADDR = 0x8,
};

#define IGB_MAX_RXNFC_FILTERS 16

/* RX network flow classification data structure */
struct igb_nfc_input {
	/* Byte layout in order, all values with MSB first:
	 * match_flags - 1 byte
	 * etype - 2 bytes
	 * vlan_tci - 2 bytes
	 */
	u8 match_flags;
	__be16 etype;
	__be16 vlan_tci;
	u8 src_addr[ETH_ALEN];
	u8 dst_addr[ETH_ALEN];
};

struct igb_nfc_filter {
	struct hlist_node nfc_node;
	struct igb_nfc_input filter;
	unsigned long cookie;
	u16 etype_reg_index;
	u16 sw_idx;
	u16 action;
};

struct igb_mac_addr {
	u8 addr[ETH_ALEN];
	u8 queue;
	u8 state; /* bitmask */
};

#define IGB_MAC_STATE_DEFAULT 0x1
#define IGB_MAC_STATE_IN_USE 0x2
#define IGB_MAC_STATE_SRC_ADDR 0x4
#define IGB_MAC_STATE_QUEUE_STEERING 0x8

/* board specific private data structure */
struct igb_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	struct net_device *netdev;

	unsigned long state;
	unsigned int flags;

	unsigned int num_q_vectors;
	struct msix_entry msix_entries[MAX_MSIX_ENTRIES];

	/* Interrupt Throttle Rate */
	u32 rx_itr_setting;
	u32 tx_itr_setting;
	u16 tx_itr;
	u16 rx_itr;

	/* TX */
	u16 tx_work_limit;
	u32 tx_timeout_count;
	int num_tx_queues;
	struct igb_ring *tx_ring[16];

	/* RX */
	int num_rx_queues;
	struct igb_ring *rx_ring[16];

	u32 max_frame_size;
	u32 min_frame_size;

	struct timer_list watchdog_timer;
	struct timer_list phy_info_timer;

	u16 mng_vlan_id;
	u32 bd_number;
	u32 wol;
	u32 en_mng_pt;
	u16 link_speed;
	u16 link_duplex;

	u8 __iomem *io_addr; /* Mainly for iounmap use */

	struct work_struct reset_task;
	struct work_struct watchdog_task;
	bool fc_autoneg;
	u8 tx_timeout_factor;
	struct timer_list blink_timer;
	unsigned long led_status;

	/* OS defined structs */
	struct pci_dev *pdev;

	spinlock_t stats64_lock;
	struct rtnl_link_stats64 stats64;

	/* structs defined in e1000_hw.h */
	struct e1000_hw hw;
	struct e1000_hw_stats stats;
	struct e1000_phy_info phy_info;

	u32 test_icr;
	struct igb_ring test_tx_ring;
	struct igb_ring test_rx_ring;

	int msg_enable;

	struct igb_q_vector *q_vector[MAX_Q_VECTORS];
	u32 eims_enable_mask;
	u32 eims_other;

	/* to not mess up cache alignment, always add to the bottom */
	u16 tx_ring_count;
	u16 rx_ring_count;
	unsigned int vfs_allocated_count;
	struct vf_data_storage *vf_data;
	int vf_rate_link_speed;
	u32 rss_queues;
	u32 wvbr;
	u32 *shadow_vfta;

	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_caps;
	struct delayed_work ptp_overflow_work;
	struct work_struct ptp_tx_work;
	struct sk_buff *ptp_tx_skb;
	struct hwtstamp_config tstamp_config;
	unsigned long ptp_tx_start;
	unsigned long last_rx_ptp_check;
	unsigned long last_rx_timestamp;
	unsigned int ptp_flags;
	spinlock_t tmreg_lock;
	struct cyclecounter cc;
	struct timecounter tc;
	u32 tx_hwtstamp_timeouts;
	u32 tx_hwtstamp_skipped;
	u32 rx_hwtstamp_cleared;
	bool pps_sys_wrap_on;

	struct ptp_pin_desc sdp_config[IGB_N_SDP];
	struct {
		struct timespec64 start;
		struct timespec64 period;
	} perout[IGB_N_PEROUT];

	char fw_version[32];
#ifdef CONFIG_IGB_HWMON
	struct hwmon_buff *igb_hwmon_buff;
	bool ets;
#endif
	struct i2c_algo_bit_data i2c_algo;
	struct i2c_adapter i2c_adap;
	struct i2c_client *i2c_client;
	u32 rss_indir_tbl_init;
	u8 rss_indir_tbl[IGB_RETA_SIZE];

	unsigned long link_check_timeout;
	int copper_tries;
	struct e1000_info ei;
	u16 eee_advert;

	/* RX network flow classification support */
	struct hlist_head nfc_filter_list;
	struct hlist_head cls_flower_list;
	unsigned int nfc_filter_count;
	/* lock for RX network flow classification filter */
	spinlock_t nfc_lock;
	bool etype_bitmap[MAX_ETYPE_FILTER];

	struct igb_mac_addr *mac_table;
	struct vf_mac_filter vf_macs;
	struct vf_mac_filter *vf_mac_list;
};

/* flags controlling PTP/1588 function */
#define IGB_PTP_ENABLED BIT(0)
#define IGB_PTP_OVERFLOW_CHECK BIT(1)

#define IGB_FLAG_HAS_MSI BIT(0)
#define IGB_FLAG_DCA_ENABLED BIT(1)
#define IGB_FLAG_QUAD_PORT_A BIT(2)
#define IGB_FLAG_QUEUE_PAIRS BIT(3)
#define IGB_FLAG_DMAC BIT(4)
#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
#define IGB_FLAG_WOL_SUPPORTED BIT(8)
#define IGB_FLAG_NEED_LINK_UPDATE BIT(9)
#define IGB_FLAG_MEDIA_RESET BIT(10)
#define IGB_FLAG_MAS_CAPABLE BIT(11)
#define IGB_FLAG_MAS_ENABLE BIT(12)
#define IGB_FLAG_HAS_MSIX BIT(13)
#define IGB_FLAG_EEE BIT(14)
#define IGB_FLAG_VLAN_PROMISC BIT(15)
#define IGB_FLAG_RX_LEGACY BIT(16)
#define IGB_FLAG_FQTSS BIT(17)

/* Media Auto Sense */
#define IGB_MAS_ENABLE_0 0X0001
#define IGB_MAS_ENABLE_1 0X0002
#define IGB_MAS_ENABLE_2 0X0004
#define IGB_MAS_ENABLE_3 0X0008

/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
#define IGB_TX_BUF_4096 4096
#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */

#define IGB_82576_TSYNC_SHIFT 19
enum e1000_state_t {
	__IGB_TESTING,
	__IGB_RESETTING,
	__IGB_DOWN,
	__IGB_PTP_TX_IN_PROGRESS,
};

enum igb_boards {
	board_82575,
};

extern char igb_driver_name[];
extern char igb_driver_version[];

int igb_open(struct net_device *netdev);
int igb_close(struct net_device *netdev);
int igb_up(struct igb_adapter *);
void igb_down(struct igb_adapter *);
void igb_reinit_locked(struct igb_adapter *);
void igb_reset(struct igb_adapter *);
int igb_reinit_queues(struct igb_adapter *);
void igb_write_rss_indir_tbl(struct igb_adapter *);
int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
int igb_setup_tx_resources(struct igb_ring *);
int igb_setup_rx_resources(struct igb_ring *);
void igb_free_tx_resources(struct igb_ring *);
void igb_free_rx_resources(struct igb_ring *);
void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *);
bool igb_has_link(struct igb_adapter *adapter);
void igb_set_ethtool_ops(struct net_device *);
void igb_power_up_link(struct igb_adapter *);
void igb_set_fw_version(struct igb_adapter *);
void igb_ptp_init(struct igb_adapter *adapter);
void igb_ptp_stop(struct igb_adapter *adapter);
void igb_ptp_reset(struct igb_adapter *adapter);
void igb_ptp_suspend(struct igb_adapter *adapter);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_tx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
			 struct sk_buff *skb);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
unsigned int igb_get_max_rss_queues(struct igb_adapter *);
#ifdef CONFIG_IGB_HWMON
void igb_sysfs_exit(struct igb_adapter *adapter);
int igb_sysfs_init(struct igb_adapter *adapter);
#endif
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
	if (hw->phy.ops.reset)
		return hw->phy.ops.reset(hw);

	return 0;
}

static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
{
	if (hw->phy.ops.read_reg)
		return hw->phy.ops.read_reg(hw, offset, data);

	return 0;
}

static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
{
	if (hw->phy.ops.write_reg)
		return hw->phy.ops.write_reg(hw, offset, data);

	return 0;
}

static inline s32 igb_get_phy_info(struct e1000_hw *hw)
{
	if (hw->phy.ops.get_phy_info)
		return hw->phy.ops.get_phy_info(hw);

	return 0;
}

static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
{
	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}

int igb_add_filter(struct igb_adapter *adapter,
		   struct igb_nfc_filter *input);
int igb_erase_filter(struct igb_adapter *adapter,
		     struct igb_nfc_filter *input);

int igb_add_mac_steering_filter(struct igb_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags);
int igb_del_mac_steering_filter(struct igb_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags);

#endif /* _IGB_H_ */