mirror of https://gitee.com/openkylin/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
commit 258daca2bc
@@ -409,6 +409,9 @@
 #define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */
 
 /* Extended Interrupt Cause Set */
+/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
+#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
+
 
 /* Transmit Descriptor Control */
 /* Enable the counting of descriptors still to be processed. */
@@ -42,8 +42,11 @@
 struct igb_adapter;
 
-/* ((1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */
-#define IGB_START_ITR 648
+/* Interrupt defines */
+#define IGB_START_ITR 648 /* ~6000 ints/sec */
+#define IGB_4K_ITR 980
+#define IGB_20K_ITR 196
+#define IGB_70K_ITR 56
 
 /* TX/RX descriptor defines */
 #define IGB_DEFAULT_TXD 256
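The new ITR constants follow the rate-to-interval conversion that the removed comment spelled out for 648, i.e. (10^9 ns / (rate * 1024 ns)) << 2. A quick check of that formula against the added values (arithmetic only, not taken from the commit):

    1000000000 / (20000 * 1024) ~= 48.8,  48.8 * 4 ~= 195  -> rounded to IGB_20K_ITR = 196
    1000000000 / (70000 * 1024) ~= 14.0,  14.0 * 4 ~= 56   -> matches IGB_70K_ITR = 56

IGB_4K_ITR = 980 sits slightly above the ~977 the same formula gives for 4000 ints/sec.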
@@ -146,6 +149,7 @@ struct igb_tx_buffer {
        struct sk_buff *skb;
        unsigned int bytecount;
        u16 gso_segs;
+       __be16 protocol;
        dma_addr_t dma;
        u32 length;
        u32 tx_flags;
@@ -174,15 +178,24 @@ struct igb_rx_queue_stats {
        u64 alloc_failed;
 };
 
-struct igb_q_vector {
-       struct igb_adapter *adapter; /* backlink */
-       struct igb_ring *rx_ring;
-       struct igb_ring *tx_ring;
-       struct napi_struct napi;
+struct igb_ring_container {
+       struct igb_ring *ring;          /* pointer to linked list of rings */
+       unsigned int total_bytes;       /* total bytes processed this int */
+       unsigned int total_packets;     /* total packets processed this int */
+       u16 work_limit;                 /* total work allowed per interrupt */
+       u8 count;                       /* total number of rings in vector */
+       u8 itr;                         /* current ITR setting for ring */
+};
 
-       u32 eims_value;
-       u16 cpu;
-       u16 tx_work_limit;
+struct igb_q_vector {
+       struct igb_adapter *adapter;    /* backlink */
+       int cpu;                        /* CPU for DCA */
+       u32 eims_value;                 /* EIMS mask value */
+
+       struct igb_ring_container rx, tx;
+
+       struct napi_struct napi;
+       int numa_node;
 
        u16 itr_val;
        u8 set_itr;
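To make the split above concrete, a minimal sketch of how per-direction bookkeeping moves from loose q_vector fields into the rx/tx containers; the helper name is made up for illustration and is not part of this commit:

    /* Illustrative only: accounting against the igb_ring_container fields
     * declared above; example_account_rx() is a hypothetical helper. */
    static void example_account_rx(struct igb_q_vector *q_vector,
                                   unsigned int bytes, unsigned int packets)
    {
            /* before: totals were kept directly on the q_vector / ring */
            q_vector->rx.total_bytes   += bytes;
            q_vector->rx.total_packets += packets;
    }

Accesses such as q_vector->rx_ring correspondingly become q_vector->rx.ring, as the igb_set_coalesce hunk further down shows.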
@@ -212,16 +225,12 @@ struct igb_ring {
        u16 next_to_clean ____cacheline_aligned_in_smp;
        u16 next_to_use;
 
-       unsigned int total_bytes;
-       unsigned int total_packets;
-
        union {
                /* TX */
                struct {
                        struct igb_tx_queue_stats tx_stats;
                        struct u64_stats_sync tx_syncp;
                        struct u64_stats_sync tx_syncp2;
-                       bool detect_tx_hung;
                };
                /* RX */
                struct {
@@ -231,12 +240,14 @@ struct igb_ring {
        };
        /* Items past this point are only used during ring alloc / free */
        dma_addr_t dma; /* phys address of the ring */
+       int numa_node; /* node to alloc ring memory on */
 };
 
-#define IGB_RING_FLAG_RX_CSUM 0x00000001 /* RX CSUM enabled */
-#define IGB_RING_FLAG_RX_SCTP_CSUM 0x00000002 /* SCTP CSUM offload enabled */
-
-#define IGB_RING_FLAG_TX_CTX_IDX 0x00000001 /* HW requires context index */
+enum e1000_ring_flags_t {
+       IGB_RING_FLAG_RX_SCTP_CSUM,
+       IGB_RING_FLAG_TX_CTX_IDX,
+       IGB_RING_FLAG_TX_DETECT_HANG
+};
 
 #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
 
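The flag #defines become enum values, i.e. bit numbers rather than OR-able masks (note that the old defines reused 0x00000001 for two different flags). A hedged sketch of how such bit numbers are typically used, assuming the ring keeps a flags word suitable for set_bit()/test_bit():

    /* Illustrative only: enum e1000_ring_flags_t entries used as bit indices. */
    set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);

    if (test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags)) {
            /* ring was configured for SCTP CRC offload */
    }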
@@ -247,6 +258,13 @@ struct igb_ring {
 #define IGB_TX_CTXTDESC(R, i) \
        (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
 
+/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
+static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
+                                      const u32 stat_err_bits)
+{
+       return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
+
 /* igb_desc_unused - calculate if we have unused descriptors */
 static inline int igb_desc_unused(struct igb_ring *ring)
 {
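igb_test_staterr() converts the mask with cpu_to_le32() instead of byte-swapping the descriptor field, so callers no longer need a local le32_to_cpu()'d staterr copy; the igb_clean_test_rings hunk below drops exactly that. A minimal usage sketch (variable names illustrative):

    /* Illustrative only: poll a receive descriptor for write-back completion. */
    union e1000_adv_rx_desc *rx_desc = IGB_RX_DESC(rx_ring, next_to_clean);

    if (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
            /* descriptor done: hardware has written the packet back */
    }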
@@ -340,6 +358,7 @@ struct igb_adapter {
        int vf_rate_link_speed;
        u32 rss_queues;
        u32 wvbr;
+       int node;
 };
 
 #define IGB_FLAG_HAS_MSI (1 << 0)
@@ -1577,16 +1577,14 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
        union e1000_adv_rx_desc *rx_desc;
        struct igb_rx_buffer *rx_buffer_info;
        struct igb_tx_buffer *tx_buffer_info;
-       int rx_ntc, tx_ntc, count = 0;
-       u32 staterr;
+       u16 rx_ntc, tx_ntc, count = 0;
 
        /* initialize next to clean and descriptor values */
        rx_ntc = rx_ring->next_to_clean;
        tx_ntc = tx_ring->next_to_clean;
        rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
-       staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 
-       while (staterr & E1000_RXD_STAT_DD) {
+       while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
                /* check rx buffer */
                rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
 
@@ -1615,7 +1613,6 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 
                /* fetch next descriptor */
                rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
-               staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }
 
        /* re-map buffers to ring, store next to clean values */
@@ -1630,7 +1627,8 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
 {
        struct igb_ring *tx_ring = &adapter->test_tx_ring;
        struct igb_ring *rx_ring = &adapter->test_rx_ring;
-       int i, j, lc, good_cnt, ret_val = 0;
+       u16 i, j, lc, good_cnt;
+       int ret_val = 0;
        unsigned int size = IGB_RX_HDR_LEN;
        netdev_tx_t tx_ret_val;
        struct sk_buff *skb;
@@ -2008,8 +2006,8 @@ static int igb_set_coalesce(struct net_device *netdev,
 
        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
-               q_vector->tx_work_limit = adapter->tx_work_limit;
-               if (q_vector->rx_ring)
+               q_vector->tx.work_limit = adapter->tx_work_limit;
+               if (q_vector->rx.ring)
                        q_vector->itr_val = adapter->rx_itr_setting;
                else
                        q_vector->itr_val = adapter->tx_itr_setting;
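The ethtool path above now stores the Tx work limit into the tx container. A hedged fragment of how a cleanup routine would read it back; the actual consumer is not part of the hunks shown here, and the names are illustrative:

    /* Illustrative only: Tx completion cleanup consuming the per-vector limit. */
    struct igb_ring *tx_ring = q_vector->tx.ring;
    unsigned int budget = q_vector->tx.work_limit;  /* value written by igb_set_coalesce() above */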
File diff suppressed because it is too large