Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2020-01-17

This series contains updates to the igc, i40e, fm10k and ice drivers.

Sasha fixes a typo in a code comment that referred to silicon not supported by the igc driver, cleans up a define that was not being used, adds support for another i225 SKU which does not have an NVM, adds TCP segmentation offload (TSO) support to igc, and adds PHY power management control to provide a reliable and accurate indication of PHY reset completion.

Jake adds support for the new txqueue parameter to the transmit timeout function in fm10k, which reduces the code complexity of determining which transmit queue is stuck.

Julio Faracco makes the same change Jake made for fm10k in the i40e and ice drivers, adding support for the new txqueue parameter in their transmit timeout functions.

Colin Ian King cleans up a redundant initialization of a local variable.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9aaa294940
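Background for the fm10k, i40e and ice changes below: the net core now passes the index of the stuck queue straight into the transmit-timeout hook. A sketch of the callback shape the drivers implement here, paraphrased from the include/linux/netdevice.h of this era and not part of this diff:

	/* Invoked from the stack's watchdog with the index of the Tx queue
	 * whose last transmit has stalled past watchdog_timeo, so drivers
	 * no longer have to rescan every queue to find the hung one.
	 */
	void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);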
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -696,21 +696,24 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 /**
  * fm10k_tx_timeout - Respond to a Tx Hang
  * @netdev: network interface device structure
+ * @txqueue: the index of the Tx queue that timed out
  **/
-static void fm10k_tx_timeout(struct net_device *netdev)
+static void fm10k_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 {
 	struct fm10k_intfc *interface = netdev_priv(netdev);
+	struct fm10k_ring *tx_ring;
 	bool real_tx_hang = false;
-	int i;
-
-#define TX_TIMEO_LIMIT 16000
-	for (i = 0; i < interface->num_tx_queues; i++) {
-		struct fm10k_ring *tx_ring = interface->tx_ring[i];
 
-		if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring))
-			real_tx_hang = true;
+	if (txqueue >= interface->num_tx_queues) {
+		WARN(1, "invalid Tx queue index %d", txqueue);
+		return;
 	}
 
+	tx_ring = interface->tx_ring[txqueue];
+	if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring))
+		real_tx_hang = true;
+
+#define TX_TIMEO_LIMIT 16000
 	if (real_tx_hang) {
 		fm10k_tx_timeout_reset(interface);
 	} else {
drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -307,37 +307,18 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_ring *tx_ring = NULL;
-	unsigned int i, hung_queue = 0;
+	unsigned int i;
 	u32 head, val;
 
 	pf->tx_timeout_count++;
 
-	/* find the stopped queue the same way the stack does */
-	for (i = 0; i < netdev->num_tx_queues; i++) {
-		struct netdev_queue *q;
-		unsigned long trans_start;
-
-		q = netdev_get_tx_queue(netdev, i);
-		trans_start = q->trans_start;
-		if (netif_xmit_stopped(q) &&
-		    time_after(jiffies,
-			       (trans_start + netdev->watchdog_timeo))) {
-			hung_queue = i;
-			break;
-		}
-	}
-
-	if (i == netdev->num_tx_queues) {
-		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
-	} else {
-		/* now that we have an index, find the tx_ring struct */
-		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
-				if (hung_queue ==
-				    vsi->tx_rings[i]->queue_index) {
-					tx_ring = vsi->tx_rings[i];
-					break;
-				}
+	/* with txqueue index, find the tx_ring struct */
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+			if (txqueue ==
+			    vsi->tx_rings[i]->queue_index) {
+				tx_ring = vsi->tx_rings[i];
+				break;
 			}
 		}
 	}
@@ -363,14 +344,14 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
 
 		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
-			    vsi->seid, hung_queue, tx_ring->next_to_clean,
+			    vsi->seid, txqueue, tx_ring->next_to_clean,
 			    head, tx_ring->next_to_use,
 			    readl(tx_ring->tail), val);
 	}
 
 	pf->tx_timeout_last_recovery = jiffies;
-	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
-		    pf->tx_timeout_recovery_level, hung_queue);
+	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
+		    pf->tx_timeout_recovery_level, txqueue);
 
 	switch (pf->tx_timeout_recovery_level) {
 	case 1:
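Note: the scan removed above duplicated work the core already does before calling the driver. A compressed, illustrative paraphrase of the dev_watchdog() scan in net/sched/sch_generic.c (simplified from memory, not part of this commit):

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* a queue is hung if it is stopped and its last transmit
		 * is older than the watchdog timeout
		 */
		if (netif_xmit_stopped(txq) &&
		    time_after(jiffies,
			       txq->trans_start + dev->watchdog_timeo)) {
			dev->netdev_ops->ndo_tx_timeout(dev, i);
			break;
		}
	}

With the index handed down, i40e and ice only need the second lookup that maps the netdev queue index to their own ring structure.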
drivers/net/ethernet/intel/ice/ice_main.c
@@ -5086,36 +5086,17 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 	struct ice_ring *tx_ring = NULL;
 	struct ice_vsi *vsi = np->vsi;
 	struct ice_pf *pf = vsi->back;
-	int hung_queue = -1;
 	u32 i;
 
 	pf->tx_timeout_count++;
 
-	/* find the stopped queue the same way dev_watchdog() does */
-	for (i = 0; i < netdev->num_tx_queues; i++) {
-		unsigned long trans_start;
-		struct netdev_queue *q;
-
-		q = netdev_get_tx_queue(netdev, i);
-		trans_start = q->trans_start;
-		if (netif_xmit_stopped(q) &&
-		    time_after(jiffies,
-			       trans_start + netdev->watchdog_timeo)) {
-			hung_queue = i;
-			break;
-		}
-	}
-
-	if (i == netdev->num_tx_queues)
-		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
-	else
-		/* now that we have an index, find the tx_ring struct */
-		for (i = 0; i < vsi->num_txq; i++)
-			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
-				if (hung_queue == vsi->tx_rings[i]->q_index) {
-					tx_ring = vsi->tx_rings[i];
-					break;
-				}
+	/* now that we have an index, find the tx_ring struct */
+	for (i = 0; i < vsi->num_txq; i++)
+		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+			if (txqueue == vsi->tx_rings[i]->q_index) {
+				tx_ring = vsi->tx_rings[i];
+				break;
+			}
 
 	/* Reset recovery level if enough time has elapsed after last timeout.
 	 * Also ensure no new reset action happens before next timeout period.
@@ -5130,19 +5111,19 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 		struct ice_hw *hw = &pf->hw;
 		u32 head, val = 0;
 
-		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
+		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
 			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
 		/* Read interrupt register */
 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
 
 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
-			    vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
+			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
 			    head, tx_ring->next_to_use, val);
 	}
 
 	pf->tx_timeout_last_recovery = jiffies;
-	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
-		    pf->tx_timeout_recovery_level, hung_queue);
+	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
+		    pf->tx_timeout_recovery_level, txqueue);
 
 	switch (pf->tx_timeout_recovery_level) {
 	case 1:
drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -1020,8 +1020,8 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
 	s16 ntc = xdp_ring->next_to_clean;
 	struct ice_tx_desc *tx_desc;
 	struct ice_tx_buf *tx_buf;
-	bool xmit_done = true;
 	u32 xsk_frames = 0;
+	bool xmit_done;
 
 	tx_desc = ICE_TX_DESC(xdp_ring, ntc);
 	tx_buf = &xdp_ring->tx_buf[ntc];
drivers/net/ethernet/intel/igc/igc_base.c
@@ -212,6 +212,7 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
 	case IGC_DEV_ID_I225_I:
 	case IGC_DEV_ID_I220_V:
 	case IGC_DEV_ID_I225_K:
+	case IGC_DEV_ID_I225_BLANK_NVM:
 		mac->type = igc_i225;
 		break;
 	default:
drivers/net/ethernet/intel/igc/igc_defines.h
@@ -282,6 +282,10 @@
 #define IGC_TXD_STAT_TC		0x00000004 /* Tx Underrun */
 #define IGC_TXD_EXTCMD_TSTAMP	0x00000010 /* IEEE1588 Timestamp packet */
 
+/* IPSec Encrypt Enable */
+#define IGC_ADVTXD_L4LEN_SHIFT	8  /* Adv ctxt L4LEN shift */
+#define IGC_ADVTXD_MSS_SHIFT	16 /* Adv ctxt MSS shift */
+
 /* Transmit Control */
 #define IGC_TCTL_EN		0x00000002 /* enable Tx */
 #define IGC_TCTL_PSP		0x00000008 /* pad short packets */
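The two new shifts pack the L4 header length and the MSS into the mss_l4len_idx dword of the advanced context descriptor written by igc_tso() later in this series. A minimal userspace sketch of the packing arithmetic; the example values are made up:

	#include <stdint.h>
	#include <stdio.h>

	#define IGC_ADVTXD_L4LEN_SHIFT	8  /* Adv ctxt L4LEN shift */
	#define IGC_ADVTXD_MSS_SHIFT	16 /* Adv ctxt MSS shift */

	int main(void)
	{
		uint32_t l4len = 20;   /* TCP header with no options */
		uint32_t mss = 1448;   /* hypothetical MSS */
		uint32_t mss_l4len_idx;

		/* same packing igc_tso() performs before writing the
		 * context descriptor
		 */
		mss_l4len_idx = l4len << IGC_ADVTXD_L4LEN_SHIFT;
		mss_l4len_idx |= mss << IGC_ADVTXD_MSS_SHIFT;

		printf("mss_l4len_idx = 0x%08x\n", mss_l4len_idx); /* 0x05a81400 */
		return 0;
	}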
@@ -460,6 +464,7 @@
 /* PHY Status Register */
 #define MII_SR_LINK_STATUS	0x0004 /* Link Status 1 = link */
 #define MII_SR_AUTONEG_COMPLETE	0x0020 /* Auto Neg Complete */
+#define IGC_PHY_RST_COMP	0x0100 /* Internal PHY reset completion */
 
 /* PHY 1000 MII Register/Bit Definitions */
 /* PHY Registers defined by IEEE */
drivers/net/ethernet/intel/igc/igc_hw.h
@@ -21,8 +21,7 @@
 #define IGC_DEV_ID_I225_I			0x15F8
 #define IGC_DEV_ID_I220_V			0x15F7
 #define IGC_DEV_ID_I225_K			0x3100
-
-#define IGC_FUNC_0				0
+#define IGC_DEV_ID_I225_BLANK_NVM		0x15FD
 
 /* Function pointers for the MAC. */
 struct igc_mac_operations {
drivers/net/ethernet/intel/igc/igc_main.c
@@ -45,6 +45,7 @@ static const struct pci_device_id igc_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
+	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
 	/* required last entry */
 	{0, }
 };
@@ -880,7 +881,7 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
 	/* set bits to identify this as an advanced context descriptor */
 	type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
 
-	/* For 82575, context index must be unique per ring. */
+	/* For i225, context index must be unique per ring. */
 	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
 		mss_l4len_idx |= tx_ring->reg_idx << 4;
 
@@ -999,6 +1000,10 @@ static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
 			 IGC_ADVTXD_DCMD_DEXT |
 			 IGC_ADVTXD_DCMD_IFCS;
 
+	/* set segmentation bits for TSO */
+	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
+				 (IGC_ADVTXD_DCMD_TSE));
+
 	/* set timestamp bit if present */
 	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
 				 (IGC_ADVTXD_MAC_TSTAMP));
@@ -1170,6 +1175,100 @@ static int igc_tx_map(struct igc_ring *tx_ring,
 	return -1;
 }
 
+static int igc_tso(struct igc_ring *tx_ring,
+		   struct igc_tx_buffer *first,
+		   u8 *hdr_len)
+{
+	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
+	struct sk_buff *skb = first->skb;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+	u32 paylen, l4_offset;
+	int err;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	if (!skb_is_gso(skb))
+		return 0;
+
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;
+
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_checksum_start(skb);
+
+	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+	type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
+
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		unsigned char *csum_start = skb_checksum_start(skb);
+		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+
+		/* IP header will have to cancel out any data that
+		 * is not a part of the outer IP header
+		 */
+		ip.v4->check = csum_fold(csum_partial(trans_start,
+						      csum_start - trans_start,
+						      0));
+		type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;
+
+		ip.v4->tot_len = 0;
+		first->tx_flags |= IGC_TX_FLAGS_TSO |
+				   IGC_TX_FLAGS_CSUM |
+				   IGC_TX_FLAGS_IPV4;
+	} else {
+		ip.v6->payload_len = 0;
+		first->tx_flags |= IGC_TX_FLAGS_TSO |
+				   IGC_TX_FLAGS_CSUM;
+	}
+
+	/* determine offset of inner transport header */
+	l4_offset = l4.hdr - skb->data;
+
+	/* remove payload length from inner checksum */
+	paylen = skb->len - l4_offset;
+	if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
+		/* compute length of segmentation header */
+		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+		csum_replace_by_diff(&l4.tcp->check,
+				     (__force __wsum)htonl(paylen));
+	} else {
+		/* compute length of segmentation header */
+		*hdr_len = sizeof(*l4.udp) + l4_offset;
+		csum_replace_by_diff(&l4.udp->check,
+				     (__force __wsum)htonl(paylen));
+	}
+
+	/* update gso size and bytecount with header size */
+	first->gso_segs = skb_shinfo(skb)->gso_segs;
+	first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
+	/* MSS L4LEN IDX */
+	mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
+	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;
+
+	/* VLAN MACLEN IPLEN */
+	vlan_macip_lens = l4.hdr - ip.hdr;
+	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
+
+	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
+			type_tucmd, mss_l4len_idx);
+
+	return 1;
+}
+
 static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 				       struct igc_ring *tx_ring)
 {
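In igc_tso() above, every segment after the first carries its own copy of the protocol headers on the wire, which is why first->bytecount grows by (gso_segs - 1) * *hdr_len while the checksum is reseeded without the payload length. A small userspace sketch of the byte accounting, with hypothetical sizes (the kernel takes gso_segs from skb_shinfo() rather than computing it):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical 64 KiB GSO send: 54 bytes of Ethernet +
		 * IPv4 + TCP headers, 1448-byte MSS
		 */
		uint32_t skb_len = 65535, hdr_len = 54, mss = 1448;
		uint32_t payload = skb_len - hdr_len;
		uint32_t gso_segs = (payload + mss - 1) / mss;
		uint32_t bytecount = skb_len + (gso_segs - 1) * hdr_len;

		/* prints: segments: 46, bytes: 67965 */
		printf("segments: %u, bytes: %u\n", gso_segs, bytecount);
		return 0;
	}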
@@ -1179,6 +1278,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 	u32 tx_flags = 0;
 	unsigned short f;
 	u8 hdr_len = 0;
+	int tso = 0;
 
 	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
 	 * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
@@ -1225,10 +1325,20 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 	first->tx_flags = tx_flags;
 	first->protocol = protocol;
 
-	igc_tx_csum(tx_ring, first);
+	tso = igc_tso(tx_ring, first, &hdr_len);
+	if (tso < 0)
+		goto out_drop;
+	else if (!tso)
+		igc_tx_csum(tx_ring, first);
 
 	igc_tx_map(tx_ring, first, hdr_len);
 
 	return NETDEV_TX_OK;
+
+out_drop:
+	dev_kfree_skb_any(first->skb);
+	first->skb = NULL;
+
+	return NETDEV_TX_OK;
 }
 
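The dispatch above follows the tri-state convention igc_tso() shares with the other Intel Ethernet drivers. The excerpt below repeats the new hunk with interpretive comments added; the comments are ours, not from the source:

	tso = igc_tso(tx_ring, first, &hdr_len);
	if (tso < 0)		/* skb_cow_head() failed: drop the frame */
		goto out_drop;
	else if (!tso)		/* not a GSO frame: plain checksum offload */
		igc_tx_csum(tx_ring, first);
	/* tso == 1: a TSO context descriptor has already been queued */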
@@ -4588,6 +4698,8 @@ static int igc_probe(struct pci_dev *pdev,
 
 	/* Add supported features to the features list*/
 	netdev->features |= NETIF_F_SG;
+	netdev->features |= NETIF_F_TSO;
+	netdev->features |= NETIF_F_TSO6;
 	netdev->features |= NETIF_F_RXCSUM;
 	netdev->features |= NETIF_F_HW_CSUM;
 	netdev->features |= NETIF_F_SCTP_CRC;
drivers/net/ethernet/intel/igc/igc_phy.c
@@ -173,6 +173,7 @@ s32 igc_check_downshift(struct igc_hw *hw)
 s32 igc_phy_hw_reset(struct igc_hw *hw)
 {
 	struct igc_phy_info *phy = &hw->phy;
+	u32 phpm = 0, timeout = 10000;
 	s32 ret_val;
 	u32 ctrl;
 
@@ -186,6 +187,8 @@ s32 igc_phy_hw_reset(struct igc_hw *hw)
 	if (ret_val)
 		goto out;
 
+	phpm = rd32(IGC_I225_PHPM);
+
 	ctrl = rd32(IGC_CTRL);
 	wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST);
 	wrfl();
@@ -195,7 +198,18 @@ s32 igc_phy_hw_reset(struct igc_hw *hw)
 	wr32(IGC_CTRL, ctrl);
 	wrfl();
 
-	usleep_range(1500, 2000);
+	/* SW should guarantee 100us for the completion of the PHY reset */
+	usleep_range(100, 150);
+	do {
+		phpm = rd32(IGC_I225_PHPM);
+		timeout--;
+		udelay(1);
+	} while (!(phpm & IGC_PHY_RST_COMP) && timeout);
+
+	if (!timeout)
+		hw_dbg("Timeout is expired after a phy reset\n");
+
+	usleep_range(100, 150);
 
 	phy->ops.release(hw);
 
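The new completion poll bounds the wait at 10000 iterations of udelay(1), roughly 10 ms on top of the initial 100 us sleep, instead of sleeping a fixed 1.5-2 ms and hoping the reset finished. A self-contained userspace sketch of the same bounded-poll pattern; reg_read() is a hypothetical stand-in for rd32(IGC_I225_PHPM) and the 2500-read threshold is arbitrary:

	#include <stdint.h>
	#include <stdio.h>

	#define IGC_PHY_RST_COMP 0x0100 /* completion bit, as in the diff above */

	/* stub register read: pretend the PHY finishes after 2500 reads */
	static uint32_t reg_read(void)
	{
		static unsigned int calls;

		return (++calls > 2500) ? IGC_PHY_RST_COMP : 0;
	}

	int main(void)
	{
		uint32_t phpm = 0;
		unsigned int timeout = 10000; /* poll budget, ~1 us each */

		/* same shape as the loop added to igc_phy_hw_reset() */
		do {
			phpm = reg_read();
			timeout--;
		} while (!(phpm & IGC_PHY_RST_COMP) && timeout);

		if (!timeout)
			printf("timed out waiting for PHY reset completion\n");
		else
			printf("PHY reset complete, %u polls left\n", timeout);
		return 0;
	}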
drivers/net/ethernet/intel/igc/igc_regs.h
@@ -12,6 +12,7 @@
 #define IGC_MDIC		0x00020  /* MDI Control - RW */
 #define IGC_MDICNFG		0x00E04  /* MDC/MDIO Configuration - RW */
 #define IGC_CONNSW		0x00034  /* Copper/Fiber switch control - RW */
+#define IGC_I225_PHPM		0x00E14  /* I225 PHY Power Management */
 
 /* Internal Packet Buffer Size Registers */
 #define IGC_RXPBS		0x02404 /* Rx Packet Buffer Size - RW */