mirror of https://gitee.com/openkylin/linux.git
Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says: ==================== Intel Wired LAN Driver Updates 2018-11-07 This series contains updates to almost all of the Intel wired LAN drivers. Lance Roy replaces a spin_is_locked() check with lockdep_assert_held() in the igbvf driver, as part of the effort to remove spin_is_locked(). Colin Ian King fixes a potential null pointer dereference by adding a check in ixgbe. Also fixes the igc driver by properly assigning the return error code of a function call, so that we can properly check it. Shannon Nelson updates the ixgbe driver to not block IPsec offload when in VEPA mode; in VEB mode, IPsec offload is still blocked because the device drops packets into a black hole. Jake adds support for software timestamping for packets sent over ixgbevf. Also modifies i40e, iavf, igb, igc, and ixgbe to delay calling skb_tx_timestamp() to the latest point possible, which is just prior to notifying the hardware of the new Tx packet. Todd adds the new WoL filter flag so that we properly report that we do not support this new feature. YueHaibing from Huawei fixes the igc driver by cleaning up variables that are not "really" used. Dan Carpenter cleans up igc whitespace issues. Miroslav Lichvar fixes a potential underflow issue in the e1000e timecounter, modifying the driver to use timecounter_cyc2time() to allow non-monotonic SYSTIM readings. Sasha provides additional igc cleanups based on community feedback. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
5867b33014
|
@ -173,10 +173,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
|
|||
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
|
||||
ptp_clock_info);
|
||||
unsigned long flags;
|
||||
u64 ns;
|
||||
u64 cycles, ns;
|
||||
|
||||
spin_lock_irqsave(&adapter->systim_lock, flags);
|
||||
ns = timecounter_read(&adapter->tc);
|
||||
|
||||
/* Use timecounter_cyc2time() to allow non-monotonic SYSTIM readings */
|
||||
cycles = adapter->cc.read(&adapter->cc);
|
||||
ns = timecounter_cyc2time(&adapter->tc, cycles);
|
||||
|
||||
spin_unlock_irqrestore(&adapter->systim_lock, flags);
|
||||
|
||||
*ts = ns_to_timespec64(ns);
|
||||
|
@ -232,9 +236,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
|
|||
systim_overflow_work.work);
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
struct timespec64 ts;
|
||||
u64 ns;
|
||||
|
||||
adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
|
||||
/* Update the timecounter */
|
||||
ns = timecounter_read(&adapter->tc);
|
||||
|
||||
ts = ns_to_timespec64(ns);
|
||||
e_dbg("SYSTIM overflow check at %lld.%09lu\n",
|
||||
(long long) ts.tv_sec, ts.tv_nsec);
|
||||
|
||||
|
|
|
@ -2377,7 +2377,8 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
|
|||
return -EOPNOTSUPP;
|
||||
|
||||
/* only magic packet is supported */
|
||||
if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
|
||||
if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)
|
||||
| (wol->wolopts != WAKE_FILTER))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* is this a new value? */
|
||||
|
|
|
@ -3473,6 +3473,8 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
|
|||
tx_desc->cmd_type_offset_bsz =
|
||||
build_ctob(td_cmd, td_offset, size, td_tag);
|
||||
|
||||
skb_tx_timestamp(skb);
|
||||
|
||||
/* Force memory writes to complete before letting h/w know there
|
||||
* are new descriptors to fetch.
|
||||
*
|
||||
|
@ -3652,8 +3654,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
|
|||
if (tsyn)
|
||||
tx_flags |= I40E_TX_FLAGS_TSYN;
|
||||
|
||||
skb_tx_timestamp(skb);
|
||||
|
||||
/* always enable CRC insertion offload */
|
||||
td_cmd |= I40E_TX_DESC_CMD_ICRC;
|
||||
|
||||
|
|
|
@ -2343,6 +2343,8 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
|
|||
tx_desc->cmd_type_offset_bsz =
|
||||
build_ctob(td_cmd, td_offset, size, td_tag);
|
||||
|
||||
skb_tx_timestamp(skb);
|
||||
|
||||
/* Force memory writes to complete before letting h/w know there
|
||||
* are new descriptors to fetch.
|
||||
*
|
||||
|
@ -2461,8 +2463,6 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
|
|||
if (tso < 0)
|
||||
goto out_drop;
|
||||
|
||||
skb_tx_timestamp(skb);
|
||||
|
||||
/* always enable CRC insertion offload */
|
||||
td_cmd |= IAVF_TX_DESC_CMD_ICRC;
|
||||
|
||||
|
|
|
@ -2113,7 +2113,7 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
|
|||
{
|
||||
struct igb_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
|
||||
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
|
||||
|
|
|
@ -6019,6 +6019,8 @@ static int igb_tx_map(struct igb_ring *tx_ring,
|
|||
/* set the timestamp */
|
||||
first->time_stamp = jiffies;
|
||||
|
||||
skb_tx_timestamp(skb);
|
||||
|
||||
/* Force memory writes to complete before letting h/w know there
|
||||
* are new descriptors to fetch. (Only applicable for weak-ordered
|
||||
* memory model archs, such as IA-64).
|
||||
|
@ -6147,8 +6149,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
|
|||
else if (!tso)
|
||||
igb_tx_csum(tx_ring, first);
|
||||
|
||||
skb_tx_timestamp(skb);
|
||||
|
||||
if (igb_tx_map(tx_ring, first, hdr_len))
|
||||
goto cleanup_tx_tstamp;
|
||||
|
||||
|
|
|
@ -241,7 +241,7 @@ static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
|
|||
s32 err;
|
||||
u16 i;
|
||||
|
||||
WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock));
|
||||
lockdep_assert_held(&hw->mbx_lock);
|
||||
|
||||
/* lock the mailbox to prevent pf/vf race condition */
|
||||
err = e1000_obtain_mbx_lock_vf(hw);
|
||||
|
@ -279,7 +279,7 @@ static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
|
|||
s32 err;
|
||||
u16 i;
|
||||
|
||||
WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock));
|
||||
lockdep_assert_held(&hw->mbx_lock);
|
||||
|
||||
/* lock the mailbox to prevent pf/vf race condition */
|
||||
err = e1000_obtain_mbx_lock_vf(hw);
|
||||
|
|
|
@ -5,23 +5,14 @@
|
|||
#define _IGC_H_
|
||||
|
||||
#include <linux/kobject.h>
|
||||
|
||||
#include <linux/pci.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#include <linux/ethtool.h>
|
||||
|
||||
#include <linux/sctp.h>
|
||||
|
||||
#define IGC_ERR(args...) pr_err("igc: " args)
|
||||
|
||||
#define PFX "igc: "
|
||||
|
||||
#include <linux/timecounter.h>
|
||||
#include <linux/net_tstamp.h>
|
||||
#include <linux/ptp_clock_kernel.h>
|
||||
|
||||
#include "igc_hw.h"
|
||||
|
||||
/* main */
|
||||
|
|
|
@ -237,7 +237,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
|
|||
{
|
||||
struct igc_phy_info *phy = &hw->phy;
|
||||
s32 ret_val = 0;
|
||||
u32 ctrl_ext;
|
||||
|
||||
if (hw->phy.media_type != igc_media_type_copper) {
|
||||
phy->type = igc_phy_none;
|
||||
|
@ -247,8 +246,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
|
|||
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
|
||||
phy->reset_delay_us = 100;
|
||||
|
||||
ctrl_ext = rd32(IGC_CTRL_EXT);
|
||||
|
||||
/* set lan id */
|
||||
hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
|
||||
IGC_STATUS_FUNC_SHIFT;
|
||||
|
@ -287,8 +284,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
|
|||
static s32 igc_get_invariants_base(struct igc_hw *hw)
|
||||
{
|
||||
struct igc_mac_info *mac = &hw->mac;
|
||||
u32 link_mode = 0;
|
||||
u32 ctrl_ext = 0;
|
||||
s32 ret_val = 0;
|
||||
|
||||
switch (hw->device_id) {
|
||||
|
@ -302,9 +297,6 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
|
|||
|
||||
hw->phy.media_type = igc_media_type_copper;
|
||||
|
||||
ctrl_ext = rd32(IGC_CTRL_EXT);
|
||||
link_mode = ctrl_ext & IGC_CTRL_EXT_LINK_MODE_MASK;
|
||||
|
||||
/* mac initialization and operations */
|
||||
ret_val = igc_init_mac_params_base(hw);
|
||||
if (ret_val)
|
||||
|
|
|
@ -865,6 +865,8 @@ static int igc_tx_map(struct igc_ring *tx_ring,
|
|||
/* set the timestamp */
|
||||
first->time_stamp = jiffies;
|
||||
|
||||
skb_tx_timestamp(skb);
|
||||
|
||||
/* Force memory writes to complete before letting h/w know there
|
||||
* are new descriptors to fetch. (Only applicable for weak-ordered
|
||||
* memory model archs, such as IA-64).
|
||||
|
@ -959,8 +961,6 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
|
|||
first->bytecount = skb->len;
|
||||
first->gso_segs = 1;
|
||||
|
||||
skb_tx_timestamp(skb);
|
||||
|
||||
/* record initial flags and protocol */
|
||||
first->tx_flags = tx_flags;
|
||||
first->protocol = protocol;
|
||||
|
@ -1108,7 +1108,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
|
|||
|
||||
/* update pointers within the skb to store the data */
|
||||
skb_reserve(skb, IGC_SKB_PAD);
|
||||
__skb_put(skb, size);
|
||||
__skb_put(skb, size);
|
||||
|
||||
/* update buffer offset */
|
||||
#if (PAGE_SIZE < 8192)
|
||||
|
@ -1160,9 +1160,9 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
|
|||
(va + headlen) - page_address(rx_buffer->page),
|
||||
size, truesize);
|
||||
#if (PAGE_SIZE < 8192)
|
||||
rx_buffer->page_offset ^= truesize;
|
||||
rx_buffer->page_offset ^= truesize;
|
||||
#else
|
||||
rx_buffer->page_offset += truesize;
|
||||
rx_buffer->page_offset += truesize;
|
||||
#endif
|
||||
} else {
|
||||
rx_buffer->pagecnt_bias++;
|
||||
|
@ -1668,8 +1668,8 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
|
|||
tx_buffer->next_to_watch,
|
||||
jiffies,
|
||||
tx_buffer->next_to_watch->wb.status);
|
||||
netif_stop_subqueue(tx_ring->netdev,
|
||||
tx_ring->queue_index);
|
||||
netif_stop_subqueue(tx_ring->netdev,
|
||||
tx_ring->queue_index);
|
||||
|
||||
/* we are about to reset, no point in enabling stuff */
|
||||
return true;
|
||||
|
@ -1699,20 +1699,6 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
|
|||
return !!budget;
|
||||
}
|
||||
|
||||
/**
|
||||
* igc_ioctl - I/O control method
|
||||
* @netdev: network interface device structure
|
||||
* @ifreq: frequency
|
||||
* @cmd: command
|
||||
*/
|
||||
static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
|
||||
{
|
||||
switch (cmd) {
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* igc_up - Open the interface and prepare it to handle traffic
|
||||
* @adapter: board private structure
|
||||
|
@ -3358,7 +3344,7 @@ static int __igc_open(struct net_device *netdev, bool resuming)
|
|||
goto err_req_irq;
|
||||
|
||||
/* Notify the stack of the actual queue counts. */
|
||||
netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
|
||||
err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
|
||||
if (err)
|
||||
goto err_set_queues;
|
||||
|
||||
|
@ -3445,7 +3431,6 @@ static const struct net_device_ops igc_netdev_ops = {
|
|||
.ndo_set_mac_address = igc_set_mac,
|
||||
.ndo_change_mtu = igc_change_mtu,
|
||||
.ndo_get_stats = igc_get_stats,
|
||||
.ndo_do_ioctl = igc_ioctl,
|
||||
};
|
||||
|
||||
/* PCIe configuration access */
|
||||
|
@ -3532,19 +3517,16 @@ static int igc_probe(struct pci_dev *pdev,
|
|||
struct net_device *netdev;
|
||||
struct igc_hw *hw;
|
||||
const struct igc_info *ei = igc_info_tbl[ent->driver_data];
|
||||
int err, pci_using_dac;
|
||||
int err;
|
||||
|
||||
err = pci_enable_device_mem(pdev);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
pci_using_dac = 0;
|
||||
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
|
||||
if (!err) {
|
||||
err = dma_set_coherent_mask(&pdev->dev,
|
||||
DMA_BIT_MASK(64));
|
||||
if (!err)
|
||||
pci_using_dac = 1;
|
||||
} else {
|
||||
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
|
||||
if (err) {
|
||||
|
|
|
@ -2206,7 +2206,8 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
|
|||
{
|
||||
struct ixgbe_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
|
||||
if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
|
||||
WAKE_FILTER))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (ixgbe_wol_exclusion(adapter, wol))
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
#include "ixgbe.h"
|
||||
#include <net/xfrm.h>
|
||||
#include <crypto/aead.h>
|
||||
#include <linux/if_bridge.h>
|
||||
|
||||
#define IXGBE_IPSEC_KEY_BITS 160
|
||||
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
|
||||
|
@ -693,7 +694,8 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
|
|||
} else {
|
||||
struct tx_sa tsa;
|
||||
|
||||
if (adapter->num_vfs)
|
||||
if (adapter->num_vfs &&
|
||||
adapter->bridge_mode != BRIDGE_MODE_VEPA)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* find the first unused index */
|
||||
|
|
|
@ -8269,6 +8269,8 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
|
|||
/* set the timestamp */
|
||||
first->time_stamp = jiffies;
|
||||
|
||||
skb_tx_timestamp(skb);
|
||||
|
||||
/*
|
||||
* Force memory writes to complete before letting h/w know there
|
||||
* are new descriptors to fetch. (Only applicable for weak-ordered
|
||||
|
@ -8646,8 +8648,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
|
|||
}
|
||||
}
|
||||
|
||||
skb_tx_timestamp(skb);
|
||||
|
||||
#ifdef CONFIG_PCI_IOV
|
||||
/*
|
||||
* Use the l2switch_enable flag - would be false if the DMA
|
||||
|
@ -10517,7 +10517,8 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
|
|||
ixgbe_configure_rx_ring(adapter, rx_ring);
|
||||
|
||||
clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
|
||||
clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
|
||||
if (xdp_ring)
|
||||
clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -4016,6 +4016,8 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
|
|||
/* set the timestamp */
|
||||
first->time_stamp = jiffies;
|
||||
|
||||
skb_tx_timestamp(skb);
|
||||
|
||||
/* Force memory writes to complete before letting h/w know there
|
||||
* are new descriptors to fetch. (Only applicable for weak-ordered
|
||||
* memory model archs, such as IA-64).
|
||||
|
|
Loading…
Reference in New Issue