Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2017-04-18

This series contains updates mainly to ixgbe, with only one ixgbevf change.

Usha adds a check to ensure that the number of VFs created is valid given
the configured traffic classes, in order to avoid transmit hangs.

Joe Perches reduces the use of pr_cont, since its output can be interleaved
with output from other processes.

Tony cleans up the code that was overwriting the KX4 configuration, which is
set up by the NVM.  He also adds a check for MMNGC.MNG_VETO to resolve an
issue where the BMC would lose link when the driver loaded.

Don fixes up SGMII X553 configuration details that were missed in earlier
implementations, adds support for the X552 XFI backplane interface, and
cleans up an unused define that was causing confusion about supported
devices.

Emil fixes a link issue on KR parts by making sure the default setting is
applied.  He refactors the code that allocates memory for the list of MAC
addresses the VFs can use into its own function, makes some cleanups to help
readability, ensures that notification of SR-IOV being enabled is done upon
completion, and fixes an issue where the driver would crash with a NULL
pointer dereference if we failed to allocate vfinfo in
__ixgbe_enable_sriov().

Philippe Reynes updates ixgbevf to use the new API for
{get|set}_link_ksettings.

Alex increases the headroom allocation when using build_skb() on a system
with 4K pages, and fixes an issue in ixgbe_dump() where we were no longer
clearing the status bit.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit a5f62ca68f
David S. Miller <davem@davemloft.net>, 2017-04-20 16:10:31 -04:00
9 changed files with 292 additions and 198 deletions

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h

@@ -86,17 +86,62 @@
/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */
#define IXGBE_RXBUFFER_1536 1536
#define IXGBE_RXBUFFER_2K 2048
#define IXGBE_RXBUFFER_3K 3072
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
/* Attempt to maximize the headroom available for incoming frames. We
* use a 2K buffer for receives and need 1536/1534 to store the data for
* the frame. This leaves us with 512 bytes of room. From that we need
* to deduct the space needed for the shared info and the padding needed
* to IP align the frame.
*
* Note: For cache line sizes 256 or larger this value is going to end
* up negative. In these cases we should fall back to the 3K
* buffers.
*/
#if (PAGE_SIZE < 8192)
#define IXGBE_MAX_FRAME_BUILD_SKB \
(SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD)
#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN)
#define IXGBE_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K))
static inline int ixgbe_compute_pad(int rx_buf_len)
{
int page_size, pad_size;
page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
return pad_size;
}
static inline int ixgbe_skb_pad(void)
{
int rx_buf_len;
/* If a 2K buffer cannot handle a standard Ethernet frame then
* optimize padding for a 3K buffer instead of a 1.5K buffer.
*
* For a 3K buffer we need to add enough padding to allow for
* tailroom due to NET_IP_ALIGN possibly shifting us out of
* cache-line alignment.
*/
if (IXGBE_2K_TOO_SMALL_WITH_PADDING)
rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
else
rx_buf_len = IXGBE_RXBUFFER_1536;
/* if needed make room for NET_IP_ALIGN */
rx_buf_len -= NET_IP_ALIGN;
return ixgbe_compute_pad(rx_buf_len);
}
#define IXGBE_SKB_PAD ixgbe_skb_pad()
#else
#define IXGBE_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K
#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
/*
@@ -361,7 +406,7 @@ static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
return IXGBE_RXBUFFER_3K;
#if (PAGE_SIZE < 8192)
if (ring_uses_build_skb(ring))
return IXGBE_MAX_FRAME_BUILD_SKB;
return IXGBE_MAX_2K_FRAME_BUILD_SKB;
#endif
return IXGBE_RXBUFFER_2K;
}
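As a concrete illustration of the headroom math above, here is a minimal
user-space sketch of ixgbe_skb_pad()/ixgbe_compute_pad(); the page size,
cache-line size, NET_IP_ALIGN, and skb_shared_info size are illustrative
assumptions (they vary by architecture, kernel version, and config), not
values taken from this patch.

/*
 * Standalone sketch of the IXGBE_SKB_PAD computation for a
 * PAGE_SIZE < 8192 system.  All constants below are assumptions.
 */
#include <stdio.h>

#define PAGE_SIZE          4096	/* assumed: typical 4K pages */
#define SMP_CACHE_BYTES    64	/* assumed: x86-style cache line */
#define NET_SKB_PAD        64	/* assumed: max(32, L1_CACHE_BYTES) */
#define NET_IP_ALIGN       2	/* assumed: generic default */
#define SHINFO_SIZE        320	/* assumed sizeof(struct skb_shared_info) */

#define ALIGN(x, a)          (((x) + (a) - 1) / (a) * (a))
#define SKB_DATA_ALIGN(x)    ALIGN((x), SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(x) ((x) - SKB_DATA_ALIGN(SHINFO_SIZE))

#define IXGBE_RXBUFFER_1536  1536
#define IXGBE_RXBUFFER_2K    2048
#define IXGBE_RXBUFFER_3K    3072

static int ixgbe_compute_pad(int rx_buf_len)
{
	/* Round the buffer up to half a page, then return whatever room
	 * is left once the data and shared info are accounted for.
	 */
	int page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);

	return SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
}

int main(void)
{
	int two_k_too_small = (NET_SKB_PAD + IXGBE_RXBUFFER_1536) >
			      SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K);
	int rx_buf_len = two_k_too_small ?
			 IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN) :
			 IXGBE_RXBUFFER_1536;

	rx_buf_len -= NET_IP_ALIGN;	/* make room for NET_IP_ALIGN */
	printf("IXGBE_SKB_PAD = %d\n", ixgbe_compute_pad(rx_buf_len));
	return 0;	/* prints 194 with the constants assumed above */
}

Under these assumptions the 2K buffer is still big enough, so the 1536-byte
layout wins and IXGBE_SKB_PAD works out to 194 bytes of headroom; with
256-byte cache lines the 2K budget goes negative, which is why the code
above falls back to the 3K buffer.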

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c

@@ -179,6 +179,7 @@ static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82598_BX:
case IXGBE_DEV_ID_82599_KR:
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_XFI:
return SUPPORTED_10000baseKR_Full;
default:
return SUPPORTED_10000baseKX4_Full |

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

@@ -131,6 +131,7 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
@@ -508,7 +509,7 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
*/
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
int i = 0, j = 0;
int i;
char rname[16];
u32 regs[64];
@@ -570,17 +571,21 @@ static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
break;
default:
pr_info("%-15s %08x\n", reginfo->name,
IXGBE_READ_REG(hw, reginfo->ofs));
pr_info("%-15s %08x\n",
reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
return;
}
for (i = 0; i < 8; i++) {
snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
pr_err("%-15s", rname);
i = 0;
while (i < 64) {
int j;
char buf[9 * 8 + 1];
char *p = buf;
snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
for (j = 0; j < 8; j++)
pr_cont(" %08x", regs[i*8+j]);
pr_cont("\n");
p += sprintf(p, " %08x", regs[i++]);
pr_err("%-15s%s\n", rname, buf);
}
}
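The rewritten loop above sidesteps pr_cont() by assembling each row of eight
register values into a local buffer and emitting the whole line with one
printk, so concurrent output cannot interleave mid-line. A small user-space
sketch of the same batching pattern, with fabricated register names and
values:

/*
 * Sketch of the "build the whole line, then print once" pattern.
 * Register contents and the RXDCTL name are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int regs[64];
	int i = 0;

	for (int n = 0; n < 64; n++)
		regs[n] = 0xdead0000u | n;	/* fake register contents */

	while (i < 64) {
		char buf[9 * 8 + 1];	/* eight " %08x" cells + NUL */
		char *p = buf;
		char rname[16];

		snprintf(rname, sizeof(rname), "RXDCTL[%d-%d]", i, i + 7);
		for (int j = 0; j < 8; j++)
			p += sprintf(p, " %08x", regs[i++]);
		printf("%-15s%s\n", rname, buf);	/* one atomic line */
	}
	return 0;
}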
@@ -601,7 +606,6 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
struct ixgbe_ring *rx_ring;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer_info;
u32 staterr;
int i = 0;
if (!netif_msg_hw(adapter))
@@ -701,7 +705,18 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
tx_buffer = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
if (dma_unmap_len(tx_buffer, len) > 0) {
pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p",
const char *ring_desc;
if (i == tx_ring->next_to_use &&
i == tx_ring->next_to_clean)
ring_desc = " NTC/U";
else if (i == tx_ring->next_to_use)
ring_desc = " NTU";
else if (i == tx_ring->next_to_clean)
ring_desc = " NTC";
else
ring_desc = "";
pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s",
i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
@@ -709,16 +724,8 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
dma_unmap_len(tx_buffer, len),
tx_buffer->next_to_watch,
(u64)tx_buffer->time_stamp,
tx_buffer->skb);
if (i == tx_ring->next_to_use &&
i == tx_ring->next_to_clean)
pr_cont(" NTC/U\n");
else if (i == tx_ring->next_to_use)
pr_cont(" NTU\n");
else if (i == tx_ring->next_to_clean)
pr_cont(" NTC\n");
else
pr_cont("\n");
tx_buffer->skb,
ring_desc);
if (netif_msg_pktdata(adapter) &&
tx_buffer->skb)
@@ -797,34 +804,44 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
pr_info("------------------------------------\n");
pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
pr_info("------------------------------------\n");
pr_info("%s%s%s",
pr_info("%s%s%s\n",
"R [desc] [ PktBuf A0] ",
"[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
"<-- Adv Rx Read format\n");
pr_info("%s%s%s",
"<-- Adv Rx Read format");
pr_info("%s%s%s\n",
"RWB[desc] [PcsmIpSHl PtRs] ",
"[vl er S cks ln] ---------------- [bi->skb ] ",
"<-- Adv Rx Write-Back format\n");
"<-- Adv Rx Write-Back format");
for (i = 0; i < rx_ring->count; i++) {
const char *ring_desc;
if (i == rx_ring->next_to_use)
ring_desc = " NTU";
else if (i == rx_ring->next_to_clean)
ring_desc = " NTC";
else
ring_desc = "";
rx_buffer_info = &rx_ring->rx_buffer_info[i];
rx_desc = IXGBE_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
if (staterr & IXGBE_RXD_STAT_DD) {
if (rx_desc->wb.upper.length) {
/* Descriptor Done */
pr_info("RWB[0x%03X] %016llX "
"%016llX ---------------- %p", i,
pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n",
i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
rx_buffer_info->skb);
rx_buffer_info->skb,
ring_desc);
} else {
pr_info("R [0x%03X] %016llX "
"%016llX %016llX %p", i,
pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n",
i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)rx_buffer_info->dma,
rx_buffer_info->skb);
rx_buffer_info->skb,
ring_desc);
if (netif_msg_pktdata(adapter) &&
rx_buffer_info->dma) {
@@ -835,14 +852,6 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
ixgbe_rx_bufsz(rx_ring), true);
}
}
if (i == rx_ring->next_to_use)
pr_cont(" NTU\n");
else if (i == rx_ring->next_to_clean)
pr_cont(" NTC\n");
else
pr_cont("\n");
}
}
}
@@ -3802,7 +3811,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
/* Limit the maximum frame size so we don't overrun the skb */
if (ring_uses_build_skb(ring) &&
!test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
rxdctl |= IXGBE_MAX_FRAME_BUILD_SKB |
rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
IXGBE_RXDCTL_RLPML_EN;
#endif
}
@@ -3972,8 +3981,8 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
if ((max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
(max_frame > IXGBE_MAX_FRAME_BUILD_SKB))
if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
(max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
#endif
}
@@ -5944,10 +5953,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
/* assign number of SR-IOV VFs */
if (hw->mac.type != ixgbe_mac_82598EB) {
if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
adapter->num_vfs = 0;
max_vfs = 0;
e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
} else {
adapter->num_vfs = max_vfs;
}
}
#endif /* CONFIG_PCI_IOV */
@@ -9810,7 +9817,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ixgbe_init_mbx_params_pf(hw);
hw->mbx.ops = ii->mbx_ops;
pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
ixgbe_enable_sriov(adapter);
ixgbe_enable_sriov(adapter, max_vfs);
skip_sriov:
#endif

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c

@@ -792,7 +792,8 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
/* Setup link based on the new speed settings */
hw->phy.ops.setup_link(hw);
if (hw->phy.ops.setup_link)
hw->phy.ops.setup_link(hw);
return 0;
}
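The fix above is the classic guard for an optional ops callback: some PHY
types leave setup_link NULL (for example the new XFI backplane, where link
is managed by hardware), so callers must test the pointer before
dispatching. A minimal sketch, with simplified stand-in types rather than
the driver's real structures:

/*
 * Guarding an optional function pointer before calling it.
 * Types and names here are illustrative stand-ins.
 */
#include <stdio.h>

struct phy_ops {
	int (*setup_link)(void *hw);	/* optional; may be NULL */
};

static int setup_link_guarded(struct phy_ops *ops, void *hw)
{
	if (ops->setup_link)		/* the added NULL check */
		return ops->setup_link(hw);
	return 0;			/* link managed by HW: nothing to do */
}

int main(void)
{
	struct phy_ops ops = { .setup_link = NULL };

	printf("status = %d\n", setup_link_guarded(&ops, NULL));
	return 0;
}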

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c

@@ -46,88 +46,96 @@
#include "ixgbe_sriov.h"
#ifdef CONFIG_PCI_IOV
static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
unsigned int num_vfs)
{
struct ixgbe_hw *hw = &adapter->hw;
int num_vf_macvlans, i;
struct vf_macvlans *mv_list;
int num_vf_macvlans, i;
num_vf_macvlans = hw->mac.num_rar_entries -
(IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
if (!num_vf_macvlans)
return;
mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
GFP_KERNEL);
if (mv_list) {
/* Initialize list of VF macvlans */
INIT_LIST_HEAD(&adapter->vf_mvs.l);
for (i = 0; i < num_vf_macvlans; i++) {
mv_list[i].vf = -1;
mv_list[i].free = true;
list_add(&mv_list[i].l, &adapter->vf_mvs.l);
}
adapter->mv_list = mv_list;
}
}
static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
unsigned int num_vfs)
{
struct ixgbe_hw *hw = &adapter->hw;
int i;
adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
/* Enable VMDq flag so device will be set in VM mode */
adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
if (!adapter->ring_feature[RING_F_VMDQ].limit)
adapter->ring_feature[RING_F_VMDQ].limit = 1;
adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs;
num_vf_macvlans = hw->mac.num_rar_entries -
(IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
/* Allocate memory for per VF control structures */
adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
GFP_KERNEL);
if (!adapter->vfinfo)
return -ENOMEM;
adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
sizeof(struct vf_macvlans),
GFP_KERNEL);
if (mv_list) {
/* Initialize list of VF macvlans */
INIT_LIST_HEAD(&adapter->vf_mvs.l);
for (i = 0; i < num_vf_macvlans; i++) {
mv_list->vf = -1;
mv_list->free = true;
list_add(&mv_list->l, &adapter->vf_mvs.l);
mv_list++;
}
}
adapter->num_vfs = num_vfs;
ixgbe_alloc_vf_macvlans(adapter, num_vfs);
adapter->ring_feature[RING_F_VMDQ].offset = num_vfs;
/* Initialize default switching mode VEB */
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
adapter->bridge_mode = BRIDGE_MODE_VEB;
/* If call to enable VFs succeeded then allocate memory
* for per VF control structures.
*/
adapter->vfinfo =
kcalloc(adapter->num_vfs,
sizeof(struct vf_data_storage), GFP_KERNEL);
if (adapter->vfinfo) {
/* limit traffic classes based on VFs enabled */
if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
(adapter->num_vfs < 16)) {
adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
} else if (adapter->num_vfs < 32) {
adapter->dcb_cfg.num_tcs.pg_tcs = 4;
adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
} else {
adapter->dcb_cfg.num_tcs.pg_tcs = 1;
adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
}
/* Disable RSC when in SR-IOV mode */
adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
IXGBE_FLAG2_RSC_ENABLED);
for (i = 0; i < adapter->num_vfs; i++) {
/* enable spoof checking for all VFs */
adapter->vfinfo[i].spoofchk_enabled = true;
/* We support VF RSS querying only for 82599 and x540
* devices at the moment. These devices share RSS
* indirection table and RSS hash key with PF therefore
* we want to disable the querying by default.
*/
adapter->vfinfo[i].rss_query_enabled = 0;
/* Untrust all VFs */
adapter->vfinfo[i].trusted = false;
/* set the default xcast mode */
adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
}
return 0;
/* limit traffic classes based on VFs enabled */
if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
} else if (num_vfs < 32) {
adapter->dcb_cfg.num_tcs.pg_tcs = 4;
adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
} else {
adapter->dcb_cfg.num_tcs.pg_tcs = 1;
adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
}
return -ENOMEM;
/* Disable RSC when in SR-IOV mode */
adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
IXGBE_FLAG2_RSC_ENABLED);
for (i = 0; i < num_vfs; i++) {
/* enable spoof checking for all VFs */
adapter->vfinfo[i].spoofchk_enabled = true;
/* We support VF RSS querying only for 82599 and x540
* devices at the moment. These devices share RSS
* indirection table and RSS hash key with PF therefore
* we want to disable the querying by default.
*/
adapter->vfinfo[i].rss_query_enabled = 0;
/* Untrust all VFs */
adapter->vfinfo[i].trusted = false;
/* set the default xcast mode */
adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
}
e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs);
return 0;
}
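The restructured function also fixes the NULL pointer crash by allocating
vfinfo first and returning -ENOMEM before any of the later set-up runs,
instead of wrapping that set-up in an if (adapter->vfinfo) block. A compact
sketch of the early-return shape, using simplified stand-in structures:

/*
 * Allocate first, fail fast: nothing below the check may run
 * without vfinfo.  Structures are simplified for illustration.
 */
#include <stdlib.h>

struct vf_data { int spoofchk_enabled; };
struct adapter { struct vf_data *vfinfo; unsigned int num_vfs; };

static int enable_sriov_example(struct adapter *a, unsigned int num_vfs)
{
	a->vfinfo = calloc(num_vfs, sizeof(*a->vfinfo));
	if (!a->vfinfo)
		return -1;	/* stands in for -ENOMEM */

	a->num_vfs = num_vfs;
	for (unsigned int i = 0; i < num_vfs; i++)
		a->vfinfo[i].spoofchk_enabled = 1;	/* per-VF defaults */
	return 0;
}

int main(void)
{
	struct adapter a = { 0 };

	return enable_sriov_example(&a, 4);
}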
/**
@@ -165,12 +173,13 @@ static void ixgbe_get_vfs(struct ixgbe_adapter *adapter)
/* Note this function is called when the user wants to enable SR-IOV
* VFs using the now deprecated module parameter
*/
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
{
int pre_existing_vfs = 0;
unsigned int num_vfs;
pre_existing_vfs = pci_num_vf(adapter->pdev);
if (!pre_existing_vfs && !adapter->num_vfs)
if (!pre_existing_vfs && !max_vfs)
return;
/* If there are pre-existing VFs then we have to force
@@ -180,7 +189,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
* have been created via the new PCI SR-IOV sysfs interface.
*/
if (pre_existing_vfs) {
adapter->num_vfs = pre_existing_vfs;
num_vfs = pre_existing_vfs;
dev_warn(&adapter->pdev->dev,
"Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
} else {
@@ -192,17 +201,16 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
* physical function. If the user requests greater than
* 63 VFs then it is an error - reset to default of zero.
*/
adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
err = pci_enable_sriov(adapter->pdev, num_vfs);
if (err) {
e_err(probe, "Failed to enable PCI sriov: %d\n", err);
adapter->num_vfs = 0;
return;
}
}
if (!__ixgbe_enable_sriov(adapter)) {
if (!__ixgbe_enable_sriov(adapter, num_vfs)) {
ixgbe_get_vfs(adapter);
return;
}
@@ -298,6 +306,7 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
#ifdef CONFIG_PCI_IOV
struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
int err = 0;
u8 num_tc;
int i;
int pre_existing_vfs = pci_num_vf(dev);
@@ -310,23 +319,41 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
return err;
/* While the SR-IOV capability structure reports total VFs to be 64,
* we have to limit the actual number allocated based on two factors.
* we limit the actual number allocated as below based on two factors.
* Num_TCs MAX_VFs
* 1 63
* <=4 31
* >4 15
* First, we reserve some transmit/receive resources for the PF.
* Second, VMDQ also uses the same pools that SR-IOV does. We need to
* account for this, so that we don't accidentally allocate more VFs
* than we have available pools. The PCI bus driver already checks for
* other values out of range.
*/
if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VF_FUNCTIONS)
return -EPERM;
num_tc = netdev_get_num_tc(adapter->netdev);
adapter->num_vfs = num_vfs;
if (num_tc > 4) {
if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) {
e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC);
return -EPERM;
}
} else if ((num_tc > 1) && (num_tc <= 4)) {
if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) {
e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC);
return -EPERM;
}
} else {
if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) {
e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC);
return -EPERM;
}
}
err = __ixgbe_enable_sriov(adapter);
err = __ixgbe_enable_sriov(adapter, num_vfs);
if (err)
return err;
for (i = 0; i < adapter->num_vfs; i++)
for (i = 0; i < num_vfs; i++)
ixgbe_vf_configuration(dev, (i | 0x10000000));
/* reset before enabling SRIOV to avoid mailbox issues */
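The checks above encode the Num_TCs/MAX_VFs table from the comment: pools
are shared between the PF and the VFs, so the VF budget shrinks as traffic
classes grow. A compact sketch of the same limit logic, assuming a single
PF pool (the real code uses adapter->num_rx_pools); the helper function is
illustrative, not part of this patch:

/*
 * Pool budget by TC count: 1 TC -> 63 VFs, <=4 TCs -> 31, >4 TCs -> 15,
 * once one pool is reserved for the PF.  Constants mirror the
 * IXGBE_MAX_VFS_{1,4,8}TC defines added in ixgbe_sriov.h.
 */
#include <stdio.h>

#define IXGBE_MAX_VF_FUNCTIONS	64
#define IXGBE_MAX_VFS_1TC	IXGBE_MAX_VF_FUNCTIONS
#define IXGBE_MAX_VFS_4TC	32
#define IXGBE_MAX_VFS_8TC	16

/* Return the pool budget for a TC count; VFs + PF pools must fit in it */
static int max_pools_for_tcs(int num_tc)
{
	if (num_tc > 4)
		return IXGBE_MAX_VFS_8TC;
	if (num_tc > 1)
		return IXGBE_MAX_VFS_4TC;
	return IXGBE_MAX_VFS_1TC;
}

int main(void)
{
	int num_rx_pools = 1;	/* assumed: PF owns one pool */

	for (int tc = 1; tc <= 8; tc *= 2)
		printf("%d TCs -> at most %d VFs\n",
		       tc, max_pools_for_tcs(tc) - num_rx_pools);
	return 0;	/* prints 63, 31, 31, 15 */
}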

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h

@@ -33,6 +33,9 @@
* 63 (IXGBE_MAX_VF_FUNCTIONS - 1)
*/
#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1)
#define IXGBE_MAX_VFS_1TC IXGBE_MAX_VF_FUNCTIONS
#define IXGBE_MAX_VFS_4TC 32
#define IXGBE_MAX_VFS_8TC 16
#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
@@ -56,7 +59,7 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter);
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs);
#endif
int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs);

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h

@@ -85,6 +85,7 @@
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0
#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2
#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3
#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4
@@ -1387,9 +1388,6 @@ struct ixgbe_thermal_sensor_data {
#define ATH_PHY_ID 0x03429050
#define AQ_FW_REV 0x20
/* PHY Types */
#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
/* Special PHY Init Routine */
#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
#define IXGBE_PHY_INIT_END_NL 0xFFFF
@@ -3128,6 +3126,7 @@ enum ixgbe_phy_type {
ixgbe_phy_aq,
ixgbe_phy_x550em_kr,
ixgbe_phy_x550em_kx4,
ixgbe_phy_x550em_xfi,
ixgbe_phy_x550em_ext_t,
ixgbe_phy_cu_unknown,
ixgbe_phy_qt,
@@ -3754,15 +3753,6 @@ struct ixgbe_info {
#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3)
#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31)
#define IXGBE_KX4_LINK_CNTL_1 0x4C
#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX BIT(16)
#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 BIT(17)
#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX BIT(24)
#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 BIT(25)
#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE BIT(29)
#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP BIT(30)
#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART BIT(31)
#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
@@ -3779,12 +3769,14 @@ struct ixgbe_info {
#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
#define IXGBE_SB_IOSF_CTRL_BUSY BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1
#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
#define IXGBE_NW_MNG_IF_SEL 0x00011178
#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1)
#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M BIT(17)
#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M BIT(18)
#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G BIT(19)
#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G BIT(20)
#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G BIT(21)
#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23)
#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24)
#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c

@@ -320,6 +320,9 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_X_KX4:
hw->phy.type = ixgbe_phy_x550em_kx4;
break;
case IXGBE_DEV_ID_X550EM_X_XFI:
hw->phy.type = ixgbe_phy_x550em_xfi;
break;
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
@@ -334,6 +337,16 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
return ixgbe_identify_phy_generic(hw);
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
hw->phy.type = ixgbe_phy_fw;
hw->phy.ops.read_reg = NULL;
hw->phy.ops.write_reg = NULL;
if (hw->bus.lan_id)
hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
else
hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
break;
default:
break;
}
@@ -2215,8 +2228,38 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
else
*speed = IXGBE_LINK_SPEED_10GB_FULL;
} else {
*speed = IXGBE_LINK_SPEED_10GB_FULL |
IXGBE_LINK_SPEED_1GB_FULL;
switch (hw->phy.type) {
case ixgbe_phy_x550em_kx4:
*speed = IXGBE_LINK_SPEED_1GB_FULL |
IXGBE_LINK_SPEED_2_5GB_FULL |
IXGBE_LINK_SPEED_10GB_FULL;
break;
case ixgbe_phy_x550em_xfi:
*speed = IXGBE_LINK_SPEED_1GB_FULL |
IXGBE_LINK_SPEED_10GB_FULL;
break;
case ixgbe_phy_sgmii:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
case ixgbe_phy_x550em_kr:
if (hw->mac.type == ixgbe_mac_x550em_a) {
/* check different backplane modes */
if (hw->phy.nw_mng_if_sel &
IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
break;
} else if (hw->device_id ==
IXGBE_DEV_ID_X550EM_A_KR_L) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
}
}
/* fall through */
default:
*speed = IXGBE_LINK_SPEED_10GB_FULL |
IXGBE_LINK_SPEED_1GB_FULL;
break;
}
*autoneg = true;
}
return 0;
@@ -2473,44 +2516,6 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
return ixgbe_restart_an_internal_phy_x550em(hw);
}
/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
* @hw: pointer to hardware structure
*
* Configures the integrated KX4 PHY.
**/
static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
{
s32 status;
u32 reg_val;
status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
hw->bus.lan_id, &reg_val);
if (status)
return status;
reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 |
IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX);
reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE;
/* Advertise 10G support. */
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4;
/* Advertise 1G support. */
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX;
/* Restart auto-negotiation. */
reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
hw->bus.lan_id, reg_val);
return status;
}
/**
* ixgbe_setup_kr_x550em - Configure the KR PHY
* @hw: pointer to hardware structure
@@ -2521,6 +2526,9 @@ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
return 0;
if (ixgbe_check_reset_blocked(hw))
return 0;
return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
}
@@ -3134,7 +3142,7 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
/* Set functions pointers based on phy type */
switch (hw->phy.type) {
case ixgbe_phy_x550em_kx4:
phy->ops.setup_link = ixgbe_setup_kx4_x550em;
phy->ops.setup_link = NULL;
phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
@@ -3143,6 +3151,12 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
case ixgbe_phy_x550em_xfi:
/* link is managed by HW */
phy->ops.setup_link = NULL;
phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
case ixgbe_phy_x550em_ext_t:
/* Save NW management interface connected on board. This is used
* to determine internal PHY mode
@@ -3164,6 +3178,9 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
phy->ops.reset = ixgbe_reset_phy_t_X550em;
break;
case ixgbe_phy_sgmii:
phy->ops.setup_link = NULL;
break;
case ixgbe_phy_fw:
phy->ops.setup_link = ixgbe_setup_fw_link;
phy->ops.reset = ixgbe_reset_phy_fw;
@@ -3193,6 +3210,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
/* Fallthrough */
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_KX4:
case IXGBE_DEV_ID_X550EM_X_XFI:
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
media_type = ixgbe_media_type_backplane;
@@ -3780,7 +3798,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = {
.get_media_type = ixgbe_get_media_type_X550em,
.get_san_mac_addr = NULL,
.get_wwn_prefix = NULL,
.setup_link = NULL, /* defined later */
.setup_link = &ixgbe_setup_mac_link_X540,
.get_link_capabilities = ixgbe_get_link_capabilities_X550em,
.get_bus_info = ixgbe_get_bus_info_X550em,
.setup_sfp = ixgbe_setup_sfp_modules_X550em,

View File

@@ -91,18 +91,18 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
static int ixgbevf_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
static int ixgbevf_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 link_speed = 0;
bool link_up;
ecmd->supported = SUPPORTED_10000baseT_Full;
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->transceiver = XCVR_DUMMY1;
ecmd->port = -1;
ethtool_link_ksettings_zero_link_mode(cmd, supported);
ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
cmd->base.autoneg = AUTONEG_DISABLE;
cmd->base.port = -1;
hw->mac.get_link_status = 1;
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
@@ -122,11 +122,11 @@ static int ixgbevf_get_settings(struct net_device *netdev,
break;
}
ethtool_cmd_speed_set(ecmd, speed);
ecmd->duplex = DUPLEX_FULL;
cmd->base.speed = speed;
cmd->base.duplex = DUPLEX_FULL;
} else {
ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
ecmd->duplex = DUPLEX_UNKNOWN;
cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;
}
return 0;
@@ -885,7 +885,6 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
}
static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_settings = ixgbevf_get_settings,
.get_drvinfo = ixgbevf_get_drvinfo,
.get_regs_len = ixgbevf_get_regs_len,
.get_regs = ixgbevf_get_regs,
@@ -905,6 +904,7 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size,
.get_rxfh_key_size = ixgbevf_get_rxfh_key_size,
.get_rxfh = ixgbevf_get_rxfh,
.get_link_ksettings = ixgbevf_get_link_ksettings,
};
void ixgbevf_set_ethtool_ops(struct net_device *netdev)