Merge branch 'amd-xgbe-updates'

Tom Lendacky says:

====================
amd-xgbe: AMD XGBE driver updates 2016-11-03

This patch series is targeted at preparing the driver for a new PCI version
of the hardware.  After this series is applied, a follow-on series will
introduce the support for the PCI version of the hardware.

The following updates and fixes are included in this driver update series:

- Fix formatting of PCS debug register dump
- Prepare for priority-based FIFO allocation
- Implement priority-based FIFO allocation
- Prepare for working with more than one type of PCS/PHY
- Prepare for the introduction of clause 37 auto-negotiation
- Add support for clause 37 auto-negotiation
- Prepare for supporting a new PCS register access method
- Add support for 64-bit management counter registers
- Update DMA channel status determination
- Prepare for supporting PCI devices in addition to platform devices

This patch series is based on net-next.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2016-11-04 14:48:45 -04:00
commit 013724e9a2
10 changed files with 2938 additions and 1386 deletions


@ -2,7 +2,9 @@ obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
xgbe-ptp.o
xgbe-ptp.o \
xgbe-phy-v1.o \
xgbe-platform.o
amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o
amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o


@ -790,6 +790,10 @@
#define MTL_Q_RQOMR_RSF_WIDTH 1
#define MTL_Q_RQOMR_RTC_INDEX 0
#define MTL_Q_RQOMR_RTC_WIDTH 2
#define MTL_Q_TQDR_TRCSTS_INDEX 1
#define MTL_Q_TQDR_TRCSTS_WIDTH 2
#define MTL_Q_TQDR_TXQSTS_INDEX 4
#define MTL_Q_TQDR_TXQSTS_WIDTH 1
#define MTL_Q_TQOMR_FTQ_INDEX 0
#define MTL_Q_TQOMR_FTQ_WIDTH 1
#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8
@ -852,14 +856,9 @@
#define MTL_TSA_SP 0x00
#define MTL_TSA_ETS 0x02
/* PCS MMD select register offset
 * The MMD select register is used for accessing PCS registers
 * when the underlying APB3 interface is using indirect addressing.
 * Indirect addressing requires accessing registers in two phases,
 * an address phase and a data phase. The address phase requires
 * writing an address selection value to the MMD select register.
 */
#define PCS_MMD_SELECT 0xff
/* PCS register offsets */
#define PCS_V1_WINDOW_SELECT 0x03fc
#define PCS_V2_WINDOW_SELECT 0x9064
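The comment removed above describes the indirect, two-phase access scheme that both the old MMD select register and the new per-version window select offsets serve: first write a window/address selection, then read or write the register within that window. A minimal sketch of that pattern follows; the window size and helper name are assumptions for illustration, not the driver's actual accessors.
#include <linux/io.h>
/* Illustrative two-phase (windowed) PCS register read. The window size
 * below is a hypothetical value chosen for the sketch only.
 */
#define SKETCH_PCS_WINDOW_SELECT	0x9064	/* e.g. PCS_V2_WINDOW_SELECT */
#define SKETCH_PCS_WINDOW_SIZE		0x1000
static u16 sketch_pcs_read(void __iomem *pcs_regs, unsigned int mmd_reg)
{
	unsigned int window = mmd_reg / SKETCH_PCS_WINDOW_SIZE;
	unsigned int offset = mmd_reg % SKETCH_PCS_WINDOW_SIZE;
	/* Address phase: select which block of PCS registers is mapped */
	iowrite32(window, pcs_regs + SKETCH_PCS_WINDOW_SELECT);
	/* Data phase: access the register within the selected window */
	return ioread16(pcs_regs + offset);
}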
/* SerDes integration register offsets */
#define SIR0_KR_RT_1 0x002c
@ -1027,6 +1026,10 @@
#define MDIO_PMA_10GBR_FECCTRL 0x00ab
#endif
#ifndef MDIO_PCS_DIG_CTRL
#define MDIO_PCS_DIG_CTRL 0x8000
#endif
#ifndef MDIO_AN_XNP
#define MDIO_AN_XNP 0x0016
#endif
@ -1047,11 +1050,40 @@
#define MDIO_AN_INT 0x8002
#endif
#ifndef MDIO_VEND2_AN_ADVERTISE
#define MDIO_VEND2_AN_ADVERTISE 0x0004
#endif
#ifndef MDIO_VEND2_AN_LP_ABILITY
#define MDIO_VEND2_AN_LP_ABILITY 0x0005
#endif
#ifndef MDIO_VEND2_AN_CTRL
#define MDIO_VEND2_AN_CTRL 0x8001
#endif
#ifndef MDIO_VEND2_AN_STAT
#define MDIO_VEND2_AN_STAT 0x8002
#endif
#ifndef MDIO_CTRL1_SPEED1G
#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
#endif
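For reference, assuming the usual mii.h/mdio.h values (BMCR_SPEED100 = 0x2000, BMCR_SPEED1000 = 0x0040, MDIO_CTRL1_SPEED10G = BMCR_SPEED1000 | BMCR_SPEED100 = 0x2040), this fallback works out to MDIO_CTRL1_SPEED1G = 0x2040 & ~0x2000 = 0x0040, i.e. the 1 Gb/s encoding of the Clause 45 CTRL1 speed-select bits.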
#ifndef MDIO_VEND2_CTRL1_AN_ENABLE
#define MDIO_VEND2_CTRL1_AN_ENABLE BIT(12)
#endif
#ifndef MDIO_VEND2_CTRL1_AN_RESTART
#define MDIO_VEND2_CTRL1_AN_RESTART BIT(9)
#endif
/* MDIO mask values */
#define XGBE_AN_CL73_INT_CMPLT BIT(0)
#define XGBE_AN_CL73_INC_LINK BIT(1)
#define XGBE_AN_CL73_PG_RCV BIT(2)
#define XGBE_AN_CL73_INT_MASK 0x07
#define XGBE_XNP_MCF_NULL_MESSAGE 0x001
#define XGBE_XNP_ACK_PROCESSED BIT(12)
#define XGBE_XNP_MP_FORMATTED BIT(13)
@ -1060,6 +1092,19 @@
#define XGBE_KR_TRAINING_START BIT(0)
#define XGBE_KR_TRAINING_ENABLE BIT(1)
#define XGBE_PCS_CL37_BP BIT(12)
#define XGBE_AN_CL37_INT_CMPLT BIT(0)
#define XGBE_AN_CL37_INT_MASK 0x01
#define XGBE_AN_CL37_HD_MASK 0x40
#define XGBE_AN_CL37_FD_MASK 0x20
#define XGBE_AN_CL37_PCS_MODE_MASK 0x06
#define XGBE_AN_CL37_PCS_MODE_BASEX 0x00
#define XGBE_AN_CL37_PCS_MODE_SGMII 0x04
#define XGBE_AN_CL37_TX_CONFIG_MASK 0x08
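The CL37 masks above cover both the vendor auto-negotiation control register (PCS mode selection) and the link-partner ability word (duplex bits). A minimal, illustrative decode is sketched below; the helper is hypothetical and the register-to-mask mapping is an assumption based on how such fields are commonly laid out.
#include <linux/printk.h>
static void sketch_decode_cl37(unsigned int an_ctrl, unsigned int lp_ability)
{
	/* PCS operating mode is assumed to live in the vendor AN control word */
	if ((an_ctrl & XGBE_AN_CL37_PCS_MODE_MASK) == XGBE_AN_CL37_PCS_MODE_SGMII)
		pr_debug("CL37 AN: SGMII PCS mode\n");
	else
		pr_debug("CL37 AN: 1000Base-X PCS mode\n");
	/* Duplex abilities are assumed to come from the link-partner word */
	if (lp_ability & XGBE_AN_CL37_FD_MASK)
		pr_debug("CL37 AN: link partner advertises full duplex\n");
	if (lp_ability & XGBE_AN_CL37_HD_MASK)
		pr_debug("CL37 AN: link partner advertises half duplex\n");
}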
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
@ -1195,12 +1240,18 @@ do { \
/* Macros for building, reading or writing register values or bits
* within the register values of XPCS registers.
*/
#define XPCS_IOWRITE(_pdata, _off, _val) \
#define XPCS32_IOWRITE(_pdata, _off, _val) \
iowrite32(_val, (_pdata)->xpcs_regs + (_off))
#define XPCS_IOREAD(_pdata, _off) \
#define XPCS32_IOREAD(_pdata, _off) \
ioread32((_pdata)->xpcs_regs + (_off))
#define XPCS16_IOWRITE(_pdata, _off, _val) \
iowrite16(_val, (_pdata)->xpcs_regs + (_off))
#define XPCS16_IOREAD(_pdata, _off) \
ioread16((_pdata)->xpcs_regs + (_off))
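The patch splits the XPCS accessors into explicit 32-bit and 16-bit variants, which lets a later change pick the access width per hardware version. Below is a minimal sketch of how a caller might select between them; the boolean flag is a hypothetical stand-in, not the driver's actual mechanism.
/* Illustrative width-selecting XPCS read built on the macros above. */
static unsigned int sketch_xpcs_read(struct xgbe_prv_data *pdata,
				     unsigned int offset, bool use_16bit)
{
	if (use_16bit)
		return XPCS16_IOREAD(pdata, offset);
	return XPCS32_IOREAD(pdata, offset);
}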
/* Macros for building, reading or writing register values or bits
* within the register values of SerDes integration registers.
*/

File diff suppressed because it is too large.


@ -114,7 +114,6 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
@ -160,18 +159,8 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
(DMA_CH_INC * i);
if (pdata->per_channel_irq) {
/* Get the DMA interrupt (offset 1) */
ret = platform_get_irq(pdata->pdev, i + 1);
if (ret < 0) {
netdev_err(pdata->netdev,
"platform_get_irq %u failed\n",
i + 1);
goto err_irq;
}
channel->dma_irq = ret;
}
if (pdata->per_channel_irq)
channel->dma_irq = pdata->channel_irq[i];
if (i < pdata->tx_ring_count) {
spin_lock_init(&tx_ring->lock);
@ -194,9 +183,6 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
return 0;
err_irq:
kfree(rx_ring);
err_rx_ring:
kfree(tx_ring);
@ -590,6 +576,10 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
hw_feat->tx_ch_cnt++;
hw_feat->tc_cnt++;
/* Translate the fifo sizes into actual numbers */
hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
DBGPR("<--xgbe_get_all_hw_features\n");
}
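As a worked example of the FIFO size translation above: an encoded value of 7 becomes 1 << (7 + 7) = 16384, and an encoded 9 becomes 1 << 16 = 65536, i.e. 16 KB and 64 KB FIFOs respectively (assuming the decoded value is in bytes).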
@ -778,7 +768,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_free_rx_data\n");
}
static int xgbe_phy_init(struct xgbe_prv_data *pdata)
static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
pdata->phy_link = -1;
pdata->phy_speed = SPEED_UNKNOWN;
@ -1292,8 +1282,8 @@ static int xgbe_open(struct net_device *netdev)
DBGPR("-->xgbe_open\n");
/* Initialize the phy */
ret = xgbe_phy_init(pdata);
/* Reset the phy settings */
ret = xgbe_phy_reset(pdata);
if (ret)
return ret;


@ -316,24 +316,7 @@ static int xgbe_set_settings(struct net_device *netdev,
}
if (cmd->autoneg == AUTONEG_DISABLE) {
switch (speed) {
case SPEED_10000:
break;
case SPEED_2500:
if (pdata->speed_set != XGBE_SPEEDSET_2500_10000) {
netdev_err(netdev, "unsupported speed %u\n",
speed);
return -EINVAL;
}
break;
case SPEED_1000:
if (pdata->speed_set != XGBE_SPEEDSET_1000_10000) {
netdev_err(netdev, "unsupported speed %u\n",
speed);
return -EINVAL;
}
break;
default:
if (!pdata->phy_if.phy_valid_speed(pdata, speed)) {
netdev_err(netdev, "unsupported speed %u\n", speed);
return -EINVAL;
}


@ -116,19 +116,10 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/property.h>
#include <linux/acpi.h>
#include <linux/mdio.h>
#include "xgbe.h"
#include "xgbe-common.h"
@ -145,42 +136,6 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP);
static const u32 xgbe_serdes_blwc[] = {
XGBE_SPEED_1000_BLWC,
XGBE_SPEED_2500_BLWC,
XGBE_SPEED_10000_BLWC,
};
static const u32 xgbe_serdes_cdr_rate[] = {
XGBE_SPEED_1000_CDR,
XGBE_SPEED_2500_CDR,
XGBE_SPEED_10000_CDR,
};
static const u32 xgbe_serdes_pq_skew[] = {
XGBE_SPEED_1000_PQ,
XGBE_SPEED_2500_PQ,
XGBE_SPEED_10000_PQ,
};
static const u32 xgbe_serdes_tx_amp[] = {
XGBE_SPEED_1000_TXAMP,
XGBE_SPEED_2500_TXAMP,
XGBE_SPEED_10000_TXAMP,
};
static const u32 xgbe_serdes_dfe_tap_cfg[] = {
XGBE_SPEED_1000_DFE_TAP_CONFIG,
XGBE_SPEED_2500_DFE_TAP_CONFIG,
XGBE_SPEED_10000_DFE_TAP_CONFIG,
};
static const u32 xgbe_serdes_dfe_tap_ena[] = {
XGBE_SPEED_1000_DFE_TAP_ENABLE,
XGBE_SPEED_2500_DFE_TAP_ENABLE,
XGBE_SPEED_10000_DFE_TAP_ENABLE,
};
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
DBGPR("-->xgbe_default_config\n");
@ -207,160 +162,25 @@ static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
xgbe_init_function_ptrs_dev(&pdata->hw_if);
xgbe_init_function_ptrs_phy(&pdata->phy_if);
xgbe_init_function_ptrs_desc(&pdata->desc_if);
pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}
#ifdef CONFIG_ACPI
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
u32 property;
int ret;
/* Obtain the system clock setting */
ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
if (ret) {
dev_err(dev, "unable to obtain %s property\n",
XGBE_ACPI_DMA_FREQ);
return ret;
}
pdata->sysclk_rate = property;
/* Obtain the PTP clock setting */
ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
if (ret) {
dev_err(dev, "unable to obtain %s property\n",
XGBE_ACPI_PTP_FREQ);
return ret;
}
pdata->ptpclk_rate = property;
return 0;
}
#else /* CONFIG_ACPI */
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
return -EINVAL;
}
#endif /* CONFIG_ACPI */
#ifdef CONFIG_OF
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
/* Obtain the system clock setting */
pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
if (IS_ERR(pdata->sysclk)) {
dev_err(dev, "dma devm_clk_get failed\n");
return PTR_ERR(pdata->sysclk);
}
pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
/* Obtain the PTP clock setting */
pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
if (IS_ERR(pdata->ptpclk)) {
dev_err(dev, "ptp devm_clk_get failed\n");
return PTR_ERR(pdata->ptpclk);
}
pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
return 0;
}
static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
struct device_node *phy_node;
struct platform_device *phy_pdev;
phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
if (phy_node) {
/* Old style device tree:
* The XGBE and PHY resources are separate
*/
phy_pdev = of_find_device_by_node(phy_node);
of_node_put(phy_node);
} else {
/* New style device tree:
* The XGBE and PHY resources are grouped together with
* the PHY resources listed last
*/
get_device(dev);
phy_pdev = pdata->pdev;
}
return phy_pdev;
}
#else /* CONFIG_OF */
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
return -EINVAL;
}
static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
return NULL;
}
#endif /* CONFIG_OF */
static unsigned int xgbe_resource_count(struct platform_device *pdev,
unsigned int type)
{
unsigned int count;
int i;
for (i = 0, count = 0; i < pdev->num_resources; i++) {
struct resource *res = &pdev->resource[i];
if (type == resource_type(res))
count++;
}
return count;
}
static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
{
struct platform_device *phy_pdev;
if (pdata->use_acpi) {
get_device(pdata->dev);
phy_pdev = pdata->pdev;
} else {
phy_pdev = xgbe_of_get_phy_pdev(pdata);
}
return phy_pdev;
}
static int xgbe_probe(struct platform_device *pdev)
struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
{
struct xgbe_prv_data *pdata;
struct net_device *netdev;
struct device *dev = &pdev->dev, *phy_dev;
struct platform_device *phy_pdev;
struct resource *res;
const char *phy_mode;
unsigned int i, phy_memnum, phy_irqnum;
enum dev_dma_attr attr;
int ret;
DBGPR("--> xgbe_probe\n");
netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
XGBE_MAX_DMA_CHANNELS);
if (!netdev) {
dev_err(dev, "alloc_etherdev failed\n");
ret = -ENOMEM;
goto err_alloc;
dev_err(dev, "alloc_etherdev_mq failed\n");
return ERR_PTR(-ENOMEM);
}
SET_NETDEV_DEV(netdev, dev);
pdata = netdev_priv(netdev);
pdata->netdev = netdev;
pdata->pdev = pdev;
pdata->adev = ACPI_COMPANION(dev);
pdata->dev = dev;
platform_set_drvdata(pdev, netdev);
spin_lock_init(&pdata->lock);
spin_lock_init(&pdata->xpcs_lock);
@ -371,291 +191,82 @@ static int xgbe_probe(struct platform_device *pdev)
set_bit(XGBE_DOWN, &pdata->dev_state);
/* Check if we should use ACPI or DT */
pdata->use_acpi = dev->of_node ? 0 : 1;
return pdata;
}
phy_pdev = xgbe_get_phy_pdev(pdata);
if (!phy_pdev) {
dev_err(dev, "unable to obtain phy device\n");
ret = -EINVAL;
goto err_phydev;
}
phy_dev = &phy_pdev->dev;
void xgbe_free_pdata(struct xgbe_prv_data *pdata)
{
struct net_device *netdev = pdata->netdev;
if (pdev == phy_pdev) {
/* New style device tree or ACPI:
* The XGBE and PHY resources are grouped together with
* the PHY resources listed last
*/
phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
} else {
/* Old style device tree:
* The XGBE and PHY resources are separate
*/
phy_memnum = 0;
phy_irqnum = 0;
}
free_netdev(netdev);
}
/* Set and validate the number of descriptors for a ring */
BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
pdata->tx_desc_count = XGBE_TX_DESC_CNT;
if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
dev_err(dev, "tx descriptor count (%d) is not valid\n",
pdata->tx_desc_count);
ret = -EINVAL;
goto err_io;
}
BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
pdata->rx_desc_count = XGBE_RX_DESC_CNT;
if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
dev_err(dev, "rx descriptor count (%d) is not valid\n",
pdata->rx_desc_count);
ret = -EINVAL;
goto err_io;
}
void xgbe_set_counts(struct xgbe_prv_data *pdata)
{
/* Set all the function pointers */
xgbe_init_all_fptrs(pdata);
/* Obtain the mmio areas for the device */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pdata->xgmac_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->xgmac_regs)) {
dev_err(dev, "xgmac ioremap failed\n");
ret = PTR_ERR(pdata->xgmac_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
/* Populate the hardware features */
xgbe_get_all_hw_features(pdata);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
pdata->xpcs_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->xpcs_regs)) {
dev_err(dev, "xpcs ioremap failed\n");
ret = PTR_ERR(pdata->xpcs_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
/* Set default max values if not provided */
if (!pdata->tx_max_channel_count)
pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
if (!pdata->rx_max_channel_count)
pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
pdata->rxtx_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->rxtx_regs)) {
dev_err(dev, "rxtx ioremap failed\n");
ret = PTR_ERR(pdata->rxtx_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
if (!pdata->tx_max_q_count)
pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
if (!pdata->rx_max_q_count)
pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
pdata->sir0_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->sir0_regs)) {
dev_err(dev, "sir0 ioremap failed\n");
ret = PTR_ERR(pdata->sir0_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
/* Calculate the number of Tx and Rx rings to be created
* -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
* the number of Tx queues to the number of Tx channels
* enabled
* -Rx (DMA) Channels do not map 1-to-1 so use the actual
* number of Rx queues or maximum allowed
*/
pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
pdata->hw_feat.tx_ch_cnt);
pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
pdata->tx_max_channel_count);
pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
pdata->tx_max_q_count);
res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
pdata->sir1_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->sir1_regs)) {
dev_err(dev, "sir1 ioremap failed\n");
ret = PTR_ERR(pdata->sir1_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);
pdata->tx_q_count = pdata->tx_ring_count;
/* Retrieve the MAC address */
ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
pdata->mac_addr,
sizeof(pdata->mac_addr));
if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
if (!ret)
ret = -EINVAL;
goto err_io;
}
pdata->rx_ring_count = min_t(unsigned int,
netif_get_num_default_rss_queues(),
pdata->hw_feat.rx_ch_cnt);
pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
pdata->rx_max_channel_count);
/* Retrieve the PHY mode - it must be "xgmii" */
ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
&phy_mode);
if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
if (!ret)
ret = -EINVAL;
goto err_io;
}
pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
pdata->rx_q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt,
pdata->rx_max_q_count);
/* Check for per channel interrupt support */
if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
pdata->per_channel_irq = 1;
/* Retrieve the PHY speedset */
ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
&pdata->speed_set);
if (ret) {
dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
goto err_io;
if (netif_msg_probe(pdata)) {
dev_dbg(pdata->dev, "TX/RX DMA channel count = %u/%u\n",
pdata->tx_ring_count, pdata->rx_ring_count);
dev_dbg(pdata->dev, "TX/RX hardware queue count = %u/%u\n",
pdata->tx_q_count, pdata->rx_q_count);
}
}
switch (pdata->speed_set) {
case XGBE_SPEEDSET_1000_10000:
case XGBE_SPEEDSET_2500_10000:
break;
default:
dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
ret = -EINVAL;
goto err_io;
}
/* Retrieve the PHY configuration properties */
if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_BLWC_PROPERTY,
pdata->serdes_blwc,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_BLWC_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
sizeof(pdata->serdes_blwc));
}
if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_CDR_RATE_PROPERTY,
pdata->serdes_cdr_rate,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_CDR_RATE_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
sizeof(pdata->serdes_cdr_rate));
}
if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_PQ_SKEW_PROPERTY,
pdata->serdes_pq_skew,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_PQ_SKEW_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
sizeof(pdata->serdes_pq_skew));
}
if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_TX_AMP_PROPERTY,
pdata->serdes_tx_amp,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_TX_AMP_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
sizeof(pdata->serdes_tx_amp));
}
if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_DFE_CFG_PROPERTY,
pdata->serdes_dfe_tap_cfg,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_DFE_CFG_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
sizeof(pdata->serdes_dfe_tap_cfg));
}
if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_DFE_ENA_PROPERTY,
pdata->serdes_dfe_tap_ena,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_DFE_ENA_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
sizeof(pdata->serdes_dfe_tap_ena));
}
/* Obtain device settings unique to ACPI/OF */
if (pdata->use_acpi)
ret = xgbe_acpi_support(pdata);
else
ret = xgbe_of_support(pdata);
if (ret)
goto err_io;
/* Set the DMA coherency values */
attr = device_get_dma_attr(dev);
if (attr == DEV_DMA_NOT_SUPPORTED) {
dev_err(dev, "DMA is not supported");
ret = -ENODEV;
goto err_io;
}
pdata->coherent = (attr == DEV_DMA_COHERENT);
if (pdata->coherent) {
pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
pdata->arcache = XGBE_DMA_OS_ARCACHE;
pdata->awcache = XGBE_DMA_OS_AWCACHE;
} else {
pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
pdata->arcache = XGBE_DMA_SYS_ARCACHE;
pdata->awcache = XGBE_DMA_SYS_AWCACHE;
}
/* Get the device interrupt */
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
dev_err(dev, "platform_get_irq 0 failed\n");
goto err_io;
}
pdata->dev_irq = ret;
/* Get the auto-negotiation interrupt */
ret = platform_get_irq(phy_pdev, phy_irqnum++);
if (ret < 0) {
dev_err(dev, "platform_get_irq phy 0 failed\n");
goto err_io;
}
pdata->an_irq = ret;
int xgbe_config_netdev(struct xgbe_prv_data *pdata)
{
struct net_device *netdev = pdata->netdev;
struct device *dev = pdata->dev;
unsigned int i;
int ret;
netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
/* Set all the function pointers */
xgbe_init_all_fptrs(pdata);
/* Issue software reset to device */
pdata->hw_if.exit(pdata);
/* Populate the hardware features */
xgbe_get_all_hw_features(pdata);
/* Set default configuration data */
xgbe_default_config(pdata);
@ -664,33 +275,33 @@ static int xgbe_probe(struct platform_device *pdev)
DMA_BIT_MASK(pdata->hw_feat.dma_width));
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed\n");
goto err_io;
return ret;
}
/* Calculate the number of Tx and Rx rings to be created
* -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
* the number of Tx queues to the number of Tx channels
* enabled
* -Rx (DMA) Channels do not map 1-to-1 so use the actual
* number of Rx queues
*/
pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
pdata->hw_feat.tx_ch_cnt);
pdata->tx_q_count = pdata->tx_ring_count;
/* Set default max values if not provided */
if (!pdata->tx_max_fifo_size)
pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
if (!pdata->rx_max_fifo_size)
pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
/* Set and validate the number of descriptors for a ring */
BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
pdata->tx_desc_count = XGBE_TX_DESC_CNT;
BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
pdata->rx_desc_count = XGBE_RX_DESC_CNT;
/* Set the number of queues */
ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
if (ret) {
dev_err(dev, "error setting real tx queue count\n");
goto err_io;
return ret;
}
pdata->rx_ring_count = min_t(unsigned int,
netif_get_num_default_rss_queues(),
pdata->hw_feat.rx_ch_cnt);
pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
if (ret) {
dev_err(dev, "error setting real rx queue count\n");
goto err_io;
return ret;
}
/* Initialize RSS hash key and lookup table */
@ -705,7 +316,9 @@ static int xgbe_probe(struct platform_device *pdev)
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
/* Call MDIO/PHY initialization routine */
pdata->phy_if.phy_init(pdata);
ret = pdata->phy_if.phy_init(pdata);
if (ret)
return ret;
/* Set device operations */
netdev->netdev_ops = xgbe_get_netdev_ops();
@ -752,7 +365,7 @@ static int xgbe_probe(struct platform_device *pdev)
ret = register_netdev(netdev);
if (ret) {
dev_err(dev, "net device registration failed\n");
goto err_io;
return ret;
}
/* Create the PHY/ANEG name based on netdev name */
@ -780,12 +393,6 @@ static int xgbe_probe(struct platform_device *pdev)
xgbe_debugfs_init(pdata);
platform_device_put(phy_pdev);
netdev_notice(netdev, "net device enabled\n");
DBGPR("<-- xgbe_probe\n");
return 0;
err_wq:
@ -794,29 +401,19 @@ static int xgbe_probe(struct platform_device *pdev)
err_netdev:
unregister_netdev(netdev);
err_io:
platform_device_put(phy_pdev);
err_phydev:
free_netdev(netdev);
err_alloc:
dev_notice(dev, "net device not enabled\n");
return ret;
}
static int xgbe_remove(struct platform_device *pdev)
void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
{
struct net_device *netdev = platform_get_drvdata(pdev);
struct xgbe_prv_data *pdata = netdev_priv(netdev);
DBGPR("-->xgbe_remove\n");
struct net_device *netdev = pdata->netdev;
xgbe_debugfs_exit(pdata);
xgbe_ptp_unregister(pdata);
pdata->phy_if.phy_exit(pdata);
flush_workqueue(pdata->an_workqueue);
destroy_workqueue(pdata->an_workqueue);
@ -824,94 +421,23 @@ static int xgbe_remove(struct platform_device *pdev)
destroy_workqueue(pdata->dev_workqueue);
unregister_netdev(netdev);
}
free_netdev(netdev);
static int __init xgbe_mod_init(void)
{
int ret;
DBGPR("<--xgbe_remove\n");
ret = xgbe_platform_init();
if (ret)
return ret;
return 0;
}
#ifdef CONFIG_PM
static int xgbe_suspend(struct device *dev)
static void __exit xgbe_mod_exit(void)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct xgbe_prv_data *pdata = netdev_priv(netdev);
int ret = 0;
DBGPR("-->xgbe_suspend\n");
if (netif_running(netdev))
ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
DBGPR("<--xgbe_suspend\n");
return ret;
xgbe_platform_exit();
}
static int xgbe_resume(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct xgbe_prv_data *pdata = netdev_priv(netdev);
int ret = 0;
DBGPR("-->xgbe_resume\n");
pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
if (netif_running(netdev)) {
ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
/* Schedule a restart in case the link or phy state changed
* while we were powered down.
*/
schedule_work(&pdata->restart_work);
}
DBGPR("<--xgbe_resume\n");
return ret;
}
#endif /* CONFIG_PM */
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[] = {
{ "AMDI8001", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
#endif
#ifdef CONFIG_OF
static const struct of_device_id xgbe_of_match[] = {
{ .compatible = "amd,xgbe-seattle-v1a", },
{},
};
MODULE_DEVICE_TABLE(of, xgbe_of_match);
#endif
static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
static struct platform_driver xgbe_driver = {
.driver = {
.name = "amd-xgbe",
#ifdef CONFIG_ACPI
.acpi_match_table = xgbe_acpi_match,
#endif
#ifdef CONFIG_OF
.of_match_table = xgbe_of_match,
#endif
.pm = &xgbe_pm_ops,
},
.probe = xgbe_probe,
.remove = xgbe_remove,
};
module_platform_driver(xgbe_driver);
module_init(xgbe_mod_init);
module_exit(xgbe_mod_exit);

File diff suppressed because it is too large.


@ -0,0 +1,828 @@
/*
* AMD 10Gb Ethernet driver
*
* This file is available to you under your choice of the following two
* licenses:
*
* License 1: GPLv2
*
* Copyright (c) 2016 Advanced Micro Devices, Inc.
*
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or (at
* your option) any later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* The Synopsys DWC ETHER XGMAC Software Driver and documentation
* (hereinafter "Software") is an unsupported proprietary work of Synopsys,
* Inc. unless otherwise expressly agreed to in writing between Synopsys
* and you.
*
* The Software IS NOT an item of Licensed Software or Licensed Product
* under any End User Software License Agreement or Agreement for Licensed
* Product with Synopsys or any supplement thereto. Permission is hereby
* granted, free of charge, to any person obtaining a copy of this software
* annotated with this license and the Software, to deal in the Software
* without restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
* BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
*
* License 2: Modified BSD
*
* Copyright (c) 2016 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Advanced Micro Devices, Inc. nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* The Synopsys DWC ETHER XGMAC Software Driver and documentation
* (hereinafter "Software") is an unsupported proprietary work of Synopsys,
* Inc. unless otherwise expressly agreed to in writing between Synopsys
* and you.
*
* The Software IS NOT an item of Licensed Software or Licensed Product
* under any End User Software License Agreement or Agreement for Licensed
* Product with Synopsys or any supplement thereto. Permission is hereby
* granted, free of charge, to any person obtaining a copy of this software
* annotated with this license and the Software, to deal in the Software
* without restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
* BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/property.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include "xgbe.h"
#include "xgbe-common.h"
#define XGBE_BLWC_PROPERTY "amd,serdes-blwc"
#define XGBE_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
#define XGBE_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
#define XGBE_TX_AMP_PROPERTY "amd,serdes-tx-amp"
#define XGBE_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
#define XGBE_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
/* Default SerDes settings */
#define XGBE_SPEED_1000_BLWC 1
#define XGBE_SPEED_1000_CDR 0x2
#define XGBE_SPEED_1000_PLL 0x0
#define XGBE_SPEED_1000_PQ 0xa
#define XGBE_SPEED_1000_RATE 0x3
#define XGBE_SPEED_1000_TXAMP 0xf
#define XGBE_SPEED_1000_WORD 0x1
#define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3
#define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0
#define XGBE_SPEED_2500_BLWC 1
#define XGBE_SPEED_2500_CDR 0x2
#define XGBE_SPEED_2500_PLL 0x0
#define XGBE_SPEED_2500_PQ 0xa
#define XGBE_SPEED_2500_RATE 0x1
#define XGBE_SPEED_2500_TXAMP 0xf
#define XGBE_SPEED_2500_WORD 0x1
#define XGBE_SPEED_2500_DFE_TAP_CONFIG 0x3
#define XGBE_SPEED_2500_DFE_TAP_ENABLE 0x0
#define XGBE_SPEED_10000_BLWC 0
#define XGBE_SPEED_10000_CDR 0x7
#define XGBE_SPEED_10000_PLL 0x1
#define XGBE_SPEED_10000_PQ 0x12
#define XGBE_SPEED_10000_RATE 0x0
#define XGBE_SPEED_10000_TXAMP 0xa
#define XGBE_SPEED_10000_WORD 0x7
#define XGBE_SPEED_10000_DFE_TAP_CONFIG 0x1
#define XGBE_SPEED_10000_DFE_TAP_ENABLE 0x7f
/* Rate-change complete wait/retry count */
#define XGBE_RATECHANGE_COUNT 500
static const u32 xgbe_phy_blwc[] = {
XGBE_SPEED_1000_BLWC,
XGBE_SPEED_2500_BLWC,
XGBE_SPEED_10000_BLWC,
};
static const u32 xgbe_phy_cdr_rate[] = {
XGBE_SPEED_1000_CDR,
XGBE_SPEED_2500_CDR,
XGBE_SPEED_10000_CDR,
};
static const u32 xgbe_phy_pq_skew[] = {
XGBE_SPEED_1000_PQ,
XGBE_SPEED_2500_PQ,
XGBE_SPEED_10000_PQ,
};
static const u32 xgbe_phy_tx_amp[] = {
XGBE_SPEED_1000_TXAMP,
XGBE_SPEED_2500_TXAMP,
XGBE_SPEED_10000_TXAMP,
};
static const u32 xgbe_phy_dfe_tap_cfg[] = {
XGBE_SPEED_1000_DFE_TAP_CONFIG,
XGBE_SPEED_2500_DFE_TAP_CONFIG,
XGBE_SPEED_10000_DFE_TAP_CONFIG,
};
static const u32 xgbe_phy_dfe_tap_ena[] = {
XGBE_SPEED_1000_DFE_TAP_ENABLE,
XGBE_SPEED_2500_DFE_TAP_ENABLE,
XGBE_SPEED_10000_DFE_TAP_ENABLE,
};
struct xgbe_phy_data {
/* 1000/10000 vs 2500/10000 indicator */
unsigned int speed_set;
/* SerDes UEFI configurable settings.
* Switching between modes/speeds requires new values for some
* SerDes settings. The values can be supplied as device
* properties in array format. The first array entry is for
* 1GbE, second for 2.5GbE and third for 10GbE
*/
u32 blwc[XGBE_SPEEDS];
u32 cdr_rate[XGBE_SPEEDS];
u32 pq_skew[XGBE_SPEEDS];
u32 tx_amp[XGBE_SPEEDS];
u32 dfe_tap_cfg[XGBE_SPEEDS];
u32 dfe_tap_ena[XGBE_SPEEDS];
};
static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
{
XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1);
}
static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
{
XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
}
static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
enum xgbe_mode mode;
unsigned int ad_reg, lp_reg;
pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
pdata->phy.lp_advertising |= ADVERTISED_Backplane;
/* Compare Advertisement and Link Partner register 1 */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
if (lp_reg & 0x400)
pdata->phy.lp_advertising |= ADVERTISED_Pause;
if (lp_reg & 0x800)
pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
if (pdata->phy.pause_autoneg) {
/* Set flow control based on auto-negotiation result */
pdata->phy.tx_pause = 0;
pdata->phy.rx_pause = 0;
if (ad_reg & lp_reg & 0x400) {
pdata->phy.tx_pause = 1;
pdata->phy.rx_pause = 1;
} else if (ad_reg & lp_reg & 0x800) {
if (ad_reg & 0x400)
pdata->phy.rx_pause = 1;
else if (lp_reg & 0x400)
pdata->phy.tx_pause = 1;
}
}
/* Compare Advertisement and Link Partner register 2 */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
if (lp_reg & 0x80)
pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
if (lp_reg & 0x20) {
if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full;
else
pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
}
ad_reg &= lp_reg;
if (ad_reg & 0x80) {
mode = XGBE_MODE_KR;
} else if (ad_reg & 0x20) {
if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
mode = XGBE_MODE_KX_2500;
else
mode = XGBE_MODE_KX_1000;
} else {
mode = XGBE_MODE_UNKNOWN;
}
/* Compare Advertisement and Link Partner register 3 */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
if (lp_reg & 0xc000)
pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
return mode;
}
static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata)
{
return XGBE_AN_MODE_CL73;
}
static void xgbe_phy_pcs_power_cycle(struct xgbe_prv_data *pdata)
{
unsigned int reg;
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg |= MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
usleep_range(75, 100);
reg &= ~MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
}
static void xgbe_phy_start_ratechange(struct xgbe_prv_data *pdata)
{
/* Assert Rx and Tx ratechange */
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1);
}
static void xgbe_phy_complete_ratechange(struct xgbe_prv_data *pdata)
{
unsigned int wait;
u16 status;
/* Release Rx and Tx ratechange */
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0);
/* Wait for Rx and Tx ready */
wait = XGBE_RATECHANGE_COUNT;
while (wait--) {
usleep_range(50, 75);
status = XSIR0_IOREAD(pdata, SIR0_STATUS);
if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
goto rx_reset;
}
netif_dbg(pdata, link, pdata->netdev, "SerDes rx/tx not ready (%#hx)\n",
status);
rx_reset:
/* Perform Rx reset for the DFE changes */
XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1);
}
static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int reg;
/* Set PCS to KR/10G speed */
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
reg &= ~MDIO_PCS_CTRL2_TYPE;
reg |= MDIO_PCS_CTRL2_10GBR;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg &= ~MDIO_CTRL1_SPEEDSEL;
reg |= MDIO_CTRL1_SPEED10G;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
xgbe_phy_pcs_power_cycle(pdata);
/* Set SerDes to 10G speed */
xgbe_phy_start_ratechange(pdata);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
phy_data->cdr_rate[XGBE_SPEED_10000]);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
phy_data->tx_amp[XGBE_SPEED_10000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
phy_data->blwc[XGBE_SPEED_10000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
phy_data->pq_skew[XGBE_SPEED_10000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
phy_data->dfe_tap_cfg[XGBE_SPEED_10000]);
XRXTX_IOWRITE(pdata, RXTX_REG22,
phy_data->dfe_tap_ena[XGBE_SPEED_10000]);
xgbe_phy_complete_ratechange(pdata);
netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
}
static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int reg;
/* Set PCS to KX/1G speed */
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
reg &= ~MDIO_PCS_CTRL2_TYPE;
reg |= MDIO_PCS_CTRL2_10GBX;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg &= ~MDIO_CTRL1_SPEEDSEL;
reg |= MDIO_CTRL1_SPEED1G;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
xgbe_phy_pcs_power_cycle(pdata);
/* Set SerDes to 2.5G speed */
xgbe_phy_start_ratechange(pdata);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
phy_data->cdr_rate[XGBE_SPEED_2500]);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
phy_data->tx_amp[XGBE_SPEED_2500]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
phy_data->blwc[XGBE_SPEED_2500]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
phy_data->pq_skew[XGBE_SPEED_2500]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
phy_data->dfe_tap_cfg[XGBE_SPEED_2500]);
XRXTX_IOWRITE(pdata, RXTX_REG22,
phy_data->dfe_tap_ena[XGBE_SPEED_2500]);
xgbe_phy_complete_ratechange(pdata);
netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
}
static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int reg;
/* Set PCS to KX/1G speed */
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
reg &= ~MDIO_PCS_CTRL2_TYPE;
reg |= MDIO_PCS_CTRL2_10GBX;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg &= ~MDIO_CTRL1_SPEEDSEL;
reg |= MDIO_CTRL1_SPEED1G;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
xgbe_phy_pcs_power_cycle(pdata);
/* Set SerDes to 1G speed */
xgbe_phy_start_ratechange(pdata);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
phy_data->cdr_rate[XGBE_SPEED_1000]);
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
phy_data->tx_amp[XGBE_SPEED_1000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
phy_data->blwc[XGBE_SPEED_1000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
phy_data->pq_skew[XGBE_SPEED_1000]);
XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
phy_data->dfe_tap_cfg[XGBE_SPEED_1000]);
XRXTX_IOWRITE(pdata, RXTX_REG22,
phy_data->dfe_tap_ena[XGBE_SPEED_1000]);
xgbe_phy_complete_ratechange(pdata);
netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
}
static enum xgbe_mode xgbe_phy_cur_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
enum xgbe_mode mode;
unsigned int reg;
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
reg &= MDIO_PCS_CTRL2_TYPE;
if (reg == MDIO_PCS_CTRL2_10GBR) {
mode = XGBE_MODE_KR;
} else {
if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
mode = XGBE_MODE_KX_2500;
else
mode = XGBE_MODE_KX_1000;
}
return mode;
}
static enum xgbe_mode xgbe_phy_switch_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
enum xgbe_mode mode;
/* If we are in KR switch to KX, and vice-versa */
if (xgbe_phy_cur_mode(pdata) == XGBE_MODE_KR) {
if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
mode = XGBE_MODE_KX_2500;
else
mode = XGBE_MODE_KX_1000;
} else {
mode = XGBE_MODE_KR;
}
return mode;
}
static enum xgbe_mode xgbe_phy_get_mode(struct xgbe_prv_data *pdata,
int speed)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
switch (speed) {
case SPEED_1000:
return (phy_data->speed_set == XGBE_SPEEDSET_1000_10000)
? XGBE_MODE_KX_1000 : XGBE_MODE_UNKNOWN;
case SPEED_2500:
return (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
? XGBE_MODE_KX_2500 : XGBE_MODE_UNKNOWN;
case SPEED_10000:
return XGBE_MODE_KR;
default:
return XGBE_MODE_UNKNOWN;
}
}
static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
{
switch (mode) {
case XGBE_MODE_KX_1000:
xgbe_phy_kx_1000_mode(pdata);
break;
case XGBE_MODE_KX_2500:
xgbe_phy_kx_2500_mode(pdata);
break;
case XGBE_MODE_KR:
xgbe_phy_kr_mode(pdata);
break;
default:
break;
}
}
static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata,
enum xgbe_mode mode, u32 advert)
{
if (pdata->phy.autoneg == AUTONEG_ENABLE) {
if (pdata->phy.advertising & advert)
return true;
} else {
enum xgbe_mode cur_mode;
cur_mode = xgbe_phy_get_mode(pdata, pdata->phy.speed);
if (cur_mode == mode)
return true;
}
return false;
}
static bool xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
{
switch (mode) {
case XGBE_MODE_KX_1000:
return xgbe_phy_check_mode(pdata, mode,
ADVERTISED_1000baseKX_Full);
case XGBE_MODE_KX_2500:
return xgbe_phy_check_mode(pdata, mode,
ADVERTISED_2500baseX_Full);
case XGBE_MODE_KR:
return xgbe_phy_check_mode(pdata, mode,
ADVERTISED_10000baseKR_Full);
default:
return false;
}
}
static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
switch (speed) {
case SPEED_1000:
if (phy_data->speed_set != XGBE_SPEEDSET_1000_10000)
return false;
return true;
case SPEED_2500:
if (phy_data->speed_set != XGBE_SPEEDSET_2500_10000)
return false;
return true;
case SPEED_10000:
return true;
default:
return false;
}
}
static int xgbe_phy_link_status(struct xgbe_prv_data *pdata)
{
unsigned int reg;
/* Link status is latched low, so read once to clear
* and then read again to get current state
*/
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
return (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
}
static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
{
/* Nothing uniquely required for stop */
}
static int xgbe_phy_start(struct xgbe_prv_data *pdata)
{
/* Nothing uniquely required for start */
return 0;
}
static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
unsigned int reg, count;
/* Perform a software reset of the PCS */
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
reg |= MDIO_CTRL1_RESET;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
count = 50;
do {
msleep(20);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
} while ((reg & MDIO_CTRL1_RESET) && --count);
if (reg & MDIO_CTRL1_RESET)
return -ETIMEDOUT;
return 0;
}
static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
{
/* Nothing uniquely required for exit */
}
static int xgbe_phy_init(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data;
int ret;
phy_data = devm_kzalloc(pdata->dev, sizeof(*phy_data), GFP_KERNEL);
if (!phy_data)
return -ENOMEM;
/* Retrieve the PHY speedset */
ret = device_property_read_u32(pdata->phy_dev, XGBE_SPEEDSET_PROPERTY,
&phy_data->speed_set);
if (ret) {
dev_err(pdata->dev, "invalid %s property\n",
XGBE_SPEEDSET_PROPERTY);
return ret;
}
switch (phy_data->speed_set) {
case XGBE_SPEEDSET_1000_10000:
case XGBE_SPEEDSET_2500_10000:
break;
default:
dev_err(pdata->dev, "invalid %s property\n",
XGBE_SPEEDSET_PROPERTY);
return -EINVAL;
}
/* Retrieve the PHY configuration properties */
if (device_property_present(pdata->phy_dev, XGBE_BLWC_PROPERTY)) {
ret = device_property_read_u32_array(pdata->phy_dev,
XGBE_BLWC_PROPERTY,
phy_data->blwc,
XGBE_SPEEDS);
if (ret) {
dev_err(pdata->dev, "invalid %s property\n",
XGBE_BLWC_PROPERTY);
return ret;
}
} else {
memcpy(phy_data->blwc, xgbe_phy_blwc,
sizeof(phy_data->blwc));
}
if (device_property_present(pdata->phy_dev, XGBE_CDR_RATE_PROPERTY)) {
ret = device_property_read_u32_array(pdata->phy_dev,
XGBE_CDR_RATE_PROPERTY,
phy_data->cdr_rate,
XGBE_SPEEDS);
if (ret) {
dev_err(pdata->dev, "invalid %s property\n",
XGBE_CDR_RATE_PROPERTY);
return ret;
}
} else {
memcpy(phy_data->cdr_rate, xgbe_phy_cdr_rate,
sizeof(phy_data->cdr_rate));
}
if (device_property_present(pdata->phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
ret = device_property_read_u32_array(pdata->phy_dev,
XGBE_PQ_SKEW_PROPERTY,
phy_data->pq_skew,
XGBE_SPEEDS);
if (ret) {
dev_err(pdata->dev, "invalid %s property\n",
XGBE_PQ_SKEW_PROPERTY);
return ret;
}
} else {
memcpy(phy_data->pq_skew, xgbe_phy_pq_skew,
sizeof(phy_data->pq_skew));
}
if (device_property_present(pdata->phy_dev, XGBE_TX_AMP_PROPERTY)) {
ret = device_property_read_u32_array(pdata->phy_dev,
XGBE_TX_AMP_PROPERTY,
phy_data->tx_amp,
XGBE_SPEEDS);
if (ret) {
dev_err(pdata->dev, "invalid %s property\n",
XGBE_TX_AMP_PROPERTY);
return ret;
}
} else {
memcpy(phy_data->tx_amp, xgbe_phy_tx_amp,
sizeof(phy_data->tx_amp));
}
if (device_property_present(pdata->phy_dev, XGBE_DFE_CFG_PROPERTY)) {
ret = device_property_read_u32_array(pdata->phy_dev,
XGBE_DFE_CFG_PROPERTY,
phy_data->dfe_tap_cfg,
XGBE_SPEEDS);
if (ret) {
dev_err(pdata->dev, "invalid %s property\n",
XGBE_DFE_CFG_PROPERTY);
return ret;
}
} else {
memcpy(phy_data->dfe_tap_cfg, xgbe_phy_dfe_tap_cfg,
sizeof(phy_data->dfe_tap_cfg));
}
if (device_property_present(pdata->phy_dev, XGBE_DFE_ENA_PROPERTY)) {
ret = device_property_read_u32_array(pdata->phy_dev,
XGBE_DFE_ENA_PROPERTY,
phy_data->dfe_tap_ena,
XGBE_SPEEDS);
if (ret) {
dev_err(pdata->dev, "invalid %s property\n",
XGBE_DFE_ENA_PROPERTY);
return ret;
}
} else {
memcpy(phy_data->dfe_tap_ena, xgbe_phy_dfe_tap_ena,
sizeof(phy_data->dfe_tap_ena));
}
/* Initialize supported features */
pdata->phy.supported = SUPPORTED_Autoneg;
pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
pdata->phy.supported |= SUPPORTED_Backplane;
pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
switch (phy_data->speed_set) {
case XGBE_SPEEDSET_1000_10000:
pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
break;
case XGBE_SPEEDSET_2500_10000:
pdata->phy.supported |= SUPPORTED_2500baseX_Full;
break;
}
if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
pdata->phy_data = phy_data;
return 0;
}
void xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *phy_if)
{
struct xgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
phy_impl->init = xgbe_phy_init;
phy_impl->exit = xgbe_phy_exit;
phy_impl->reset = xgbe_phy_reset;
phy_impl->start = xgbe_phy_start;
phy_impl->stop = xgbe_phy_stop;
phy_impl->link_status = xgbe_phy_link_status;
phy_impl->valid_speed = xgbe_phy_valid_speed;
phy_impl->use_mode = xgbe_phy_use_mode;
phy_impl->set_mode = xgbe_phy_set_mode;
phy_impl->get_mode = xgbe_phy_get_mode;
phy_impl->switch_mode = xgbe_phy_switch_mode;
phy_impl->cur_mode = xgbe_phy_cur_mode;
phy_impl->an_mode = xgbe_phy_an_mode;
phy_impl->an_outcome = xgbe_phy_an_outcome;
phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
phy_impl->kr_training_post = xgbe_phy_kr_training_post;
}


@ -0,0 +1,632 @@
/*
* AMD 10Gb Ethernet driver
*
* This file is available to you under your choice of the following two
* licenses:
*
* License 1: GPLv2
*
* Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
*
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or (at
* your option) any later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* The Synopsys DWC ETHER XGMAC Software Driver and documentation
* (hereinafter "Software") is an unsupported proprietary work of Synopsys,
* Inc. unless otherwise expressly agreed to in writing between Synopsys
* and you.
*
* The Software IS NOT an item of Licensed Software or Licensed Product
* under any End User Software License Agreement or Agreement for Licensed
* Product with Synopsys or any supplement thereto. Permission is hereby
* granted, free of charge, to any person obtaining a copy of this software
* annotated with this license and the Software, to deal in the Software
* without restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
* BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
*
* License 2: Modified BSD
*
* Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Advanced Micro Devices, Inc. nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* The Synopsys DWC ETHER XGMAC Software Driver and documentation
* (hereinafter "Software") is an unsupported proprietary work of Synopsys,
* Inc. unless otherwise expressly agreed to in writing between Synopsys
* and you.
*
* The Software IS NOT an item of Licensed Software or Licensed Product
* under any End User Software License Agreement or Agreement for Licensed
* Product with Synopsys or any supplement thereto. Permission is hereby
* granted, free of charge, to any person obtaining a copy of this software
* annotated with this license and the Software, to deal in the Software
* without restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
* BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/property.h>
#include <linux/acpi.h>
#include <linux/mdio.h>
#include "xgbe.h"
#include "xgbe-common.h"
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[];
static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
{
const struct acpi_device_id *id;
id = acpi_match_device(xgbe_acpi_match, pdata->dev);
return id ? (struct xgbe_version_data *)id->driver_data : NULL;
}
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
u32 property;
int ret;
/* Obtain the system clock setting */
ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
if (ret) {
dev_err(dev, "unable to obtain %s property\n",
XGBE_ACPI_DMA_FREQ);
return ret;
}
pdata->sysclk_rate = property;
/* Obtain the PTP clock setting */
ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
if (ret) {
dev_err(dev, "unable to obtain %s property\n",
XGBE_ACPI_PTP_FREQ);
return ret;
}
pdata->ptpclk_rate = property;
return 0;
}
#else /* CONFIG_ACPI */
static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
{
return NULL;
}
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
return -EINVAL;
}
#endif /* CONFIG_ACPI */
#ifdef CONFIG_OF
static const struct of_device_id xgbe_of_match[];
static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
{
const struct of_device_id *id;
id = of_match_device(xgbe_of_match, pdata->dev);
return id ? (struct xgbe_version_data *)id->data : NULL;
}
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
/* Obtain the system clock setting */
pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
if (IS_ERR(pdata->sysclk)) {
dev_err(dev, "dma devm_clk_get failed\n");
return PTR_ERR(pdata->sysclk);
}
pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
/* Obtain the PTP clock setting */
pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
if (IS_ERR(pdata->ptpclk)) {
dev_err(dev, "ptp devm_clk_get failed\n");
return PTR_ERR(pdata->ptpclk);
}
pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
return 0;
}
static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
struct device_node *phy_node;
struct platform_device *phy_pdev;
phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
if (phy_node) {
/* Old style device tree:
* The XGBE and PHY resources are separate
*/
phy_pdev = of_find_device_by_node(phy_node);
of_node_put(phy_node);
} else {
/* New style device tree:
* The XGBE and PHY resources are grouped together with
* the PHY resources listed last
*/
get_device(dev);
phy_pdev = pdata->platdev;
}
return phy_pdev;
}
#else /* CONFIG_OF */
static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
{
return NULL;
}
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
return -EINVAL;
}
static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
return NULL;
}
#endif /* CONFIG_OF */
static unsigned int xgbe_resource_count(struct platform_device *pdev,
unsigned int type)
{
unsigned int count;
int i;
for (i = 0, count = 0; i < pdev->num_resources; i++) {
struct resource *res = &pdev->resource[i];
if (type == resource_type(res))
count++;
}
return count;
}
static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
{
struct platform_device *phy_pdev;
if (pdata->use_acpi) {
get_device(pdata->dev);
phy_pdev = pdata->platdev;
} else {
phy_pdev = xgbe_of_get_phy_pdev(pdata);
}
return phy_pdev;
}
static struct xgbe_version_data *xgbe_get_vdata(struct xgbe_prv_data *pdata)
{
return pdata->use_acpi ? xgbe_acpi_vdata(pdata)
: xgbe_of_vdata(pdata);
}
static int xgbe_platform_probe(struct platform_device *pdev)
{
struct xgbe_prv_data *pdata;
struct device *dev = &pdev->dev;
struct platform_device *phy_pdev;
struct resource *res;
const char *phy_mode;
unsigned int phy_memnum, phy_irqnum;
unsigned int dma_irqnum, dma_irqend;
enum dev_dma_attr attr;
int ret;
pdata = xgbe_alloc_pdata(dev);
if (IS_ERR(pdata)) {
ret = PTR_ERR(pdata);
goto err_alloc;
}
pdata->platdev = pdev;
pdata->adev = ACPI_COMPANION(dev);
platform_set_drvdata(pdev, pdata);
/* Check if we should use ACPI or DT */
pdata->use_acpi = dev->of_node ? 0 : 1;
/* Get the version data */
pdata->vdata = xgbe_get_vdata(pdata);
phy_pdev = xgbe_get_phy_pdev(pdata);
if (!phy_pdev) {
dev_err(dev, "unable to obtain phy device\n");
ret = -EINVAL;
goto err_phydev;
}
pdata->phy_platdev = phy_pdev;
pdata->phy_dev = &phy_pdev->dev;
if (pdev == phy_pdev) {
/* New style device tree or ACPI:
* The XGBE and PHY resources are grouped together with
* the PHY resources listed last
*/
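/* The last three MEM resources are the PHY/SerDes register blocks
* (rxtx, sir0 and sir1) and the last IRQ resource is the PHY
* auto-negotiation interrupt, hence the offsets below.
*/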
phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
dma_irqnum = 1;
dma_irqend = phy_irqnum;
} else {
/* Old style device tree:
* The XGBE and PHY resources are separate
*/
phy_memnum = 0;
phy_irqnum = 0;
dma_irqnum = 1;
dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ);
}
/* Obtain the mmio areas for the device */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pdata->xgmac_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->xgmac_regs)) {
dev_err(dev, "xgmac ioremap failed\n");
ret = PTR_ERR(pdata->xgmac_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
pdata->xpcs_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->xpcs_regs)) {
dev_err(dev, "xpcs ioremap failed\n");
ret = PTR_ERR(pdata->xpcs_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
pdata->rxtx_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->rxtx_regs)) {
dev_err(dev, "rxtx ioremap failed\n");
ret = PTR_ERR(pdata->rxtx_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
pdata->sir0_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->sir0_regs)) {
dev_err(dev, "sir0 ioremap failed\n");
ret = PTR_ERR(pdata->sir0_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
pdata->sir1_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->sir1_regs)) {
dev_err(dev, "sir1 ioremap failed\n");
ret = PTR_ERR(pdata->sir1_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);
/* Retrieve the MAC address */
ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
pdata->mac_addr,
sizeof(pdata->mac_addr));
if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
if (!ret)
ret = -EINVAL;
goto err_io;
}
/* Retrieve the PHY mode - it must be "xgmii" */
ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
&phy_mode);
if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
if (!ret)
ret = -EINVAL;
goto err_io;
}
pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
/* Check for per channel interrupt support */
if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
pdata->per_channel_irq = 1;
/* Obtain device settings unique to ACPI/OF */
if (pdata->use_acpi)
ret = xgbe_acpi_support(pdata);
else
ret = xgbe_of_support(pdata);
if (ret)
goto err_io;
/* Set the DMA coherency values */
attr = device_get_dma_attr(dev);
if (attr == DEV_DMA_NOT_SUPPORTED) {
dev_err(dev, "DMA is not supported");
ret = -ENODEV;
goto err_io;
}
pdata->coherent = (attr == DEV_DMA_COHERENT);
if (pdata->coherent) {
pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
pdata->arcache = XGBE_DMA_OS_ARCACHE;
pdata->awcache = XGBE_DMA_OS_AWCACHE;
} else {
pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
pdata->arcache = XGBE_DMA_SYS_ARCACHE;
pdata->awcache = XGBE_DMA_SYS_AWCACHE;
}
/* Set the maximum fifo amounts */
pdata->tx_max_fifo_size = pdata->vdata->tx_max_fifo_size;
pdata->rx_max_fifo_size = pdata->vdata->rx_max_fifo_size;
/* Set the hardware channel and queue counts */
xgbe_set_counts(pdata);
/* Get the device interrupt */
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
dev_err(dev, "platform_get_irq 0 failed\n");
goto err_io;
}
pdata->dev_irq = ret;
/* Get the per channel DMA interrupts */
if (pdata->per_channel_irq) {
unsigned int i, max = ARRAY_SIZE(pdata->channel_irq);
for (i = 0; (i < max) && (dma_irqnum < dma_irqend); i++) {
ret = platform_get_irq(pdata->platdev, dma_irqnum++);
if (ret < 0) {
netdev_err(pdata->netdev,
"platform_get_irq %u failed\n",
dma_irqnum - 1);
goto err_io;
}
pdata->channel_irq[i] = ret;
}
}
/* Get the auto-negotiation interrupt */
ret = platform_get_irq(phy_pdev, phy_irqnum++);
if (ret < 0) {
dev_err(dev, "platform_get_irq phy 0 failed\n");
goto err_io;
}
pdata->an_irq = ret;
/* Configure the netdev resource */
ret = xgbe_config_netdev(pdata);
if (ret)
goto err_io;
netdev_notice(pdata->netdev, "net device enabled\n");
return 0;
err_io:
platform_device_put(phy_pdev);
err_phydev:
xgbe_free_pdata(pdata);
err_alloc:
dev_notice(dev, "net device not enabled\n");
return ret;
}
static int xgbe_platform_remove(struct platform_device *pdev)
{
struct xgbe_prv_data *pdata = platform_get_drvdata(pdev);
xgbe_deconfig_netdev(pdata);
platform_device_put(pdata->phy_platdev);
xgbe_free_pdata(pdata);
return 0;
}
#ifdef CONFIG_PM
static int xgbe_platform_suspend(struct device *dev)
{
struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
struct net_device *netdev = pdata->netdev;
int ret = 0;
DBGPR("-->xgbe_suspend\n");
if (netif_running(netdev))
ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
DBGPR("<--xgbe_suspend\n");
return ret;
}
static int xgbe_platform_resume(struct device *dev)
{
struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
struct net_device *netdev = pdata->netdev;
int ret = 0;
DBGPR("-->xgbe_resume\n");
pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
if (netif_running(netdev)) {
ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
/* Schedule a restart in case the link or phy state changed
* while we were powered down.
*/
schedule_work(&pdata->restart_work);
}
DBGPR("<--xgbe_resume\n");
return ret;
}
#endif /* CONFIG_PM */
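/* Version data for the existing platform devices: the v1 PHY
* implementation, the V1 XPCS register access method and 80KB
* maximum Tx/Rx FIFOs.
*/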
static const struct xgbe_version_data xgbe_v1 = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v1,
.xpcs_access = XGBE_XPCS_ACCESS_V1,
.tx_max_fifo_size = 81920,
.rx_max_fifo_size = 81920,
};
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[] = {
{ .id = "AMDI8001",
.driver_data = (kernel_ulong_t)&xgbe_v1 },
{},
};
MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
#endif
#ifdef CONFIG_OF
static const struct of_device_id xgbe_of_match[] = {
{ .compatible = "amd,xgbe-seattle-v1a",
.data = &xgbe_v1 },
{},
};
MODULE_DEVICE_TABLE(of, xgbe_of_match);
#endif
static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops,
xgbe_platform_suspend, xgbe_platform_resume);
static struct platform_driver xgbe_driver = {
.driver = {
.name = "amd-xgbe",
#ifdef CONFIG_ACPI
.acpi_match_table = xgbe_acpi_match,
#endif
#ifdef CONFIG_OF
.of_match_table = xgbe_of_match,
#endif
.pm = &xgbe_platform_pm_ops,
},
.probe = xgbe_platform_probe,
.remove = xgbe_platform_remove,
};
int xgbe_platform_init(void)
{
return platform_driver_register(&xgbe_driver);
}
void xgbe_platform_exit(void)
{
platform_driver_unregister(&xgbe_driver);
}
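/* xgbe_platform_init()/xgbe_platform_exit() keep the platform-bus
* registration in this file; the driver's common module init/exit code
* is expected to call them. A minimal sketch of that wiring, shown here
* for illustration only (the real calls live in the common module code,
* not in this file):
*
*	static int __init xgbe_mod_init(void)
*	{
*		return xgbe_platform_init();
*	}
*
*	static void __exit xgbe_mod_exit(void)
*	{
*		xgbe_platform_exit();
*	}
*
*	module_init(xgbe_mod_init);
*	module_exit(xgbe_mod_exit);
*/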

View File

@ -129,7 +129,7 @@
#include <net/dcbnl.h>
#define XGBE_DRV_NAME "amd-xgbe"
#define XGBE_DRV_VERSION "1.0.2"
#define XGBE_DRV_VERSION "1.0.3"
#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
/* Descriptor related defines */
@ -158,7 +158,8 @@
#define XGBE_MAX_DMA_CHANNELS 16
#define XGBE_MAX_QUEUES 16
#define XGBE_DMA_STOP_TIMEOUT 5
#define XGBE_PRIORITY_QUEUES 8
#define XGBE_DMA_STOP_TIMEOUT 1
/* DMA cache settings - Outer sharable, write-back, write-allocate */
#define XGBE_DMA_OS_AXDOMAIN 0x2
@ -177,18 +178,19 @@
#define XGMAC_MAX_STD_PACKET 1518
#define XGMAC_JUMBO_PACKET_MTU 9000
#define XGMAC_MAX_JUMBO_PACKET 9018
#define XGMAC_ETH_PREAMBLE (12 + 8) /* Inter-frame gap + preamble */
#define XGMAC_PFC_DATA_LEN 46
#define XGMAC_PFC_DELAYS 14000
#define XGMAC_PRIO_QUEUES(_cnt) \
min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, (_cnt))
/* Common property names */
#define XGBE_MAC_ADDR_PROPERTY "mac-address"
#define XGBE_PHY_MODE_PROPERTY "phy-mode"
#define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
#define XGBE_SPEEDSET_PROPERTY "amd,speed-set"
#define XGBE_BLWC_PROPERTY "amd,serdes-blwc"
#define XGBE_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
#define XGBE_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
#define XGBE_TX_AMP_PROPERTY "amd,serdes-tx-amp"
#define XGBE_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
#define XGBE_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
/* Device-tree clock names */
#define XGBE_DMA_CLOCK "dma_clk"
@ -208,7 +210,12 @@
#define XGMAC_DRIVER_CONTEXT 1
#define XGMAC_IOCTL_CONTEXT 2
#define XGBE_FIFO_MAX 81920
#define XGMAC_FIFO_MIN_ALLOC 2048
#define XGMAC_FIFO_UNIT 256
#define XGMAC_FIFO_ALIGN(_x) \
(((_x) + XGMAC_FIFO_UNIT - 1) & ~(XGMAC_FIFO_UNIT - 1))
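/* e.g. XGMAC_FIFO_ALIGN(1000) rounds up to 1024, i.e. four 256-byte units */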
#define XGMAC_FIFO_FC_OFF 2048
#define XGMAC_FIFO_FC_MIN 4096
#define XGBE_TC_MIN_QUANTUM 10
@ -233,6 +240,14 @@
/* Flow control queue count */
#define XGMAC_MAX_FLOW_CONTROL_QUEUES 8
/* Flow control threshold units */
#define XGMAC_FLOW_CONTROL_UNIT 512
#define XGMAC_FLOW_CONTROL_ALIGN(_x) \
(((_x) + XGMAC_FLOW_CONTROL_UNIT - 1) & ~(XGMAC_FLOW_CONTROL_UNIT - 1))
#define XGMAC_FLOW_CONTROL_VALUE(_x) \
(((_x) < 1024) ? 0 : ((_x) / XGMAC_FLOW_CONTROL_UNIT) - 2)
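/* e.g. a 4096-byte threshold encodes as (4096 / 512) - 2 = 6, while any
* threshold below 1024 bytes encodes as 0
*/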
#define XGMAC_FLOW_CONTROL_MAX 33280
/* Maximum MAC address hash table size (256 bits = 8 bytes) */
#define XGBE_MAC_HASH_TABLE_SIZE 8
@ -244,46 +259,13 @@
/* Auto-negotiation */
#define XGBE_AN_MS_TIMEOUT 500
#define XGBE_LINK_TIMEOUT 10
#define XGBE_LINK_TIMEOUT 5
#define XGBE_AN_INT_CMPLT 0x01
#define XGBE_AN_INC_LINK 0x02
#define XGBE_AN_PG_RCV 0x04
#define XGBE_AN_INT_MASK 0x07
/* Rate-change complete wait/retry count */
#define XGBE_RATECHANGE_COUNT 500
/* Default SerDes settings */
#define XGBE_SPEED_10000_BLWC 0
#define XGBE_SPEED_10000_CDR 0x7
#define XGBE_SPEED_10000_PLL 0x1
#define XGBE_SPEED_10000_PQ 0x12
#define XGBE_SPEED_10000_RATE 0x0
#define XGBE_SPEED_10000_TXAMP 0xa
#define XGBE_SPEED_10000_WORD 0x7
#define XGBE_SPEED_10000_DFE_TAP_CONFIG 0x1
#define XGBE_SPEED_10000_DFE_TAP_ENABLE 0x7f
#define XGBE_SPEED_2500_BLWC 1
#define XGBE_SPEED_2500_CDR 0x2
#define XGBE_SPEED_2500_PLL 0x0
#define XGBE_SPEED_2500_PQ 0xa
#define XGBE_SPEED_2500_RATE 0x1
#define XGBE_SPEED_2500_TXAMP 0xf
#define XGBE_SPEED_2500_WORD 0x1
#define XGBE_SPEED_2500_DFE_TAP_CONFIG 0x3
#define XGBE_SPEED_2500_DFE_TAP_ENABLE 0x0
#define XGBE_SPEED_1000_BLWC 1
#define XGBE_SPEED_1000_CDR 0x2
#define XGBE_SPEED_1000_PLL 0x0
#define XGBE_SPEED_1000_PQ 0xa
#define XGBE_SPEED_1000_RATE 0x3
#define XGBE_SPEED_1000_TXAMP 0xf
#define XGBE_SPEED_1000_WORD 0x1
#define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3
#define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0
#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
#define XGBE_SGMII_AN_LINK_SPEED_100 0x04
#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
struct xgbe_prv_data;
@ -487,6 +469,18 @@ enum xgbe_speed {
XGBE_SPEEDS,
};
enum xgbe_xpcs_access {
XGBE_XPCS_ACCESS_V1 = 0,
XGBE_XPCS_ACCESS_V2,
};
enum xgbe_an_mode {
XGBE_AN_MODE_CL73 = 0,
XGBE_AN_MODE_CL37,
XGBE_AN_MODE_CL37_SGMII,
XGBE_AN_MODE_NONE,
};
enum xgbe_an {
XGBE_AN_READY = 0,
XGBE_AN_PAGE_RECEIVED,
@ -504,8 +498,10 @@ enum xgbe_rx {
};
enum xgbe_mode {
XGBE_MODE_KR = 0,
XGBE_MODE_KX,
XGBE_MODE_KX_1000 = 0,
XGBE_MODE_KX_2500,
XGBE_MODE_KR,
XGBE_MODE_UNKNOWN,
};
enum xgbe_speedset {
@ -601,9 +597,7 @@ struct xgbe_hw_if {
int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
int (*set_gmii_speed)(struct xgbe_prv_data *);
int (*set_gmii_2500_speed)(struct xgbe_prv_data *);
int (*set_xgmii_speed)(struct xgbe_prv_data *);
int (*set_speed)(struct xgbe_prv_data *, int);
void (*enable_tx)(struct xgbe_prv_data *);
void (*disable_tx)(struct xgbe_prv_data *);
@ -684,9 +678,53 @@ struct xgbe_hw_if {
int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
};
/* This structure holds the routines specific to a particular PHY
* implementation. All routines are required unless noted below.
* Optional routines:
* kr_training_pre, kr_training_post
*/
struct xgbe_phy_impl_if {
/* Perform Setup/teardown actions */
int (*init)(struct xgbe_prv_data *);
void (*exit)(struct xgbe_prv_data *);
/* Perform start/stop specific actions */
int (*reset)(struct xgbe_prv_data *);
int (*start)(struct xgbe_prv_data *);
void (*stop)(struct xgbe_prv_data *);
/* Return the link status */
int (*link_status)(struct xgbe_prv_data *);
/* Indicate if a particular speed is valid */
bool (*valid_speed)(struct xgbe_prv_data *, int);
/* Check if the specified mode can/should be used */
bool (*use_mode)(struct xgbe_prv_data *, enum xgbe_mode);
/* Switch the PHY into various modes */
void (*set_mode)(struct xgbe_prv_data *, enum xgbe_mode);
/* Retrieve mode needed for a specific speed */
enum xgbe_mode (*get_mode)(struct xgbe_prv_data *, int);
/* Retrieve new/next mode when trying to auto-negotiate */
enum xgbe_mode (*switch_mode)(struct xgbe_prv_data *);
/* Retrieve current mode */
enum xgbe_mode (*cur_mode)(struct xgbe_prv_data *);
/* Retrieve current auto-negotiation mode */
enum xgbe_an_mode (*an_mode)(struct xgbe_prv_data *);
/* Process results of auto-negotiation */
enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);
/* Pre/Post KR training enablement support */
void (*kr_training_pre)(struct xgbe_prv_data *);
void (*kr_training_post)(struct xgbe_prv_data *);
};
struct xgbe_phy_if {
/* For initial PHY setup */
void (*phy_init)(struct xgbe_prv_data *);
/* For PHY setup/teardown */
int (*phy_init)(struct xgbe_prv_data *);
void (*phy_exit)(struct xgbe_prv_data *);
/* For PHY support when setting device up/down */
int (*phy_reset)(struct xgbe_prv_data *);
@ -696,6 +734,12 @@ struct xgbe_phy_if {
/* For PHY support while device is up */
void (*phy_status)(struct xgbe_prv_data *);
int (*phy_config_aneg)(struct xgbe_prv_data *);
/* For PHY settings validation */
bool (*phy_valid_speed)(struct xgbe_prv_data *, int);
/* PHY implementation specific services */
struct xgbe_phy_impl_if phy_impl;
};
struct xgbe_desc_if {
@ -755,11 +799,24 @@ struct xgbe_hw_features {
unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
};
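/* Per-hardware-version data, selected at probe time from the bus match
* table (ACPI driver_data or OF data) and stored in pdata->vdata
*/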
struct xgbe_version_data {
void (*init_function_ptrs_phy_impl)(struct xgbe_phy_if *);
enum xgbe_xpcs_access xpcs_access;
unsigned int mmc_64bit;
unsigned int tx_max_fifo_size;
unsigned int rx_max_fifo_size;
};
struct xgbe_prv_data {
struct net_device *netdev;
struct platform_device *pdev;
struct platform_device *platdev;
struct acpi_device *adev;
struct device *dev;
struct platform_device *phy_platdev;
struct device *phy_dev;
/* Version related data */
struct xgbe_version_data *vdata;
/* ACPI or DT flag */
unsigned int use_acpi;
@ -776,6 +833,9 @@ struct xgbe_prv_data {
/* XPCS indirect addressing lock */
spinlock_t xpcs_lock;
unsigned int xpcs_window;
unsigned int xpcs_window_size;
unsigned int xpcs_window_mask;
/* RSS addressing mutex */
struct mutex rss_mutex;
@ -785,6 +845,7 @@ struct xgbe_prv_data {
int dev_irq;
unsigned int per_channel_irq;
int channel_irq[XGBE_MAX_DMA_CHANNELS];
struct xgbe_hw_if hw_if;
struct xgbe_phy_if phy_if;
@ -803,12 +864,16 @@ struct xgbe_prv_data {
/* Rings for Tx/Rx on a DMA channel */
struct xgbe_channel *channel;
unsigned int tx_max_channel_count;
unsigned int rx_max_channel_count;
unsigned int channel_count;
unsigned int tx_ring_count;
unsigned int tx_desc_count;
unsigned int rx_ring_count;
unsigned int rx_desc_count;
unsigned int tx_max_q_count;
unsigned int rx_max_q_count;
unsigned int tx_q_count;
unsigned int rx_q_count;
@ -820,11 +885,13 @@ struct xgbe_prv_data {
unsigned int tx_threshold;
unsigned int tx_pbl;
unsigned int tx_osp_mode;
unsigned int tx_max_fifo_size;
/* Rx settings */
unsigned int rx_sf_mode;
unsigned int rx_threshold;
unsigned int rx_pbl;
unsigned int rx_max_fifo_size;
/* Tx coalescing settings */
unsigned int tx_usecs;
@ -842,6 +909,8 @@ struct xgbe_prv_data {
unsigned int pause_autoneg;
unsigned int tx_pause;
unsigned int rx_pause;
unsigned int rx_rfa[XGBE_MAX_QUEUES];
unsigned int rx_rfd[XGBE_MAX_QUEUES];
/* Receive Side Scaling settings */
u8 rss_key[XGBE_RSS_HASH_KEY_SIZE];
@ -881,6 +950,8 @@ struct xgbe_prv_data {
struct ieee_pfc *pfc;
unsigned int q2tc_map[XGBE_MAX_QUEUES];
unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS];
unsigned int pfcq[XGBE_MAX_QUEUES];
unsigned int pfc_rfa;
u8 num_tcs;
/* Hardware features of the device */
@ -901,6 +972,8 @@ struct xgbe_prv_data {
int phy_speed;
/* MDIO/PHY related settings */
unsigned int phy_started;
void *phy_data;
struct xgbe_phy phy;
int mdio_mmd;
unsigned long link_check;
@ -911,23 +984,9 @@ struct xgbe_prv_data {
int an_irq;
struct work_struct an_irq_work;
unsigned int speed_set;
/* SerDes UEFI configurable settings.
* Switching between modes/speeds requires new values for some
* SerDes settings. The values can be supplied as device
* properties in array format. The first array entry is for
* 1GbE, second for 2.5GbE and third for 10GbE
*/
u32 serdes_blwc[XGBE_SPEEDS];
u32 serdes_cdr_rate[XGBE_SPEEDS];
u32 serdes_pq_skew[XGBE_SPEEDS];
u32 serdes_tx_amp[XGBE_SPEEDS];
u32 serdes_dfe_tap_cfg[XGBE_SPEEDS];
u32 serdes_dfe_tap_ena[XGBE_SPEEDS];
/* Auto-negotiation state machine support */
unsigned int an_int;
unsigned int an_status;
struct mutex an_mutex;
enum xgbe_an an_result;
enum xgbe_an an_state;
@ -938,6 +997,7 @@ struct xgbe_prv_data {
unsigned int parallel_detect;
unsigned int fec_ability;
unsigned long an_start;
enum xgbe_an_mode an_mode;
unsigned int lpm_ctrl; /* CTRL1 for resume */
@ -952,9 +1012,18 @@ struct xgbe_prv_data {
};
/* Function prototypes */
struct xgbe_prv_data *xgbe_alloc_pdata(struct device *);
void xgbe_free_pdata(struct xgbe_prv_data *);
void xgbe_set_counts(struct xgbe_prv_data *);
int xgbe_config_netdev(struct xgbe_prv_data *);
void xgbe_deconfig_netdev(struct xgbe_prv_data *);
int xgbe_platform_init(void);
void xgbe_platform_exit(void);
void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
const struct net_device_ops *xgbe_get_netdev_ops(void);
const struct ethtool_ops *xgbe_get_ethtool_ops(void);
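/* The phy_impl callbacks are populated through the version data's
* init_function_ptrs_phy_impl hook (xgbe_init_function_ptrs_phy_v1 for
* the platform devices). A minimal sketch of that pattern, for
* illustration only: the callback below is a hypothetical placeholder,
* not the real xgbe-phy-v1.c code, and a real implementation must fill
* in every required member of struct xgbe_phy_impl_if:
*
*	static int xgbe_phy_example_init(struct xgbe_prv_data *pdata)
*	{
*		return 0;	(a real init would set up pdata->phy_data)
*	}
*
*	void xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *phy_if)
*	{
*		phy_if->phy_impl.init = xgbe_phy_example_init;
*		...assign the remaining phy_impl callbacks...
*	}
*/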