2018-08-22 06:02:19 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2015-06-11 06:01:43 +08:00
|
|
|
/* Renesas Ethernet AVB device driver
|
|
|
|
*
|
2019-08-16 23:17:02 +08:00
|
|
|
* Copyright (C) 2014-2019 Renesas Electronics Corporation
|
2015-06-11 06:01:43 +08:00
|
|
|
* Copyright (C) 2015 Renesas Solutions Corp.
|
2016-02-10 06:37:44 +08:00
|
|
|
* Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
|
2015-06-11 06:01:43 +08:00
|
|
|
*
|
|
|
|
* Based on the SuperH Ethernet driver
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/cache.h>
|
|
|
|
#include <linux/clk.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/dma-mapping.h>
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/etherdevice.h>
|
|
|
|
#include <linux/ethtool.h>
|
|
|
|
#include <linux/if_vlan.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/list.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/net_tstamp.h>
|
|
|
|
#include <linux/of.h>
|
|
|
|
#include <linux/of_device.h>
|
|
|
|
#include <linux/of_irq.h>
|
|
|
|
#include <linux/of_mdio.h>
|
|
|
|
#include <linux/of_net.h>
|
|
|
|
#include <linux/pm_runtime.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/spinlock.h>
|
2017-01-28 03:46:27 +08:00
|
|
|
#include <linux/sys_soc.h>
|
2015-06-11 06:01:43 +08:00
|
|
|
|
2015-11-21 03:29:39 +08:00
|
|
|
#include <asm/div64.h>
|
|
|
|
|
2015-06-11 06:01:43 +08:00
|
|
|
#include "ravb.h"
|
|
|
|
|
|
|
|
/* Default netif message level: link, timer, and RX/TX error events */
#define RAVB_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	  | \
		 NETIF_MSG_TIMER  | \
		 NETIF_MSG_RX_ERR | \
		 NETIF_MSG_TX_ERR)
|
|
|
|
|
2016-04-03 22:54:38 +08:00
|
|
|
/* Per-queue RX interrupt names (R-Car Gen3 multiplexed IRQs) */
static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
	"ch0", /* RAVB_BE */
	"ch1", /* RAVB_NC */
};
|
|
|
|
|
|
|
|
/* Per-queue TX interrupt names (R-Car Gen3 multiplexed IRQs) */
static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
	"ch18", /* RAVB_BE */
	"ch19", /* RAVB_NC */
};
|
|
|
|
|
2016-02-10 06:37:44 +08:00
|
|
|
/* Read-modify-write a register: clear the bits in @clear, then set the
 * bits in @set. Not atomic; callers serialize access as needed.
 */
void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
		 u32 set)
{
	ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
}
|
|
|
|
|
2015-06-11 06:02:30 +08:00
|
|
|
/* Busy-wait until (reg & mask) == value.
 *
 * Polls up to 10000 times with a 10 us delay between reads (~100 ms max).
 * Returns 0 on success or -ETIMEDOUT if the condition was never met.
 */
int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
{
	int i;

	for (i = 0; i < 10000; i++) {
		if ((ravb_read(ndev, reg) & mask) == value)
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}
|
|
|
|
|
|
|
|
static int ravb_config(struct net_device *ndev)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
|
|
|
|
/* Set config mode */
|
2016-02-10 06:37:44 +08:00
|
|
|
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
|
2015-06-11 06:01:43 +08:00
|
|
|
/* Check if the operating mode is changed to the config mode */
|
|
|
|
error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
|
|
|
|
if (error)
|
|
|
|
netdev_err(ndev, "failed to switch device to config mode\n");
|
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ravb_set_rate(struct net_device *ndev)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
|
|
|
|
switch (priv->speed) {
|
|
|
|
case 100: /* 100BASE */
|
|
|
|
ravb_write(ndev, GECMR_SPEED_100, GECMR);
|
|
|
|
break;
|
|
|
|
case 1000: /* 1000BASE */
|
|
|
|
ravb_write(ndev, GECMR_SPEED_1000, GECMR);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ravb_set_buffer_align(struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
|
|
|
|
|
|
|
|
if (reserve)
|
|
|
|
skb_reserve(skb, RAVB_ALIGN - reserve);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get MAC address from the MAC address registers
|
|
|
|
*
|
|
|
|
* Ethernet AVB device doesn't have ROM for MAC address.
|
|
|
|
* This function gets the MAC address that was used by a bootloader.
|
|
|
|
*/
|
of: net: pass the dst buffer to of_get_mac_address()
of_get_mac_address() returns a "const void*" pointer to a MAC address.
Lately, support to fetch the MAC address by an NVMEM provider was added.
But this will only work with platform devices. It will not work with
PCI devices (e.g. of an integrated root complex) and esp. not with DSA
ports.
There is an of_* variant of the nvmem binding which works without
devices. The returned data of a nvmem_cell_read() has to be freed after
use. On the other hand the return of_get_mac_address() points to some
static data without a lifetime. The trick for now, was to allocate a
device resource managed buffer which is then returned. This will only
work if we have an actual device.
Change it, so that the caller of of_get_mac_address() has to supply a
buffer where the MAC address is written to. Unfortunately, this will
touch all drivers which use the of_get_mac_address().
Usually the code looks like:
const char *addr;
addr = of_get_mac_address(np);
if (!IS_ERR(addr))
ether_addr_copy(ndev->dev_addr, addr);
This can then be simply rewritten as:
of_get_mac_address(np, ndev->dev_addr);
Sometimes is_valid_ether_addr() is used to test the MAC address.
of_get_mac_address() already makes sure, it just returns a valid MAC
address. Thus we can just test its return code. But we have to be
careful if there are still other sources for the MAC address before the
of_get_mac_address(). In this case we have to keep the
is_valid_ether_addr() call.
The following coccinelle patch was used to convert common cases to the
new style. Afterwards, I've manually gone over the drivers and fixed the
return code variable: either used a new one or if one was already
available use that. Mansour Moufid, thanks for that coccinelle patch!
<spml>
@a@
identifier x;
expression y, z;
@@
- x = of_get_mac_address(y);
+ x = of_get_mac_address(y, z);
<...
- ether_addr_copy(z, x);
...>
@@
identifier a.x;
@@
- if (<+... x ...+>) {}
@@
identifier a.x;
@@
if (<+... x ...+>) {
...
}
- else {}
@@
identifier a.x;
expression e;
@@
- if (<+... x ...+>@e)
- {}
- else
+ if (!(e))
{...}
@@
expression x, y, z;
@@
- x = of_get_mac_address(y, z);
+ of_get_mac_address(y, z);
... when != x
</spml>
All drivers, except drivers/net/ethernet/aeroflex/greth.c, were
compile-time tested.
Suggested-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: Michael Walle <michael@walle.cc>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-04-13 01:47:17 +08:00
|
|
|
static void ravb_read_mac_address(struct device_node *np,
|
|
|
|
struct net_device *ndev)
|
2015-06-11 06:01:43 +08:00
|
|
|
{
|
of: net: pass the dst buffer to of_get_mac_address()
of_get_mac_address() returns a "const void*" pointer to a MAC address.
Lately, support to fetch the MAC address by an NVMEM provider was added.
But this will only work with platform devices. It will not work with
PCI devices (e.g. of an integrated root complex) and esp. not with DSA
ports.
There is an of_* variant of the nvmem binding which works without
devices. The returned data of a nvmem_cell_read() has to be freed after
use. On the other hand the return of_get_mac_address() points to some
static data without a lifetime. The trick for now, was to allocate a
device resource managed buffer which is then returned. This will only
work if we have an actual device.
Change it, so that the caller of of_get_mac_address() has to supply a
buffer where the MAC address is written to. Unfortunately, this will
touch all drivers which use the of_get_mac_address().
Usually the code looks like:
const char *addr;
addr = of_get_mac_address(np);
if (!IS_ERR(addr))
ether_addr_copy(ndev->dev_addr, addr);
This can then be simply rewritten as:
of_get_mac_address(np, ndev->dev_addr);
Sometimes is_valid_ether_addr() is used to test the MAC address.
of_get_mac_address() already makes sure, it just returns a valid MAC
address. Thus we can just test its return code. But we have to be
careful if there are still other sources for the MAC address before the
of_get_mac_address(). In this case we have to keep the
is_valid_ether_addr() call.
The following coccinelle patch was used to convert common cases to the
new style. Afterwards, I've manually gone over the drivers and fixed the
return code variable: either used a new one or if one was already
available use that. Mansour Moufid, thanks for that coccinelle patch!
<spml>
@a@
identifier x;
expression y, z;
@@
- x = of_get_mac_address(y);
+ x = of_get_mac_address(y, z);
<...
- ether_addr_copy(z, x);
...>
@@
identifier a.x;
@@
- if (<+... x ...+>) {}
@@
identifier a.x;
@@
if (<+... x ...+>) {
...
}
- else {}
@@
identifier a.x;
expression e;
@@
- if (<+... x ...+>@e)
- {}
- else
+ if (!(e))
{...}
@@
expression x, y, z;
@@
- x = of_get_mac_address(y, z);
+ of_get_mac_address(y, z);
... when != x
</spml>
All drivers, except drivers/net/ethernet/aeroflex/greth.c, were
compile-time tested.
Suggested-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: Michael Walle <michael@walle.cc>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-04-13 01:47:17 +08:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = of_get_mac_address(np, ndev->dev_addr);
|
|
|
|
if (ret) {
|
2015-12-05 05:58:07 +08:00
|
|
|
u32 mahr = ravb_read(ndev, MAHR);
|
|
|
|
u32 malr = ravb_read(ndev, MALR);
|
|
|
|
|
|
|
|
ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
|
|
|
|
ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
|
|
|
|
ndev->dev_addr[2] = (mahr >> 8) & 0xFF;
|
|
|
|
ndev->dev_addr[3] = (mahr >> 0) & 0xFF;
|
|
|
|
ndev->dev_addr[4] = (malr >> 8) & 0xFF;
|
|
|
|
ndev->dev_addr[5] = (malr >> 0) & 0xFF;
|
2015-06-11 06:01:43 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set or clear the given PIR bit(s) for the bit-banged MDIO bus */
static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
}
|
|
|
|
|
|
|
|
/* MDC pin control */
|
|
|
|
static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
|
|
|
|
{
|
|
|
|
ravb_mdio_ctrl(ctrl, PIR_MDC, level);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Data I/O pin control */
|
|
|
|
static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
|
|
|
|
{
|
|
|
|
ravb_mdio_ctrl(ctrl, PIR_MMD, output);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set data bit */
|
|
|
|
static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
|
|
|
|
{
|
|
|
|
ravb_mdio_ctrl(ctrl, PIR_MDO, value);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get data bit */
|
|
|
|
static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = container_of(ctrl, struct ravb_private,
|
|
|
|
mdiobb);
|
|
|
|
|
|
|
|
return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* MDIO bus control struct */
|
2020-08-27 06:56:04 +08:00
|
|
|
static const struct mdiobb_ops bb_ops = {
|
2015-06-11 06:01:43 +08:00
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.set_mdc = ravb_set_mdc,
|
|
|
|
.set_mdio_dir = ravb_set_mdio_dir,
|
|
|
|
.set_mdio_data = ravb_set_mdio_data,
|
|
|
|
.get_mdio_data = ravb_get_mdio_data,
|
|
|
|
};
|
|
|
|
|
2017-01-26 21:29:27 +08:00
|
|
|
/* Free TX skb function for AVB-IP */
|
|
|
|
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
struct net_device_stats *stats = &priv->stats[q];
|
2018-09-19 16:06:21 +08:00
|
|
|
int num_tx_desc = priv->num_tx_desc;
|
2017-01-26 21:29:27 +08:00
|
|
|
struct ravb_tx_desc *desc;
|
|
|
|
int free_num = 0;
|
|
|
|
int entry;
|
|
|
|
u32 size;
|
|
|
|
|
|
|
|
for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
|
|
|
|
bool txed;
|
|
|
|
|
|
|
|
entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
|
2018-09-19 16:06:21 +08:00
|
|
|
num_tx_desc);
|
2017-01-26 21:29:27 +08:00
|
|
|
desc = &priv->tx_ring[q][entry];
|
|
|
|
txed = desc->die_dt == DT_FEMPTY;
|
|
|
|
if (free_txed_only && !txed)
|
|
|
|
break;
|
|
|
|
/* Descriptor type must be checked before all other reads */
|
|
|
|
dma_rmb();
|
|
|
|
size = le16_to_cpu(desc->ds_tagl) & TX_DS;
|
|
|
|
/* Free the original skb. */
|
2018-09-19 16:06:21 +08:00
|
|
|
if (priv->tx_skb[q][entry / num_tx_desc]) {
|
2017-01-26 21:29:27 +08:00
|
|
|
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
|
|
|
|
size, DMA_TO_DEVICE);
|
|
|
|
/* Last packet descriptor? */
|
2018-09-19 16:06:21 +08:00
|
|
|
if (entry % num_tx_desc == num_tx_desc - 1) {
|
|
|
|
entry /= num_tx_desc;
|
2017-01-26 21:29:27 +08:00
|
|
|
dev_kfree_skb_any(priv->tx_skb[q][entry]);
|
|
|
|
priv->tx_skb[q][entry] = NULL;
|
|
|
|
if (txed)
|
|
|
|
stats->tx_packets++;
|
|
|
|
}
|
|
|
|
free_num++;
|
|
|
|
}
|
|
|
|
if (txed)
|
|
|
|
stats->tx_bytes += size;
|
|
|
|
desc->die_dt = DT_EEMPTY;
|
|
|
|
}
|
|
|
|
return free_num;
|
|
|
|
}
|
|
|
|
|
2015-06-11 06:01:43 +08:00
|
|
|
/* Free skb's and DMA buffers for Ethernet AVB */
|
|
|
|
static void ravb_ring_free(struct net_device *ndev, int q)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
2018-09-19 16:06:21 +08:00
|
|
|
int num_tx_desc = priv->num_tx_desc;
|
2015-06-11 06:01:43 +08:00
|
|
|
int ring_size;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (priv->rx_ring[q]) {
|
2017-01-26 21:29:27 +08:00
|
|
|
for (i = 0; i < priv->num_rx_ring[q]; i++) {
|
|
|
|
struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
|
|
|
|
|
|
|
|
if (!dma_mapping_error(ndev->dev.parent,
|
|
|
|
le32_to_cpu(desc->dptr)))
|
|
|
|
dma_unmap_single(ndev->dev.parent,
|
|
|
|
le32_to_cpu(desc->dptr),
|
2019-11-14 09:49:49 +08:00
|
|
|
RX_BUF_SZ,
|
2017-01-26 21:29:27 +08:00
|
|
|
DMA_FROM_DEVICE);
|
|
|
|
}
|
2015-06-11 06:01:43 +08:00
|
|
|
ring_size = sizeof(struct ravb_ex_rx_desc) *
|
|
|
|
(priv->num_rx_ring[q] + 1);
|
2015-09-30 14:15:53 +08:00
|
|
|
dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
|
2015-06-11 06:01:43 +08:00
|
|
|
priv->rx_desc_dma[q]);
|
|
|
|
priv->rx_ring[q] = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (priv->tx_ring[q]) {
|
2017-01-26 21:29:27 +08:00
|
|
|
ravb_tx_free(ndev, q, false);
|
|
|
|
|
2015-06-11 06:01:43 +08:00
|
|
|
ring_size = sizeof(struct ravb_tx_desc) *
|
2018-09-19 16:06:21 +08:00
|
|
|
(priv->num_tx_ring[q] * num_tx_desc + 1);
|
2015-09-30 14:15:53 +08:00
|
|
|
dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
|
2015-06-11 06:01:43 +08:00
|
|
|
priv->tx_desc_dma[q]);
|
|
|
|
priv->tx_ring[q] = NULL;
|
|
|
|
}
|
2017-01-26 21:29:27 +08:00
|
|
|
|
2017-06-06 06:08:10 +08:00
|
|
|
/* Free RX skb ringbuffer */
|
|
|
|
if (priv->rx_skb[q]) {
|
|
|
|
for (i = 0; i < priv->num_rx_ring[q]; i++)
|
|
|
|
dev_kfree_skb(priv->rx_skb[q][i]);
|
|
|
|
}
|
|
|
|
kfree(priv->rx_skb[q]);
|
|
|
|
priv->rx_skb[q] = NULL;
|
|
|
|
|
|
|
|
/* Free aligned TX buffers */
|
|
|
|
kfree(priv->tx_align[q]);
|
|
|
|
priv->tx_align[q] = NULL;
|
|
|
|
|
2017-01-26 21:29:27 +08:00
|
|
|
/* Free TX skb ringbuffer.
|
|
|
|
* SKBs are freed by ravb_tx_free() call above.
|
|
|
|
*/
|
|
|
|
kfree(priv->tx_skb[q]);
|
|
|
|
priv->tx_skb[q] = NULL;
|
2015-06-11 06:01:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Format skb and descriptor buffer for Ethernet AVB */
|
|
|
|
static void ravb_ring_format(struct net_device *ndev, int q)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
2018-09-19 16:06:21 +08:00
|
|
|
int num_tx_desc = priv->num_tx_desc;
|
2015-07-11 02:10:10 +08:00
|
|
|
struct ravb_ex_rx_desc *rx_desc;
|
|
|
|
struct ravb_tx_desc *tx_desc;
|
|
|
|
struct ravb_desc *desc;
|
2015-06-11 06:01:43 +08:00
|
|
|
int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
|
2015-07-26 04:42:01 +08:00
|
|
|
int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
|
2018-09-19 16:06:21 +08:00
|
|
|
num_tx_desc;
|
2015-06-11 06:01:43 +08:00
|
|
|
dma_addr_t dma_addr;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
priv->cur_rx[q] = 0;
|
|
|
|
priv->cur_tx[q] = 0;
|
|
|
|
priv->dirty_rx[q] = 0;
|
|
|
|
priv->dirty_tx[q] = 0;
|
|
|
|
|
|
|
|
memset(priv->rx_ring[q], 0, rx_ring_size);
|
|
|
|
/* Build RX ring buffer */
|
|
|
|
for (i = 0; i < priv->num_rx_ring[q]; i++) {
|
|
|
|
/* RX descriptor */
|
|
|
|
rx_desc = &priv->rx_ring[q][i];
|
2019-11-14 09:49:49 +08:00
|
|
|
rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
|
2015-09-30 14:15:53 +08:00
|
|
|
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
|
2019-11-14 09:49:49 +08:00
|
|
|
RX_BUF_SZ,
|
2015-06-11 06:01:43 +08:00
|
|
|
DMA_FROM_DEVICE);
|
2015-07-22 06:31:59 +08:00
|
|
|
/* We just set the data size to 0 for a failed mapping which
|
|
|
|
* should prevent DMA from happening...
|
|
|
|
*/
|
2015-09-30 14:15:53 +08:00
|
|
|
if (dma_mapping_error(ndev->dev.parent, dma_addr))
|
2015-07-22 06:31:59 +08:00
|
|
|
rx_desc->ds_cc = cpu_to_le16(0);
|
2015-06-11 06:01:43 +08:00
|
|
|
rx_desc->dptr = cpu_to_le32(dma_addr);
|
|
|
|
rx_desc->die_dt = DT_FEMPTY;
|
|
|
|
}
|
|
|
|
rx_desc = &priv->rx_ring[q][i];
|
|
|
|
rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
|
|
|
|
rx_desc->die_dt = DT_LINKFIX; /* type */
|
|
|
|
|
|
|
|
memset(priv->tx_ring[q], 0, tx_ring_size);
|
|
|
|
/* Build TX ring buffer */
|
2015-07-26 04:42:01 +08:00
|
|
|
for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
|
|
|
|
i++, tx_desc++) {
|
|
|
|
tx_desc->die_dt = DT_EEMPTY;
|
2018-09-19 16:06:21 +08:00
|
|
|
if (num_tx_desc > 1) {
|
|
|
|
tx_desc++;
|
|
|
|
tx_desc->die_dt = DT_EEMPTY;
|
|
|
|
}
|
2015-06-11 06:01:43 +08:00
|
|
|
}
|
|
|
|
tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
|
|
|
|
tx_desc->die_dt = DT_LINKFIX; /* type */
|
|
|
|
|
|
|
|
/* RX descriptor base address for best effort */
|
|
|
|
desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
|
|
|
|
desc->die_dt = DT_LINKFIX; /* type */
|
|
|
|
desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
|
|
|
|
|
|
|
|
/* TX descriptor base address for best effort */
|
|
|
|
desc = &priv->desc_bat[q];
|
|
|
|
desc->die_dt = DT_LINKFIX; /* type */
|
|
|
|
desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Init skb and descriptor buffer for Ethernet AVB */
|
|
|
|
static int ravb_ring_init(struct net_device *ndev, int q)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
2018-09-19 16:06:21 +08:00
|
|
|
int num_tx_desc = priv->num_tx_desc;
|
2015-07-22 06:31:59 +08:00
|
|
|
struct sk_buff *skb;
|
2015-06-11 06:01:43 +08:00
|
|
|
int ring_size;
|
2015-07-22 06:31:59 +08:00
|
|
|
int i;
|
2015-06-11 06:01:43 +08:00
|
|
|
|
|
|
|
/* Allocate RX and TX skb rings */
|
|
|
|
priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
|
|
|
|
sizeof(*priv->rx_skb[q]), GFP_KERNEL);
|
|
|
|
priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
|
|
|
|
sizeof(*priv->tx_skb[q]), GFP_KERNEL);
|
|
|
|
if (!priv->rx_skb[q] || !priv->tx_skb[q])
|
|
|
|
goto error;
|
|
|
|
|
2015-07-22 06:31:59 +08:00
|
|
|
for (i = 0; i < priv->num_rx_ring[q]; i++) {
|
2019-11-14 09:49:49 +08:00
|
|
|
skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
|
2015-07-22 06:31:59 +08:00
|
|
|
if (!skb)
|
|
|
|
goto error;
|
|
|
|
ravb_set_buffer_align(skb);
|
|
|
|
priv->rx_skb[q][i] = skb;
|
|
|
|
}
|
|
|
|
|
2018-09-19 16:06:21 +08:00
|
|
|
if (num_tx_desc > 1) {
|
|
|
|
/* Allocate rings for the aligned buffers */
|
|
|
|
priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
|
|
|
|
DPTR_ALIGN - 1, GFP_KERNEL);
|
|
|
|
if (!priv->tx_align[q])
|
|
|
|
goto error;
|
|
|
|
}
|
2015-06-11 06:01:43 +08:00
|
|
|
|
|
|
|
/* Allocate all RX descriptors. */
|
|
|
|
ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
|
2015-09-30 14:15:53 +08:00
|
|
|
priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
|
2015-06-11 06:01:43 +08:00
|
|
|
&priv->rx_desc_dma[q],
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!priv->rx_ring[q])
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
priv->dirty_rx[q] = 0;
|
|
|
|
|
|
|
|
/* Allocate all TX descriptors. */
|
2015-07-26 04:42:01 +08:00
|
|
|
ring_size = sizeof(struct ravb_tx_desc) *
|
2018-09-19 16:06:21 +08:00
|
|
|
(priv->num_tx_ring[q] * num_tx_desc + 1);
|
2015-09-30 14:15:53 +08:00
|
|
|
priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
|
2015-06-11 06:01:43 +08:00
|
|
|
&priv->tx_desc_dma[q],
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!priv->tx_ring[q])
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
error:
|
|
|
|
ravb_ring_free(ndev, q);
|
|
|
|
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* E-MAC init function */
|
|
|
|
static void ravb_emac_init(struct net_device *ndev)
|
|
|
|
{
|
|
|
|
/* Receive frame limit set register */
|
|
|
|
ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
|
|
|
|
|
2017-10-04 15:54:27 +08:00
|
|
|
/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
|
2018-11-21 19:21:26 +08:00
|
|
|
ravb_write(ndev, ECMR_ZPF | ECMR_DM |
|
2017-10-04 15:54:27 +08:00
|
|
|
(ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
|
2016-01-11 05:27:38 +08:00
|
|
|
ECMR_TE | ECMR_RE, ECMR);
|
2015-06-11 06:01:43 +08:00
|
|
|
|
|
|
|
ravb_set_rate(ndev);
|
|
|
|
|
|
|
|
/* Set MAC address */
|
|
|
|
ravb_write(ndev,
|
|
|
|
(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
|
|
|
|
(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
|
|
|
|
ravb_write(ndev,
|
|
|
|
(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
|
|
|
|
|
|
|
|
/* E-MAC status register clear */
|
|
|
|
ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
|
|
|
|
|
|
|
|
/* E-MAC interrupt enable register */
|
|
|
|
ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Device init function for Ethernet AVB */
|
|
|
|
static int ravb_dmac_init(struct net_device *ndev)
|
|
|
|
{
|
2016-04-03 22:54:38 +08:00
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
2015-06-11 06:01:43 +08:00
|
|
|
int error;
|
|
|
|
|
|
|
|
/* Set CONFIG mode */
|
|
|
|
error = ravb_config(ndev);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
error = ravb_ring_init(ndev, RAVB_BE);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
error = ravb_ring_init(ndev, RAVB_NC);
|
|
|
|
if (error) {
|
|
|
|
ravb_ring_free(ndev, RAVB_BE);
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Descriptor format */
|
|
|
|
ravb_ring_format(ndev, RAVB_BE);
|
|
|
|
ravb_ring_format(ndev, RAVB_NC);
|
|
|
|
|
|
|
|
/* Set AVB RX */
|
2016-06-01 02:01:28 +08:00
|
|
|
ravb_write(ndev,
|
|
|
|
RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
|
2015-06-11 06:01:43 +08:00
|
|
|
|
|
|
|
/* Set FIFO size */
|
2019-03-07 18:24:47 +08:00
|
|
|
ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
|
2015-06-11 06:01:43 +08:00
|
|
|
|
|
|
|
/* Timestamp enable */
|
|
|
|
ravb_write(ndev, TCCR_TFEN, TCCR);
|
|
|
|
|
2015-12-15 00:24:58 +08:00
|
|
|
/* Interrupt init: */
|
2016-04-03 22:54:38 +08:00
|
|
|
if (priv->chip_id == RCAR_GEN3) {
|
|
|
|
/* Clear DIL.DPLx */
|
|
|
|
ravb_write(ndev, 0, DIL);
|
|
|
|
/* Set queue specific interrupt */
|
|
|
|
ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
|
|
|
|
}
|
2015-06-11 06:01:43 +08:00
|
|
|
/* Frame receive */
|
|
|
|
ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
|
2015-12-15 00:24:58 +08:00
|
|
|
/* Disable FIFO full warning */
|
|
|
|
ravb_write(ndev, 0, RIC1);
|
2015-06-11 06:01:43 +08:00
|
|
|
/* Receive FIFO full error, descriptor empty */
|
|
|
|
ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
|
|
|
|
/* Frame transmitted, timestamp FIFO updated */
|
|
|
|
ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
|
|
|
|
|
|
|
|
/* Setting the control will start the AVB-DMAC process. */
|
2016-02-10 06:37:44 +08:00
|
|
|
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
|
2015-06-11 06:01:43 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ravb_get_tx_tstamp(struct net_device *ndev)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
struct ravb_tstamp_skb *ts_skb, *ts_skb2;
|
|
|
|
struct skb_shared_hwtstamps shhwtstamps;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
struct timespec64 ts;
|
|
|
|
u16 tag, tfa_tag;
|
|
|
|
int count;
|
|
|
|
u32 tfa2;
|
|
|
|
|
|
|
|
count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
|
|
|
|
while (count--) {
|
|
|
|
tfa2 = ravb_read(ndev, TFA2);
|
|
|
|
tfa_tag = (tfa2 & TFA2_TST) >> 16;
|
|
|
|
ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
|
|
|
|
ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
|
|
|
|
ravb_read(ndev, TFA1);
|
|
|
|
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
|
|
|
|
shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
|
|
|
|
list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
|
|
|
|
list) {
|
|
|
|
skb = ts_skb->skb;
|
|
|
|
tag = ts_skb->tag;
|
|
|
|
list_del(&ts_skb->list);
|
|
|
|
kfree(ts_skb);
|
|
|
|
if (tag == tfa_tag) {
|
|
|
|
skb_tstamp_tx(skb, &shhwtstamps);
|
2019-08-16 23:17:02 +08:00
|
|
|
dev_consume_skb_any(skb);
|
2015-06-11 06:01:43 +08:00
|
|
|
break;
|
2019-08-16 23:17:02 +08:00
|
|
|
} else {
|
|
|
|
dev_kfree_skb_any(skb);
|
2015-06-11 06:01:43 +08:00
|
|
|
}
|
|
|
|
}
|
2016-02-10 06:37:44 +08:00
|
|
|
ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
|
2015-06-11 06:01:43 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-04 15:54:27 +08:00
|
|
|
static void ravb_rx_csum(struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
u8 *hw_csum;
|
|
|
|
|
2019-01-23 19:14:52 +08:00
|
|
|
/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
|
|
|
|
* appended to packet data
|
|
|
|
*/
|
|
|
|
if (unlikely(skb->len < sizeof(__sum16)))
|
2017-10-04 15:54:27 +08:00
|
|
|
return;
|
2019-01-23 19:14:52 +08:00
|
|
|
hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
|
2017-10-04 15:54:27 +08:00
|
|
|
skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
|
|
|
|
skb->ip_summed = CHECKSUM_COMPLETE;
|
2019-01-23 19:14:52 +08:00
|
|
|
skb_trim(skb, skb->len - sizeof(__sum16));
|
2017-10-04 15:54:27 +08:00
|
|
|
}
|
|
|
|
|
2015-06-11 06:01:43 +08:00
|
|
|
/* Packet receive function for Ethernet AVB */
|
|
|
|
static bool ravb_rx(struct net_device *ndev, int *quota, int q)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
|
|
|
|
int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
|
|
|
|
priv->cur_rx[q];
|
|
|
|
struct net_device_stats *stats = &priv->stats[q];
|
|
|
|
struct ravb_ex_rx_desc *desc;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
dma_addr_t dma_addr;
|
|
|
|
struct timespec64 ts;
|
|
|
|
u8 desc_status;
|
2015-07-11 02:10:10 +08:00
|
|
|
u16 pkt_len;
|
2015-06-11 06:01:43 +08:00
|
|
|
int limit;
|
|
|
|
|
|
|
|
boguscnt = min(boguscnt, *quota);
|
|
|
|
limit = boguscnt;
|
|
|
|
desc = &priv->rx_ring[q][entry];
|
|
|
|
while (desc->die_dt != DT_FEMPTY) {
|
|
|
|
/* Descriptor type must be checked before all other reads */
|
|
|
|
dma_rmb();
|
|
|
|
desc_status = desc->msc;
|
|
|
|
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
|
|
|
|
|
|
|
|
if (--boguscnt < 0)
|
|
|
|
break;
|
|
|
|
|
2015-07-22 06:31:59 +08:00
|
|
|
/* We use 0-byte descriptors to mark the DMA mapping errors */
|
|
|
|
if (!pkt_len)
|
|
|
|
continue;
|
|
|
|
|
2015-06-11 06:01:43 +08:00
|
|
|
if (desc_status & MSC_MC)
|
|
|
|
stats->multicast++;
|
|
|
|
|
|
|
|
if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
|
|
|
|
MSC_CEEF)) {
|
|
|
|
stats->rx_errors++;
|
|
|
|
if (desc_status & MSC_CRC)
|
|
|
|
stats->rx_crc_errors++;
|
|
|
|
if (desc_status & MSC_RFE)
|
|
|
|
stats->rx_frame_errors++;
|
|
|
|
if (desc_status & (MSC_RTLF | MSC_RTSF))
|
|
|
|
stats->rx_length_errors++;
|
|
|
|
if (desc_status & MSC_CEEF)
|
|
|
|
stats->rx_missed_errors++;
|
|
|
|
} else {
|
|
|
|
u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
|
|
|
|
|
|
|
|
skb = priv->rx_skb[q][entry];
|
|
|
|
priv->rx_skb[q][entry] = NULL;
|
2015-09-30 14:15:53 +08:00
|
|
|
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
|
2019-11-14 09:49:49 +08:00
|
|
|
RX_BUF_SZ,
|
2015-07-15 05:56:52 +08:00
|
|
|
DMA_FROM_DEVICE);
|
2015-06-11 06:01:43 +08:00
|
|
|
get_ts &= (q == RAVB_NC) ?
|
|
|
|
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
|
|
|
|
~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
|
|
|
|
if (get_ts) {
|
|
|
|
struct skb_shared_hwtstamps *shhwtstamps;
|
|
|
|
|
|
|
|
shhwtstamps = skb_hwtstamps(skb);
|
|
|
|
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
|
|
|
|
ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
|
|
|
|
32) | le32_to_cpu(desc->ts_sl);
|
|
|
|
ts.tv_nsec = le32_to_cpu(desc->ts_n);
|
|
|
|
shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
|
|
|
|
}
|
2017-10-04 15:54:27 +08:00
|
|
|
|
2015-06-11 06:01:43 +08:00
|
|
|
skb_put(skb, pkt_len);
|
|
|
|
skb->protocol = eth_type_trans(skb, ndev);
|
2017-10-04 15:54:27 +08:00
|
|
|
if (ndev->features & NETIF_F_RXCSUM)
|
|
|
|
ravb_rx_csum(skb);
|
2015-06-11 06:01:43 +08:00
|
|
|
napi_gro_receive(&priv->napi[q], skb);
|
|
|
|
stats->rx_packets++;
|
|
|
|
stats->rx_bytes += pkt_len;
|
|
|
|
}
|
|
|
|
|
|
|
|
entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
|
|
|
|
desc = &priv->rx_ring[q][entry];
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Refill the RX ring buffers. */
|
|
|
|
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
|
|
|
|
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
|
|
|
|
desc = &priv->rx_ring[q][entry];
|
2019-11-14 09:49:49 +08:00
|
|
|
desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
|
2015-06-11 06:01:43 +08:00
|
|
|
|
|
|
|
if (!priv->rx_skb[q][entry]) {
|
|
|
|
skb = netdev_alloc_skb(ndev,
|
2019-11-14 09:49:49 +08:00
|
|
|
RX_BUF_SZ +
|
2018-02-17 00:10:08 +08:00
|
|
|
RAVB_ALIGN - 1);
|
2015-06-11 06:01:43 +08:00
|
|
|
if (!skb)
|
|
|
|
break; /* Better luck next round. */
|
|
|
|
ravb_set_buffer_align(skb);
|
2015-09-30 14:15:53 +08:00
|
|
|
dma_addr = dma_map_single(ndev->dev.parent, skb->data,
|
2015-06-11 06:01:43 +08:00
|
|
|
le16_to_cpu(desc->ds_cc),
|
|
|
|
DMA_FROM_DEVICE);
|
|
|
|
skb_checksum_none_assert(skb);
|
2015-07-22 06:31:59 +08:00
|
|
|
/* We just set the data size to 0 for a failed mapping
|
|
|
|
* which should prevent DMA from happening...
|
|
|
|
*/
|
2015-09-30 14:15:53 +08:00
|
|
|
if (dma_mapping_error(ndev->dev.parent, dma_addr))
|
2015-07-22 06:31:59 +08:00
|
|
|
desc->ds_cc = cpu_to_le16(0);
|
2015-06-11 06:01:43 +08:00
|
|
|
desc->dptr = cpu_to_le32(dma_addr);
|
|
|
|
priv->rx_skb[q][entry] = skb;
|
|
|
|
}
|
|
|
|
/* Descriptor type must be set after all the above writes */
|
|
|
|
dma_wmb();
|
|
|
|
desc->die_dt = DT_FEMPTY;
|
|
|
|
}
|
|
|
|
|
|
|
|
*quota -= limit - (++boguscnt);
|
|
|
|
|
|
|
|
return boguscnt <= 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ravb_rcv_snd_disable(struct net_device *ndev)
|
|
|
|
{
|
|
|
|
/* Disable TX and RX */
|
2016-02-10 06:37:44 +08:00
|
|
|
ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
|
2015-06-11 06:01:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void ravb_rcv_snd_enable(struct net_device *ndev)
|
|
|
|
{
|
|
|
|
/* Enable TX and RX */
|
2016-02-10 06:37:44 +08:00
|
|
|
ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
|
2015-06-11 06:01:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* function for waiting dma process finished */
|
|
|
|
static int ravb_stop_dma(struct net_device *ndev)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
|
|
|
|
/* Wait for stopping the hardware TX process */
|
|
|
|
error = ravb_wait(ndev, TCCR,
|
|
|
|
TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
|
|
|
|
0);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
/* Stop the E-MAC's RX/TX processes. */
|
|
|
|
ravb_rcv_snd_disable(ndev);
|
|
|
|
|
|
|
|
/* Wait for stopping the RX DMA process */
|
|
|
|
error = ravb_wait(ndev, CSR, CSR_RPO, 0);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
/* Stop AVB-DMAC process */
|
|
|
|
return ravb_config(ndev);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* E-MAC interrupt handler
 *
 * Must be called with priv->lock held (hence "_unlocked" suffix: it does
 * not take the lock itself).  Handles magic-packet wakeup, carrier loss
 * accounting and link-change RX/TX gating.
 */
static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ecsr, psr;

	ecsr = ravb_read(ndev, ECSR);
	ravb_write(ndev, ecsr, ECSR);	/* clear interrupt */

	/* Magic packet detected: flag a wakeup event for the PM core */
	if (ecsr & ECSR_MPD)
		pm_wakeup_event(&priv->pdev->dev, 0);
	if (ecsr & ECSR_ICD)
		ndev->stats.tx_carrier_errors++;
	if (ecsr & ECSR_LCHNG) {
		/* Link changed */
		if (priv->no_avb_link)
			return;
		psr = ravb_read(ndev, PSR);
		/* Invert monitored level when the link signal is active-low */
		if (priv->avb_link_active_low)
			psr ^= PSR_LMON;
		if (!(psr & PSR_LMON)) {
			/* Disable RX and TX */
			ravb_rcv_snd_disable(ndev);
		} else {
			/* Enable RX and TX */
			ravb_rcv_snd_enable(ndev);
		}
	}
}
|
|
|
|
|
2016-04-03 22:54:38 +08:00
|
|
|
/* Dedicated E-MAC IRQ handler (Gen3): wraps the unlocked handler with the
 * private spinlock and always reports the interrupt as handled.
 */
static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);

	spin_lock(&priv->lock);
	ravb_emac_interrupt_unlocked(ndev);
	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
}
|
|
|
|
|
2015-06-11 06:01:43 +08:00
|
|
|
/* Error interrupt handler
 *
 * Acknowledges the queue-full summary (EIS_QFS) and updates the per-queue
 * overflow / FIFO error counters from RIS2.  Writing the complement of the
 * handled bits acknowledges only those bits while leaving reserved bits set.
 */
static void ravb_error_interrupt(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 eis, ris2;

	eis = ravb_read(ndev, EIS);
	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
	if (eis & EIS_QFS) {
		ris2 = ravb_read(ndev, RIS2);
		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
			   RIS2);

		/* Receive Descriptor Empty int (best-effort queue) */
		if (ris2 & RIS2_QFF0)
			priv->stats[RAVB_BE].rx_over_errors++;

		/* Receive Descriptor Empty int (network-control queue) */
		if (ris2 & RIS2_QFF1)
			priv->stats[RAVB_NC].rx_over_errors++;

		/* Receive FIFO Overflow int */
		if (ris2 & RIS2_RFFF)
			priv->rx_fifo_errors++;
	}
}
|
|
|
|
|
2016-04-03 22:54:38 +08:00
|
|
|
/* Handle an RX/TX interrupt for queue @q (RAVB_BE or RAVB_NC).
 *
 * If the queue raised an enabled RX or TX interrupt, mask that queue's
 * interrupts and schedule its NAPI instance.  Returns true when the
 * interrupt belonged to this queue (even if NAPI was already scheduled).
 * Caller must hold priv->lock.
 */
static bool ravb_queue_interrupt(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ris0 = ravb_read(ndev, RIS0);
	u32 ric0 = ravb_read(ndev, RIC0);
	u32 tis = ravb_read(ndev, TIS);
	u32 tic = ravb_read(ndev, TIC);

	if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
		if (napi_schedule_prep(&priv->napi[q])) {
			/* Mask RX and TX interrupts */
			if (priv->chip_id == RCAR_GEN2) {
				/* Gen2: clear the enable bits directly */
				ravb_write(ndev, ric0 & ~BIT(q), RIC0);
				ravb_write(ndev, tic & ~BIT(q), TIC);
			} else {
				/* Gen3: use the dedicated disable registers */
				ravb_write(ndev, BIT(q), RID0);
				ravb_write(ndev, BIT(q), TID);
			}
			__napi_schedule(&priv->napi[q]);
		} else {
			/* NAPI already scheduled: nothing to do but log */
			netdev_warn(ndev,
				    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
				    ris0, ric0);
			netdev_warn(ndev,
				    " tx status 0x%08x, tx mask 0x%08x.\n",
				    tis, tic);
		}
		return true;
	}
	return false;
}
|
|
|
|
|
|
|
|
/* Handle a TX timestamp FIFO update (TIS_TFUF): acknowledge the bit and
 * collect the pending TX timestamps.  Returns true if the interrupt was
 * for a timestamp update.
 */
static bool ravb_timestamp_interrupt(struct net_device *ndev)
{
	u32 tis = ravb_read(ndev, TIS);

	if (tis & TIS_TFUF) {
		/* Ack only TFUF; keep reserved bits written as 1 */
		ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
		ravb_get_tx_tstamp(ndev);
		return true;
	}
	return false;
}
|
|
|
|
|
2015-06-11 06:01:43 +08:00
|
|
|
/* Combined interrupt handler (Gen2, single shared IRQ line).
 *
 * Demultiplexes the interrupt summary status (ISS) into timestamp,
 * per-queue RX/TX, E-MAC, error and gPTP sub-handlers.
 */
static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Received and transmitted interrupts */
	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
		int q;

		/* Timestamp updated */
		if (ravb_timestamp_interrupt(ndev))
			result = IRQ_HANDLED;

		/* Network control and best effort queue RX/TX */
		for (q = RAVB_NC; q >= RAVB_BE; q--) {
			if (ravb_queue_interrupt(ndev, q))
				result = IRQ_HANDLED;
		}
	}

	/* E-MAC status summary */
	if (iss & ISS_MS) {
		ravb_emac_interrupt_unlocked(ndev);
		result = IRQ_HANDLED;
	}

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* gPTP interrupt status summary */
	if (iss & ISS_CGIS) {
		ravb_ptp_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	spin_unlock(&priv->lock);
	return result;
}
|
|
|
|
|
2016-04-03 22:54:38 +08:00
|
|
|
/* Timestamp/Error/gPTP interrupt handler
 *
 * Gen3 "multi" IRQ line: per-queue RX/TX interrupts have their own lines,
 * so this handler only deals with timestamp, error and gPTP summaries.
 */
static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Timestamp updated */
	if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
		result = IRQ_HANDLED;

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* gPTP interrupt status summary */
	if (iss & ISS_CGIS) {
		ravb_ptp_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	spin_unlock(&priv->lock);
	return result;
}
|
|
|
|
|
|
|
|
/* Common body of the per-queue DMA IRQ handlers (Gen3): dispatch the
 * RX/TX interrupt of queue @q under the private lock.
 */
static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	irqreturn_t result = IRQ_NONE;

	spin_lock(&priv->lock);

	/* Network control/Best effort queue RX/TX */
	if (ravb_queue_interrupt(ndev, q))
		result = IRQ_HANDLED;

	spin_unlock(&priv->lock);
	return result;
}
|
|
|
|
|
|
|
|
/* Best-effort queue IRQ handler (Gen3 rx_be/tx_be lines). */
static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
{
	return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
}
|
|
|
|
|
|
|
|
/* Network-control queue IRQ handler (Gen3 rx_nc/tx_nc lines). */
static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
{
	return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
}
|
|
|
|
|
2015-06-11 06:01:43 +08:00
|
|
|
/* NAPI poll handler for one queue: process RX, reclaim TX descriptors,
 * then re-enable this queue's interrupts when the budget was not exhausted.
 * Returns the number of RX packets processed.
 */
static int ravb_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;
	int q = napi - priv->napi;	/* queue index from NAPI array offset */
	int mask = BIT(q);
	int quota = budget;

	/* Processing RX Descriptor Ring */
	/* Clear RX interrupt */
	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
	/* Budget exhausted: stay scheduled, skip TX reclaim and re-enable */
	if (ravb_rx(ndev, &quota, q))
		goto out;

	/* Processing TX Descriptor Ring */
	spin_lock_irqsave(&priv->lock, flags);
	/* Clear TX interrupt */
	ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
	ravb_tx_free(ndev, q, true);
	netif_wake_subqueue(ndev, q);
	spin_unlock_irqrestore(&priv->lock, flags);

	napi_complete(napi);

	/* Re-enable RX/TX interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	if (priv->chip_id == RCAR_GEN2) {
		ravb_modify(ndev, RIC0, mask, mask);
		ravb_modify(ndev, TIC, mask, mask);
	} else {
		/* Gen3: dedicated interrupt-enable registers */
		ravb_write(ndev, mask, RIE0);
		ravb_write(ndev, mask, TIE);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Receive error message handling */
	priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
	priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
		ndev->stats.rx_over_errors = priv->rx_over_errors;
	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
out:
	return budget - quota;
}
|
|
|
|
|
|
|
|
/* PHY state control function
 *
 * phylib link-change callback: mirrors the PHY's link/speed state into the
 * driver and hardware.  When E-MAC link monitoring is ignored
 * (priv->no_avb_link), RX/TX are gated here instead of in the E-MAC ISR.
 */
static void ravb_adjust_link(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	bool new_state = false;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable TX and RX right over here, if E-MAC change is ignored */
	if (priv->no_avb_link)
		ravb_rcv_snd_disable(ndev);

	if (phydev->link) {
		if (phydev->speed != priv->speed) {
			new_state = true;
			priv->speed = phydev->speed;
			ravb_set_rate(ndev);
		}
		if (!priv->link) {
			/* Link came up: stop forcing TX off */
			ravb_modify(ndev, ECMR, ECMR_TXF, 0);
			new_state = true;
			priv->link = phydev->link;
		}
	} else if (priv->link) {
		/* Link went down */
		new_state = true;
		priv->link = 0;
		priv->speed = 0;
	}

	/* Enable TX and RX right over here, if E-MAC change is ignored */
	if (priv->no_avb_link && phydev->link)
		ravb_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
}
|
|
|
|
|
2017-01-28 03:46:27 +08:00
|
|
|
/* SoC match table for R-Car H3 ES1.0, which is limited to 10/100Mbit/s
 * (see ravb_phy_init()).
 */
static const struct soc_device_attribute r8a7795es10[] = {
	{ .soc_id = "r8a7795", .revision = "ES1.0", },
	{ /* sentinel */ }
};
|
|
|
|
|
2015-06-11 06:01:43 +08:00
|
|
|
/* PHY init function
 *
 * Connects the net_device to its PHY: resolves the "phy-handle" DT
 * reference (registering a fixed link when none is given), masks the
 * RGMII-*ID mode when the MAC already inserts the delay, limits the link
 * speed on R-Car H3 ES1.0, and strips unsupported link modes.
 * Returns 0 or a negative errno; on failure any registered fixed link is
 * deregistered again.
 */
static int ravb_phy_init(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct ravb_private *priv = netdev_priv(ndev);
	struct phy_device *phydev;
	struct device_node *pn;
	phy_interface_t iface;
	int err;

	priv->link = 0;
	priv->speed = 0;

	/* Try connecting to PHY */
	pn = of_parse_phandle(np, "phy-handle", 0);
	if (!pn) {
		/* In the case of a fixed PHY, the DT node associated
		 * to the PHY is the Ethernet MAC DT node.
		 */
		if (of_phy_is_fixed_link(np)) {
			err = of_phy_register_fixed_link(np);
			if (err)
				return err;
		}
		pn = of_node_get(np);
	}

	/* When the MAC inserted the internal delay, present plain RGMII to
	 * the PHY so the PHY driver does not insert a second delay.
	 */
	iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
				     : priv->phy_interface;
	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
	of_node_put(pn);
	if (!phydev) {
		netdev_err(ndev, "failed to connect PHY\n");
		err = -ENOENT;
		goto err_deregister_fixed_link;
	}

	/* This driver only support 10/100Mbit speeds on R-Car H3 ES1.0
	 * at this time.
	 */
	if (soc_device_match(r8a7795es10)) {
		err = phy_set_max_speed(phydev, SPEED_100);
		if (err) {
			netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
			goto err_phy_disconnect;
		}

		netdev_info(ndev, "limited PHY to 100Mbit/s\n");
	}

	/* 10BASE, Pause and Asym Pause is not supported */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);

	/* Half Duplex is not supported */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);

	phy_attached_info(phydev);

	return 0;

err_phy_disconnect:
	phy_disconnect(phydev);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);

	return err;
}
|
|
|
|
|
|
|
|
/* PHY control start function: connect to the PHY and start the PHY
 * state machine.  Returns 0 or a negative errno from ravb_phy_init().
 */
static int ravb_phy_start(struct net_device *ndev)
{
	int error;

	error = ravb_phy_init(ndev);
	if (error)
		return error;

	phy_start(ndev->phydev);

	return 0;
}
|
|
|
|
|
|
|
|
static u32 ravb_get_msglevel(struct net_device *ndev)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
|
|
|
|
return priv->msg_enable;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ethtool: set the driver's message-level bitmap. */
static void ravb_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ravb_private *rp = netdev_priv(ndev);

	rp->msg_enable = value;
}
|
|
|
|
|
|
|
|
/* Names of the device-private statistics reported via ethtool -S; order
 * must match the values filled in by ravb_get_ethtool_stats() (one group
 * of 15 counters per queue: best-effort then network-control).
 */
static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_queue_0_current",
	"tx_queue_0_current",
	"rx_queue_0_dirty",
	"tx_queue_0_dirty",
	"rx_queue_0_packets",
	"tx_queue_0_packets",
	"rx_queue_0_bytes",
	"tx_queue_0_bytes",
	"rx_queue_0_mcast_packets",
	"rx_queue_0_errors",
	"rx_queue_0_crc_errors",
	"rx_queue_0_frame_errors",
	"rx_queue_0_length_errors",
	"rx_queue_0_missed_errors",
	"rx_queue_0_over_errors",

	"rx_queue_1_current",
	"tx_queue_1_current",
	"rx_queue_1_dirty",
	"tx_queue_1_dirty",
	"rx_queue_1_packets",
	"tx_queue_1_packets",
	"rx_queue_1_bytes",
	"tx_queue_1_bytes",
	"rx_queue_1_mcast_packets",
	"rx_queue_1_errors",
	"rx_queue_1_crc_errors",
	"rx_queue_1_frame_errors",
	"rx_queue_1_length_errors",
	"rx_queue_1_missed_errors",
	"rx_queue_1_over_errors",
};
|
|
|
|
|
|
|
|
#define RAVB_STATS_LEN ARRAY_SIZE(ravb_gstrings_stats)
|
|
|
|
|
|
|
|
static int ravb_get_sset_count(struct net_device *netdev, int sset)
|
|
|
|
{
|
|
|
|
switch (sset) {
|
|
|
|
case ETH_SS_STATS:
|
|
|
|
return RAVB_STATS_LEN;
|
|
|
|
default:
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ethtool: fill @data with the per-queue counters, in the exact order of
 * ravb_gstrings_stats (15 values for RAVB_BE, then 15 for RAVB_NC).
 */
static void ravb_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *estats, u64 *data)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int i = 0;
	int q;

	/* Device-specific stats */
	for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
		struct net_device_stats *stats = &priv->stats[q];

		data[i++] = priv->cur_rx[q];
		data[i++] = priv->cur_tx[q];
		data[i++] = priv->dirty_rx[q];
		data[i++] = priv->dirty_tx[q];
		data[i++] = stats->rx_packets;
		data[i++] = stats->tx_packets;
		data[i++] = stats->rx_bytes;
		data[i++] = stats->tx_bytes;
		data[i++] = stats->multicast;
		data[i++] = stats->rx_errors;
		data[i++] = stats->rx_crc_errors;
		data[i++] = stats->rx_frame_errors;
		data[i++] = stats->rx_length_errors;
		data[i++] = stats->rx_missed_errors;
		data[i++] = stats->rx_over_errors;
	}
}
|
|
|
|
|
|
|
|
/* ethtool: copy the statistics name table for ETH_SS_STATS. */
static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
		break;
	}
}
|
|
|
|
|
|
|
|
static void ravb_get_ringparam(struct net_device *ndev,
|
|
|
|
struct ethtool_ringparam *ring)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
|
|
|
|
ring->rx_max_pending = BE_RX_RING_MAX;
|
|
|
|
ring->tx_max_pending = BE_TX_RING_MAX;
|
|
|
|
ring->rx_pending = priv->num_rx_ring[RAVB_BE];
|
|
|
|
ring->tx_pending = priv->num_tx_ring[RAVB_BE];
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ethtool: resize the best-effort RX/TX rings.
 *
 * If the interface is running it is detached, the DMAC and (on Gen2) the
 * PTP clock are stopped and the rings freed before the new sizes are
 * applied; everything is then re-initialised and the device re-attached.
 * Returns 0 or a negative errno.  NOTE(review): on a mid-path failure the
 * device stays detached — consistent with the upstream driver.
 */
static int ravb_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error;

	if (ring->tx_pending > BE_TX_RING_MAX ||
	    ring->rx_pending > BE_RX_RING_MAX ||
	    ring->tx_pending < BE_TX_RING_MIN ||
	    ring->rx_pending < BE_RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		/* Stop PTP Clock driver */
		if (priv->chip_id == RCAR_GEN2)
			ravb_ptp_stop(ndev);
		/* Wait for DMA stopping */
		error = ravb_stop_dma(ndev);
		if (error) {
			netdev_err(ndev,
				   "cannot set ringparam! Any AVB processes are still running?\n");
			return error;
		}
		synchronize_irq(ndev->irq);

		/* Free all the skb's in the RX queue and the DMA buffers. */
		ravb_ring_free(ndev, RAVB_BE);
		ravb_ring_free(ndev, RAVB_NC);
	}

	/* Set new parameters */
	priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
	priv->num_tx_ring[RAVB_BE] = ring->tx_pending;

	if (netif_running(ndev)) {
		error = ravb_dmac_init(ndev);
		if (error) {
			netdev_err(ndev,
				   "%s: ravb_dmac_init() failed, error %d\n",
				   __func__, error);
			return error;
		}

		ravb_emac_init(ndev);

		/* Initialise PTP Clock driver */
		if (priv->chip_id == RCAR_GEN2)
			ravb_ptp_init(ndev, priv->pdev);

		netif_device_attach(ndev);
	}

	return 0;
}
|
|
|
|
|
|
|
|
/* ethtool: report hardware/software timestamping capabilities and the
 * PTP hardware clock index.
 */
static int ravb_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
	struct ravb_private *priv = netdev_priv(ndev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		(1 << HWTSTAMP_FILTER_ALL);
	info->phc_index = ptp_clock_index(priv->ptp.clock);

	return 0;
}
|
|
|
|
|
2017-08-01 18:14:36 +08:00
|
|
|
static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
|
2017-10-12 16:24:53 +08:00
|
|
|
wol->supported = WAKE_MAGIC;
|
|
|
|
wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
|
2017-08-01 18:14:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ethtool: enable/disable magic-packet Wake-on-LAN.  Any other wake
 * option is rejected with -EOPNOTSUPP.
 */
static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct ravb_private *priv = netdev_priv(ndev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EOPNOTSUPP;

	priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);

	/* Keep the PM core's wakeup state in sync with the driver flag */
	device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);

	return 0;
}
|
|
|
|
|
2015-06-11 06:01:43 +08:00
|
|
|
/* ethtool operations supported by the EtherAVB driver */
static const struct ethtool_ops ravb_ethtool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_msglevel		= ravb_get_msglevel,
	.set_msglevel		= ravb_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ravb_get_strings,
	.get_ethtool_stats	= ravb_get_ethtool_stats,
	.get_sset_count		= ravb_get_sset_count,
	.get_ringparam		= ravb_get_ringparam,
	.set_ringparam		= ravb_set_ringparam,
	.get_ts_info		= ravb_get_ts_info,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_wol		= ravb_get_wol,
	.set_wol		= ravb_set_wol,
};
|
|
|
|
|
2016-04-03 22:54:38 +08:00
|
|
|
/* Request IRQ @irq for @ndev with a descriptive "devname:channel" label.
 * The name string is devm-managed (freed with @dev).  Returns 0 or the
 * request_irq() error, which is also logged.
 */
static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
				struct net_device *ndev, struct device *dev,
				const char *ch)
{
	char *name;
	int error;

	name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
	if (!name)
		return -ENOMEM;
	error = request_irq(irq, handler, 0, name, ndev);
	if (error)
		netdev_err(ndev, "cannot request IRQ %s\n", name);

	return error;
}
|
|
|
|
|
2015-06-11 06:01:43 +08:00
|
|
|
/* Network device open function for Ethernet AVB
 *
 * Enables NAPI, requests the interrupt lines (one shared line on Gen2,
 * seven dedicated lines on Gen3), initialises the DMAC and E-MAC, starts
 * the PTP clock (Gen2), the TX queues and the PHY.  On failure all
 * acquired resources are released in reverse order via the goto chain;
 * the Gen2 path skips the per-queue free_irq labels since only ndev->irq
 * was requested.
 */
static int ravb_open(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	int error;

	napi_enable(&priv->napi[RAVB_BE]);
	napi_enable(&priv->napi[RAVB_NC]);

	if (priv->chip_id == RCAR_GEN2) {
		error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
				    ndev->name, ndev);
		if (error) {
			netdev_err(ndev, "cannot request IRQ\n");
			goto out_napi_off;
		}
	} else {
		error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
				      dev, "ch22:multi");
		if (error)
			goto out_napi_off;
		error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
				      dev, "ch24:emac");
		if (error)
			goto out_free_irq;
		error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
				      ndev, dev, "ch0:rx_be");
		if (error)
			goto out_free_irq_emac;
		error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
				      ndev, dev, "ch18:tx_be");
		if (error)
			goto out_free_irq_be_rx;
		error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
				      ndev, dev, "ch1:rx_nc");
		if (error)
			goto out_free_irq_be_tx;
		error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
				      ndev, dev, "ch19:tx_nc");
		if (error)
			goto out_free_irq_nc_rx;
	}

	/* Device init */
	error = ravb_dmac_init(ndev);
	if (error)
		goto out_free_irq_nc_tx;
	ravb_emac_init(ndev);

	/* Initialise PTP Clock driver */
	if (priv->chip_id == RCAR_GEN2)
		ravb_ptp_init(ndev, priv->pdev);

	netif_tx_start_all_queues(ndev);

	/* PHY control start */
	error = ravb_phy_start(ndev);
	if (error)
		goto out_ptp_stop;

	return 0;

out_ptp_stop:
	/* Stop PTP Clock driver */
	if (priv->chip_id == RCAR_GEN2)
		ravb_ptp_stop(ndev);
out_free_irq_nc_tx:
	/* Gen2 requested only the single shared IRQ */
	if (priv->chip_id == RCAR_GEN2)
		goto out_free_irq;
	free_irq(priv->tx_irqs[RAVB_NC], ndev);
out_free_irq_nc_rx:
	free_irq(priv->rx_irqs[RAVB_NC], ndev);
out_free_irq_be_tx:
	free_irq(priv->tx_irqs[RAVB_BE], ndev);
out_free_irq_be_rx:
	free_irq(priv->rx_irqs[RAVB_BE], ndev);
out_free_irq_emac:
	free_irq(priv->emac_irq, ndev);
out_free_irq:
	free_irq(ndev->irq, ndev);
out_napi_off:
	napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);
	return error;
}
|
|
|
|
|
|
|
|
/* Timeout function for Ethernet AVB */
|
netdev: pass the stuck queue to the timeout handler
This allows incrementing the correct timeout statistic without any mess.
Down the road, devices can learn to reset just the specific queue.
The patch was generated with the following script:
use strict;
use warnings;
our $^I = '.bak';
my @work = (
["arch/m68k/emu/nfeth.c", "nfeth_tx_timeout"],
["arch/um/drivers/net_kern.c", "uml_net_tx_timeout"],
["arch/um/drivers/vector_kern.c", "vector_net_tx_timeout"],
["arch/xtensa/platforms/iss/network.c", "iss_net_tx_timeout"],
["drivers/char/pcmcia/synclink_cs.c", "hdlcdev_tx_timeout"],
["drivers/infiniband/ulp/ipoib/ipoib_main.c", "ipoib_timeout"],
["drivers/infiniband/ulp/ipoib/ipoib_main.c", "ipoib_timeout"],
["drivers/message/fusion/mptlan.c", "mpt_lan_tx_timeout"],
["drivers/misc/sgi-xp/xpnet.c", "xpnet_dev_tx_timeout"],
["drivers/net/appletalk/cops.c", "cops_timeout"],
["drivers/net/arcnet/arcdevice.h", "arcnet_timeout"],
["drivers/net/arcnet/arcnet.c", "arcnet_timeout"],
["drivers/net/arcnet/com20020.c", "arcnet_timeout"],
["drivers/net/ethernet/3com/3c509.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c515.c", "corkscrew_timeout"],
["drivers/net/ethernet/3com/3c574_cs.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c589_cs.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c59x.c", "vortex_tx_timeout"],
["drivers/net/ethernet/3com/3c59x.c", "vortex_tx_timeout"],
["drivers/net/ethernet/3com/typhoon.c", "typhoon_tx_timeout"],
["drivers/net/ethernet/8390/8390.h", "ei_tx_timeout"],
["drivers/net/ethernet/8390/8390.h", "eip_tx_timeout"],
["drivers/net/ethernet/8390/8390.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/8390p.c", "eip_tx_timeout"],
["drivers/net/ethernet/8390/ax88796.c", "ax_ei_tx_timeout"],
["drivers/net/ethernet/8390/axnet_cs.c", "axnet_tx_timeout"],
["drivers/net/ethernet/8390/etherh.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/hydra.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/mac8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/mcf8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/lib8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/ne2k-pci.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/pcnet_cs.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/smc-ultra.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/wd.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/zorro8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/adaptec/starfire.c", "tx_timeout"],
["drivers/net/ethernet/agere/et131x.c", "et131x_tx_timeout"],
["drivers/net/ethernet/allwinner/sun4i-emac.c", "emac_timeout"],
["drivers/net/ethernet/alteon/acenic.c", "ace_watchdog"],
["drivers/net/ethernet/amazon/ena/ena_netdev.c", "ena_tx_timeout"],
["drivers/net/ethernet/amd/7990.h", "lance_tx_timeout"],
["drivers/net/ethernet/amd/7990.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/a2065.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/am79c961a.c", "am79c961_timeout"],
["drivers/net/ethernet/amd/amd8111e.c", "amd8111e_tx_timeout"],
["drivers/net/ethernet/amd/ariadne.c", "ariadne_tx_timeout"],
["drivers/net/ethernet/amd/atarilance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/au1000_eth.c", "au1000_tx_timeout"],
["drivers/net/ethernet/amd/declance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/lance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/mvme147.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/ni65.c", "ni65_timeout"],
["drivers/net/ethernet/amd/nmclan_cs.c", "mace_tx_timeout"],
["drivers/net/ethernet/amd/pcnet32.c", "pcnet32_tx_timeout"],
["drivers/net/ethernet/amd/sunlance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/xgbe/xgbe-drv.c", "xgbe_tx_timeout"],
["drivers/net/ethernet/apm/xgene-v2/main.c", "xge_timeout"],
["drivers/net/ethernet/apm/xgene/xgene_enet_main.c", "xgene_enet_timeout"],
["drivers/net/ethernet/apple/macmace.c", "mace_tx_timeout"],
["drivers/net/ethernet/atheros/ag71xx.c", "ag71xx_tx_timeout"],
["drivers/net/ethernet/atheros/alx/main.c", "alx_tx_timeout"],
["drivers/net/ethernet/atheros/atl1c/atl1c_main.c", "atl1c_tx_timeout"],
["drivers/net/ethernet/atheros/atl1e/atl1e_main.c", "atl1e_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl.c", "atlx_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl1.c", "atlx_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl2.c", "atl2_tx_timeout"],
["drivers/net/ethernet/broadcom/b44.c", "b44_tx_timeout"],
["drivers/net/ethernet/broadcom/bcmsysport.c", "bcm_sysport_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2.c", "bnx2_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnxt/bnxt.c", "bnxt_tx_timeout"],
["drivers/net/ethernet/broadcom/genet/bcmgenet.c", "bcmgenet_timeout"],
["drivers/net/ethernet/broadcom/sb1250-mac.c", "sbmac_tx_timeout"],
["drivers/net/ethernet/broadcom/tg3.c", "tg3_tx_timeout"],
["drivers/net/ethernet/calxeda/xgmac.c", "xgmac_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_main.c", "liquidio_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_vf_main.c", "liquidio_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c", "lio_vf_rep_tx_timeout"],
["drivers/net/ethernet/cavium/thunder/nicvf_main.c", "nicvf_tx_timeout"],
["drivers/net/ethernet/cirrus/cs89x0.c", "net_timeout"],
["drivers/net/ethernet/cisco/enic/enic_main.c", "enic_tx_timeout"],
["drivers/net/ethernet/cisco/enic/enic_main.c", "enic_tx_timeout"],
["drivers/net/ethernet/cortina/gemini.c", "gmac_tx_timeout"],
["drivers/net/ethernet/davicom/dm9000.c", "dm9000_timeout"],
["drivers/net/ethernet/dec/tulip/de2104x.c", "de_tx_timeout"],
["drivers/net/ethernet/dec/tulip/tulip_core.c", "tulip_tx_timeout"],
["drivers/net/ethernet/dec/tulip/winbond-840.c", "tx_timeout"],
["drivers/net/ethernet/dlink/dl2k.c", "rio_tx_timeout"],
["drivers/net/ethernet/dlink/sundance.c", "tx_timeout"],
["drivers/net/ethernet/emulex/benet/be_main.c", "be_tx_timeout"],
["drivers/net/ethernet/ethoc.c", "ethoc_tx_timeout"],
["drivers/net/ethernet/faraday/ftgmac100.c", "ftgmac100_tx_timeout"],
["drivers/net/ethernet/fealnx.c", "fealnx_tx_timeout"],
["drivers/net/ethernet/freescale/dpaa/dpaa_eth.c", "dpaa_tx_timeout"],
["drivers/net/ethernet/freescale/fec_main.c", "fec_timeout"],
["drivers/net/ethernet/freescale/fec_mpc52xx.c", "mpc52xx_fec_tx_timeout"],
["drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c", "fs_timeout"],
["drivers/net/ethernet/freescale/gianfar.c", "gfar_timeout"],
["drivers/net/ethernet/freescale/ucc_geth.c", "ucc_geth_timeout"],
["drivers/net/ethernet/fujitsu/fmvj18x_cs.c", "fjn_tx_timeout"],
["drivers/net/ethernet/google/gve/gve_main.c", "gve_tx_timeout"],
["drivers/net/ethernet/hisilicon/hip04_eth.c", "hip04_timeout"],
["drivers/net/ethernet/hisilicon/hix5hd2_gmac.c", "hix5hd2_net_timeout"],
["drivers/net/ethernet/hisilicon/hns/hns_enet.c", "hns_nic_net_timeout"],
["drivers/net/ethernet/hisilicon/hns3/hns3_enet.c", "hns3_nic_net_timeout"],
["drivers/net/ethernet/huawei/hinic/hinic_main.c", "hinic_tx_timeout"],
["drivers/net/ethernet/i825xx/82596.c", "i596_tx_timeout"],
["drivers/net/ethernet/i825xx/ether1.c", "ether1_timeout"],
["drivers/net/ethernet/i825xx/lib82596.c", "i596_tx_timeout"],
["drivers/net/ethernet/i825xx/sun3_82586.c", "sun3_82586_timeout"],
["drivers/net/ethernet/ibm/ehea/ehea_main.c", "ehea_tx_watchdog"],
["drivers/net/ethernet/ibm/emac/core.c", "emac_tx_timeout"],
["drivers/net/ethernet/ibm/emac/core.c", "emac_tx_timeout"],
["drivers/net/ethernet/ibm/ibmvnic.c", "ibmvnic_tx_timeout"],
["drivers/net/ethernet/intel/e100.c", "e100_tx_timeout"],
["drivers/net/ethernet/intel/e1000/e1000_main.c", "e1000_tx_timeout"],
["drivers/net/ethernet/intel/e1000e/netdev.c", "e1000_tx_timeout"],
["drivers/net/ethernet/intel/fm10k/fm10k_netdev.c", "fm10k_tx_timeout"],
["drivers/net/ethernet/intel/i40e/i40e_main.c", "i40e_tx_timeout"],
["drivers/net/ethernet/intel/iavf/iavf_main.c", "iavf_tx_timeout"],
["drivers/net/ethernet/intel/ice/ice_main.c", "ice_tx_timeout"],
["drivers/net/ethernet/intel/ice/ice_main.c", "ice_tx_timeout"],
["drivers/net/ethernet/intel/igb/igb_main.c", "igb_tx_timeout"],
["drivers/net/ethernet/intel/igbvf/netdev.c", "igbvf_tx_timeout"],
["drivers/net/ethernet/intel/ixgb/ixgb_main.c", "ixgb_tx_timeout"],
["drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c", "adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);"],
["drivers/net/ethernet/intel/ixgbe/ixgbe_main.c", "ixgbe_tx_timeout"],
["drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c", "ixgbevf_tx_timeout"],
["drivers/net/ethernet/jme.c", "jme_tx_timeout"],
["drivers/net/ethernet/korina.c", "korina_tx_timeout"],
["drivers/net/ethernet/lantiq_etop.c", "ltq_etop_tx_timeout"],
["drivers/net/ethernet/marvell/mv643xx_eth.c", "mv643xx_eth_tx_timeout"],
["drivers/net/ethernet/marvell/pxa168_eth.c", "pxa168_eth_tx_timeout"],
["drivers/net/ethernet/marvell/skge.c", "skge_tx_timeout"],
["drivers/net/ethernet/marvell/sky2.c", "sky2_tx_timeout"],
["drivers/net/ethernet/marvell/sky2.c", "sky2_tx_timeout"],
["drivers/net/ethernet/mediatek/mtk_eth_soc.c", "mtk_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx4/en_netdev.c", "mlx4_en_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx4/en_netdev.c", "mlx4_en_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx5/core/en_main.c", "mlx5e_tx_timeout"],
["drivers/net/ethernet/micrel/ks8842.c", "ks8842_tx_timeout"],
["drivers/net/ethernet/micrel/ksz884x.c", "netdev_tx_timeout"],
["drivers/net/ethernet/microchip/enc28j60.c", "enc28j60_tx_timeout"],
["drivers/net/ethernet/microchip/encx24j600.c", "encx24j600_tx_timeout"],
["drivers/net/ethernet/natsemi/sonic.h", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/sonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/jazzsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/macsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/natsemi.c", "ns_tx_timeout"],
["drivers/net/ethernet/natsemi/ns83820.c", "ns83820_tx_timeout"],
["drivers/net/ethernet/natsemi/xtsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/neterion/s2io.h", "s2io_tx_watchdog"],
["drivers/net/ethernet/neterion/s2io.c", "s2io_tx_watchdog"],
["drivers/net/ethernet/neterion/vxge/vxge-main.c", "vxge_tx_watchdog"],
["drivers/net/ethernet/netronome/nfp/nfp_net_common.c", "nfp_net_tx_timeout"],
["drivers/net/ethernet/nvidia/forcedeth.c", "nv_tx_timeout"],
["drivers/net/ethernet/nvidia/forcedeth.c", "nv_tx_timeout"],
["drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c", "pch_gbe_tx_timeout"],
["drivers/net/ethernet/packetengines/hamachi.c", "hamachi_tx_timeout"],
["drivers/net/ethernet/packetengines/yellowfin.c", "yellowfin_tx_timeout"],
["drivers/net/ethernet/pensando/ionic/ionic_lif.c", "ionic_tx_timeout"],
["drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c", "netxen_tx_timeout"],
["drivers/net/ethernet/qlogic/qla3xxx.c", "ql3xxx_tx_timeout"],
["drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c", "qlcnic_tx_timeout"],
["drivers/net/ethernet/qualcomm/emac/emac.c", "emac_tx_timeout"],
["drivers/net/ethernet/qualcomm/qca_spi.c", "qcaspi_netdev_tx_timeout"],
["drivers/net/ethernet/qualcomm/qca_uart.c", "qcauart_netdev_tx_timeout"],
["drivers/net/ethernet/rdc/r6040.c", "r6040_tx_timeout"],
["drivers/net/ethernet/realtek/8139cp.c", "cp_tx_timeout"],
["drivers/net/ethernet/realtek/8139too.c", "rtl8139_tx_timeout"],
["drivers/net/ethernet/realtek/atp.c", "tx_timeout"],
["drivers/net/ethernet/realtek/r8169_main.c", "rtl8169_tx_timeout"],
["drivers/net/ethernet/renesas/ravb_main.c", "ravb_tx_timeout"],
["drivers/net/ethernet/renesas/sh_eth.c", "sh_eth_tx_timeout"],
["drivers/net/ethernet/renesas/sh_eth.c", "sh_eth_tx_timeout"],
["drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c", "sxgbe_tx_timeout"],
["drivers/net/ethernet/seeq/ether3.c", "ether3_timeout"],
["drivers/net/ethernet/seeq/sgiseeq.c", "timeout"],
["drivers/net/ethernet/sfc/efx.c", "efx_watchdog"],
["drivers/net/ethernet/sfc/falcon/efx.c", "ef4_watchdog"],
["drivers/net/ethernet/sgi/ioc3-eth.c", "ioc3_timeout"],
["drivers/net/ethernet/sgi/meth.c", "meth_tx_timeout"],
["drivers/net/ethernet/silan/sc92031.c", "sc92031_tx_timeout"],
["drivers/net/ethernet/sis/sis190.c", "sis190_tx_timeout"],
["drivers/net/ethernet/sis/sis900.c", "sis900_tx_timeout"],
["drivers/net/ethernet/smsc/epic100.c", "epic_tx_timeout"],
["drivers/net/ethernet/smsc/smc911x.c", "smc911x_timeout"],
["drivers/net/ethernet/smsc/smc9194.c", "smc_timeout"],
["drivers/net/ethernet/smsc/smc91c92_cs.c", "smc_tx_timeout"],
["drivers/net/ethernet/smsc/smc91x.c", "smc_timeout"],
["drivers/net/ethernet/stmicro/stmmac/stmmac_main.c", "stmmac_tx_timeout"],
["drivers/net/ethernet/sun/cassini.c", "cas_tx_timeout"],
["drivers/net/ethernet/sun/ldmvsw.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/niu.c", "niu_tx_timeout"],
["drivers/net/ethernet/sun/sunbmac.c", "bigmac_tx_timeout"],
["drivers/net/ethernet/sun/sungem.c", "gem_tx_timeout"],
["drivers/net/ethernet/sun/sunhme.c", "happy_meal_tx_timeout"],
["drivers/net/ethernet/sun/sunqe.c", "qe_tx_timeout"],
["drivers/net/ethernet/sun/sunvnet.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/sunvnet_common.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/sunvnet_common.h", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/synopsys/dwc-xlgmac-net.c", "xlgmac_tx_timeout"],
["drivers/net/ethernet/ti/cpmac.c", "cpmac_tx_timeout"],
["drivers/net/ethernet/ti/cpsw.c", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/cpsw_priv.c", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/cpsw_priv.h", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/davinci_emac.c", "emac_dev_tx_timeout"],
["drivers/net/ethernet/ti/netcp_core.c", "netcp_ndo_tx_timeout"],
["drivers/net/ethernet/ti/tlan.c", "tlan_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_net.h", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_net.c", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_wireless.c", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/spider_net.c", "spider_net_tx_timeout"],
["drivers/net/ethernet/toshiba/tc35815.c", "tc35815_tx_timeout"],
["drivers/net/ethernet/via/via-rhine.c", "rhine_tx_timeout"],
["drivers/net/ethernet/wiznet/w5100.c", "w5100_tx_timeout"],
["drivers/net/ethernet/wiznet/w5300.c", "w5300_tx_timeout"],
["drivers/net/ethernet/xilinx/xilinx_emaclite.c", "xemaclite_tx_timeout"],
["drivers/net/ethernet/xircom/xirc2ps_cs.c", "xirc_tx_timeout"],
["drivers/net/fjes/fjes_main.c", "fjes_tx_retry"],
["drivers/net/slip/slip.c", "sl_tx_timeout"],
["include/linux/usb/usbnet.h", "usbnet_tx_timeout"],
["drivers/net/usb/aqc111.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/ax88172a.c", "usbnet_tx_timeout"],
["drivers/net/usb/ax88179_178a.c", "usbnet_tx_timeout"],
["drivers/net/usb/catc.c", "catc_tx_timeout"],
["drivers/net/usb/cdc_mbim.c", "usbnet_tx_timeout"],
["drivers/net/usb/cdc_ncm.c", "usbnet_tx_timeout"],
["drivers/net/usb/dm9601.c", "usbnet_tx_timeout"],
["drivers/net/usb/hso.c", "hso_net_tx_timeout"],
["drivers/net/usb/int51x1.c", "usbnet_tx_timeout"],
["drivers/net/usb/ipheth.c", "ipheth_tx_timeout"],
["drivers/net/usb/kaweth.c", "kaweth_tx_timeout"],
["drivers/net/usb/lan78xx.c", "lan78xx_tx_timeout"],
["drivers/net/usb/mcs7830.c", "usbnet_tx_timeout"],
["drivers/net/usb/pegasus.c", "pegasus_tx_timeout"],
["drivers/net/usb/qmi_wwan.c", "usbnet_tx_timeout"],
["drivers/net/usb/r8152.c", "rtl8152_tx_timeout"],
["drivers/net/usb/rndis_host.c", "usbnet_tx_timeout"],
["drivers/net/usb/rtl8150.c", "rtl8150_tx_timeout"],
["drivers/net/usb/sierra_net.c", "usbnet_tx_timeout"],
["drivers/net/usb/smsc75xx.c", "usbnet_tx_timeout"],
["drivers/net/usb/smsc95xx.c", "usbnet_tx_timeout"],
["drivers/net/usb/sr9700.c", "usbnet_tx_timeout"],
["drivers/net/usb/sr9800.c", "usbnet_tx_timeout"],
["drivers/net/usb/usbnet.c", "usbnet_tx_timeout"],
["drivers/net/vmxnet3/vmxnet3_drv.c", "vmxnet3_tx_timeout"],
["drivers/net/wan/cosa.c", "cosa_net_timeout"],
["drivers/net/wan/farsync.c", "fst_tx_timeout"],
["drivers/net/wan/fsl_ucc_hdlc.c", "uhdlc_tx_timeout"],
["drivers/net/wan/lmc/lmc_main.c", "lmc_driver_timeout"],
["drivers/net/wan/x25_asy.c", "x25_asy_timeout"],
["drivers/net/wimax/i2400m/netdev.c", "i2400m_tx_timeout"],
["drivers/net/wireless/intel/ipw2x00/ipw2100.c", "ipw2100_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/main.c", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/orinoco_usb.c", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/orinoco.h", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_dev.c", "islpci_eth_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_eth.c", "islpci_eth_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_eth.h", "islpci_eth_tx_timeout"],
["drivers/net/wireless/marvell/mwifiex/main.c", "mwifiex_tx_timeout"],
["drivers/net/wireless/quantenna/qtnfmac/core.c", "qtnf_netdev_tx_timeout"],
["drivers/net/wireless/quantenna/qtnfmac/core.h", "qtnf_netdev_tx_timeout"],
["drivers/net/wireless/rndis_wlan.c", "usbnet_tx_timeout"],
["drivers/net/wireless/wl3501_cs.c", "wl3501_tx_timeout"],
["drivers/net/wireless/zydas/zd1201.c", "zd1201_tx_timeout"],
["drivers/s390/net/qeth_core.h", "qeth_tx_timeout"],
["drivers/s390/net/qeth_core_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"],
["drivers/staging/ks7010/ks_wlan_net.c", "ks_wlan_tx_timeout"],
["drivers/staging/qlge/qlge_main.c", "qlge_tx_timeout"],
["drivers/staging/rtl8192e/rtl8192e/rtl_core.c", "_rtl92e_tx_timeout"],
["drivers/staging/rtl8192u/r8192U_core.c", "tx_timeout"],
["drivers/staging/unisys/visornic/visornic_main.c", "visornic_xmit_timeout"],
["drivers/staging/wlan-ng/p80211netdev.c", "p80211knetdev_tx_timeout"],
["drivers/tty/n_gsm.c", "gsm_mux_net_tx_timeout"],
["drivers/tty/synclink.c", "hdlcdev_tx_timeout"],
["drivers/tty/synclink_gt.c", "hdlcdev_tx_timeout"],
["drivers/tty/synclinkmp.c", "hdlcdev_tx_timeout"],
["net/atm/lec.c", "lec_tx_timeout"],
["net/bluetooth/bnep/netdev.c", "bnep_net_timeout"]
);
for my $p (@work) {
my @pair = @$p;
my $file = $pair[0];
my $func = $pair[1];
print STDERR $file , ": ", $func,"\n";
our @ARGV = ($file);
while (<ARGV>) {
if (m/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/) {
print STDERR "found $1+$2 in $file\n";
}
if (s/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/$1, unsigned int txqueue$2/) {
print STDERR "$func found in $file\n";
}
print;
}
}
where the list of files and functions is simply from:
git grep ndo_tx_timeout, with manual addition of headers
in the rare cases where the function is from a header,
then manually changing the few places which actually
call ndo_tx_timeout.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Heiner Kallweit <hkallweit1@gmail.com>
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Shannon Nelson <snelson@pensando.io>
Reviewed-by: Martin Habets <mhabets@solarflare.com>
changes from v9:
fixup a forward declaration
changes from v9:
more leftovers from v3 change
changes from v8:
fix up a missing direct call to timeout
rebased on net-next
changes from v7:
fixup leftovers from v3 change
changes from v6:
fix typo in rtl driver
changes from v5:
add missing files (allow any net device argument name)
changes from v4:
add a missing driver header
changes from v3:
change queue # to unsigned
Changes from v2:
added headers
Changes from v1:
Fix errors found by kbuild:
generalize the pattern a bit, to pick up
a couple of instances missed by the previous
version.
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-12-10 22:23:51 +08:00
|
|
|
static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
|
2015-06-11 06:01:43 +08:00
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
|
|
|
|
netif_err(priv, tx_err, ndev,
|
|
|
|
"transmit timed out, status %08x, resetting...\n",
|
|
|
|
ravb_read(ndev, ISS));
|
|
|
|
|
|
|
|
/* tx_errors count up */
|
|
|
|
ndev->stats.tx_errors++;
|
|
|
|
|
|
|
|
schedule_work(&priv->work);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Deferred TX-timeout recovery, scheduled by ravb_tx_timeout().  Runs in
 * process context: stops the queues, tears down and re-initializes the
 * DMAC/EMAC, then restarts the queues.
 */
static void ravb_tx_timeout_work(struct work_struct *work)
{
	struct ravb_private *priv = container_of(work, struct ravb_private,
						 work);
	struct net_device *ndev = priv->ndev;
	int error;

	/* Keep the stack from queueing packets while we reset. */
	netif_tx_stop_all_queues(ndev);

	/* Stop PTP Clock driver */
	if (priv->chip_id == RCAR_GEN2)
		ravb_ptp_stop(ndev);

	/* Wait for DMA stopping */
	if (ravb_stop_dma(ndev)) {
		/* If ravb_stop_dma() fails, the hardware is still operating
		 * for TX and/or RX. So, this should not call the following
		 * functions because ravb_dmac_init() is possible to fail too.
		 * Also, this should not retry ravb_stop_dma() again and again
		 * here because it's possible to wait forever. So, this just
		 * re-enables the TX and RX and skip the following
		 * re-initialization procedure.
		 */
		ravb_rcv_snd_enable(ndev);
		goto out;
	}

	/* DMA is stopped: release both descriptor rings before re-init. */
	ravb_ring_free(ndev, RAVB_BE);
	ravb_ring_free(ndev, RAVB_NC);

	/* Device init */
	error = ravb_dmac_init(ndev);
	if (error) {
		/* If ravb_dmac_init() fails, descriptors are freed. So, this
		 * should return here to avoid re-enabling the TX and RX in
		 * ravb_emac_init().
		 */
		netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
			   __func__, error);
		return;
	}
	ravb_emac_init(ndev);

out:
	/* Initialise PTP Clock driver */
	if (priv->chip_id == RCAR_GEN2)
		ravb_ptp_init(ndev, priv->pdev);

	/* Recovery (or best-effort re-enable) done: resume transmission. */
	netif_tx_start_all_queues(ndev);
}
|
|
|
|
|
|
|
|
/* Packet transmit function for Ethernet AVB
 *
 * Builds one (Gen3-style, num_tx_desc == 1) or two (num_tx_desc > 1, with a
 * bounce buffer for the unaligned head) TX descriptors for the skb, arms a
 * TX timestamp entry for the network-control queue, and kicks the DMAC.
 * Called under the stack's xmit path; takes priv->lock for ring access.
 */
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int num_tx_desc = priv->num_tx_desc;	/* descriptors per packet */
	u16 q = skb_get_queue_mapping(skb);	/* RAVB_BE or RAVB_NC */
	struct ravb_tstamp_skb *ts_skb;
	struct ravb_tx_desc *desc;
	unsigned long flags;
	u32 dma_addr;
	void *buffer;
	u32 entry;
	u32 len;

	spin_lock_irqsave(&priv->lock, flags);
	/* Ring full (less one slot of headroom)?  Stop the subqueue and
	 * ask the stack to retry later.
	 */
	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
	    num_tx_desc) {
		netif_err(priv, tx_queued, ndev,
			  "still transmitting with the full ring!\n");
		netif_stop_subqueue(ndev, q);
		spin_unlock_irqrestore(&priv->lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* Pad short frames to the Ethernet minimum; on failure the skb is
	 * already freed, so just report success to the stack.
	 */
	if (skb_put_padto(skb, ETH_ZLEN))
		goto exit;

	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
	priv->tx_skb[q][entry / num_tx_desc] = skb;

	if (num_tx_desc > 1) {
		/* Two-descriptor mode: copy the unaligned head of the frame
		 * into a per-entry aligned bounce buffer (first descriptor),
		 * and DMA-map the remainder in place (second descriptor).
		 */
		buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
			 entry / num_tx_desc * DPTR_ALIGN;
		len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;

		/* Zero length DMA descriptors are problematic as they seem
		 * to terminate DMA transfers. Avoid them by simply using a
		 * length of DPTR_ALIGN (4) when skb data is aligned to
		 * DPTR_ALIGN.
		 *
		 * As skb is guaranteed to have at least ETH_ZLEN (60)
		 * bytes of data by the call to skb_put_padto() above this
		 * is safe with respect to both the length of the first DMA
		 * descriptor (len) overflowing the available data and the
		 * length of the second DMA descriptor (skb->len - len)
		 * being negative.
		 */
		if (len == 0)
			len = DPTR_ALIGN;

		memcpy(buffer, skb->data, len);
		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, dma_addr))
			goto drop;

		/* First descriptor: aligned head from the bounce buffer. */
		desc = &priv->tx_ring[q][entry];
		desc->ds_tagl = cpu_to_le16(len);
		desc->dptr = cpu_to_le32(dma_addr);

		/* Second descriptor: the rest of the frame, mapped in place. */
		buffer = skb->data + len;
		len = skb->len - len;
		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, dma_addr))
			goto unmap;

		desc++;
	} else {
		/* Single-descriptor mode: map the whole frame in place. */
		desc = &priv->tx_ring[q][entry];
		len = skb->len;
		dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, dma_addr))
			goto drop;
	}
	desc->ds_tagl = cpu_to_le16(len);
	desc->dptr = cpu_to_le32(dma_addr);

	/* TX timestamp required */
	if (q == RAVB_NC) {
		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
		if (!ts_skb) {
			/* Undo the second mapping before the common unmap
			 * path releases the first one.
			 */
			if (num_tx_desc > 1) {
				desc--;
				dma_unmap_single(ndev->dev.parent, dma_addr,
						 len, DMA_TO_DEVICE);
			}
			goto unmap;
		}
		/* Hold a reference until the timestamp IRQ consumes it. */
		ts_skb->skb = skb_get(skb);
		ts_skb->tag = priv->ts_skb_tag++;
		priv->ts_skb_tag &= 0x3ff;	/* 10-bit hardware tag */
		list_add_tail(&ts_skb->list, &priv->ts_skb_list);

		/* TAG and timestamp required flag */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
		desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
	}

	skb_tx_timestamp(skb);
	/* Descriptor type must be set after all the above writes */
	dma_wmb();
	if (num_tx_desc > 1) {
		/* Mark the end first so the DMAC never sees FSTART without
		 * a valid FEND.
		 */
		desc->die_dt = DT_FEND;
		desc--;
		desc->die_dt = DT_FSTART;
	} else {
		desc->die_dt = DT_FSINGLE;
	}
	/* Request transmission start for this queue. */
	ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);

	priv->cur_tx[q] += num_tx_desc;
	/* Stop the subqueue if the ring is (nearly) full and no completed
	 * descriptors could be reclaimed.
	 */
	if (priv->cur_tx[q] - priv->dirty_tx[q] >
	    (priv->num_tx_ring[q] - 1) * num_tx_desc &&
	    !ravb_tx_free(ndev, q, true))
		netif_stop_subqueue(ndev, q);

exit:
	spin_unlock_irqrestore(&priv->lock, flags);
	return NETDEV_TX_OK;

unmap:
	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
drop:
	dev_kfree_skb_any(skb);
	priv->tx_skb[q][entry / num_tx_desc] = NULL;
	goto exit;
}
|
|
|
|
|
|
|
|
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
|
2019-03-20 18:02:06 +08:00
|
|
|
struct net_device *sb_dev)
|
2015-06-11 06:01:43 +08:00
|
|
|
{
|
|
|
|
/* If skb needs TX timestamp, it is handled in network control queue */
|
|
|
|
return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
|
|
|
|
RAVB_BE;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
struct net_device_stats *nstats, *stats0, *stats1;
|
|
|
|
|
|
|
|
nstats = &ndev->stats;
|
|
|
|
stats0 = &priv->stats[RAVB_BE];
|
|
|
|
stats1 = &priv->stats[RAVB_NC];
|
|
|
|
|
2019-09-05 23:10:59 +08:00
|
|
|
if (priv->chip_id == RCAR_GEN3) {
|
|
|
|
nstats->tx_dropped += ravb_read(ndev, TROCR);
|
|
|
|
ravb_write(ndev, 0, TROCR); /* (write clear) */
|
|
|
|
}
|
2015-06-11 06:01:43 +08:00
|
|
|
|
|
|
|
nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
|
|
|
|
nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
|
|
|
|
nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
|
|
|
|
nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
|
|
|
|
nstats->multicast = stats0->multicast + stats1->multicast;
|
|
|
|
nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
|
|
|
|
nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
|
|
|
|
nstats->rx_frame_errors =
|
|
|
|
stats0->rx_frame_errors + stats1->rx_frame_errors;
|
|
|
|
nstats->rx_length_errors =
|
|
|
|
stats0->rx_length_errors + stats1->rx_length_errors;
|
|
|
|
nstats->rx_missed_errors =
|
|
|
|
stats0->rx_missed_errors + stats1->rx_missed_errors;
|
|
|
|
nstats->rx_over_errors =
|
|
|
|
stats0->rx_over_errors + stats1->rx_over_errors;
|
|
|
|
|
|
|
|
return nstats;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Update promiscuous bit */
|
|
|
|
static void ravb_set_rx_mode(struct net_device *ndev)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&priv->lock, flags);
|
2016-02-10 06:37:44 +08:00
|
|
|
ravb_modify(ndev, ECMR, ECMR_PRM,
|
|
|
|
ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
|
2015-06-11 06:01:43 +08:00
|
|
|
spin_unlock_irqrestore(&priv->lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Device close function for Ethernet AVB
 *
 * ndo_stop hook: mirrors ravb_open() in reverse — stop queues, mask
 * interrupts, stop PTP (Gen2) and DMA, drop pending timestamp entries,
 * disconnect the PHY, release IRQs and NAPI, and free the rings.
 */
static int ravb_close(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_tstamp_skb *ts_skb, *ts_skb2;

	netif_tx_stop_all_queues(ndev);

	/* Disable interrupts by clearing the interrupt masks. */
	ravb_write(ndev, 0, RIC0);
	ravb_write(ndev, 0, RIC2);
	ravb_write(ndev, 0, TIC);

	/* Stop PTP Clock driver */
	if (priv->chip_id == RCAR_GEN2)
		ravb_ptp_stop(ndev);

	/* Set the config mode to stop the AVB-DMAC's processes */
	if (ravb_stop_dma(ndev) < 0)
		netdev_err(ndev,
			   "device will be stopped after h/w processes are done.\n");

	/* Clear the timestamp list */
	list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
		list_del(&ts_skb->list);
		/* Drop the reference taken by skb_get() in ravb_start_xmit(). */
		kfree_skb(ts_skb->skb);
		kfree(ts_skb);
	}

	/* PHY disconnect */
	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
		if (of_phy_is_fixed_link(np))
			of_phy_deregister_fixed_link(np);
	}

	/* Gen3 uses per-queue IRQs plus a dedicated EMAC IRQ; Gen2 only
	 * has the single shared IRQ released below.
	 */
	if (priv->chip_id != RCAR_GEN2) {
		free_irq(priv->tx_irqs[RAVB_NC], ndev);
		free_irq(priv->rx_irqs[RAVB_NC], ndev);
		free_irq(priv->tx_irqs[RAVB_BE], ndev);
		free_irq(priv->rx_irqs[RAVB_BE], ndev);
		free_irq(priv->emac_irq, ndev);
	}
	free_irq(ndev->irq, ndev);

	napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);

	/* Free all the skb's in the RX queue and the DMA buffers. */
	ravb_ring_free(ndev, RAVB_BE);
	ravb_ring_free(ndev, RAVB_NC);

	return 0;
}
|
|
|
|
|
|
|
|
static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
struct hwtstamp_config config;
|
|
|
|
|
|
|
|
config.flags = 0;
|
|
|
|
config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
|
|
|
|
HWTSTAMP_TX_OFF;
|
2020-10-26 18:21:30 +08:00
|
|
|
switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
|
|
|
|
case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
|
2015-06-11 06:01:43 +08:00
|
|
|
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
|
2020-10-26 18:21:30 +08:00
|
|
|
break;
|
|
|
|
case RAVB_RXTSTAMP_TYPE_ALL:
|
2015-06-11 06:01:43 +08:00
|
|
|
config.rx_filter = HWTSTAMP_FILTER_ALL;
|
2020-10-26 18:21:30 +08:00
|
|
|
break;
|
|
|
|
default:
|
2015-06-11 06:01:43 +08:00
|
|
|
config.rx_filter = HWTSTAMP_FILTER_NONE;
|
2020-10-26 18:21:30 +08:00
|
|
|
}
|
2015-06-11 06:01:43 +08:00
|
|
|
|
|
|
|
return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
|
|
|
|
-EFAULT : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Control hardware time stamping */
|
|
|
|
static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
struct hwtstamp_config config;
|
|
|
|
u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
|
|
|
|
u32 tstamp_tx_ctrl;
|
|
|
|
|
|
|
|
if (copy_from_user(&config, req->ifr_data, sizeof(config)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
/* Reserved for future extensions */
|
|
|
|
if (config.flags)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
switch (config.tx_type) {
|
|
|
|
case HWTSTAMP_TX_OFF:
|
|
|
|
tstamp_tx_ctrl = 0;
|
|
|
|
break;
|
|
|
|
case HWTSTAMP_TX_ON:
|
|
|
|
tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -ERANGE;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (config.rx_filter) {
|
|
|
|
case HWTSTAMP_FILTER_NONE:
|
|
|
|
tstamp_rx_ctrl = 0;
|
|
|
|
break;
|
|
|
|
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
|
|
|
|
tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
config.rx_filter = HWTSTAMP_FILTER_ALL;
|
|
|
|
tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
|
|
|
|
}
|
|
|
|
|
|
|
|
priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
|
|
|
|
priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
|
|
|
|
|
|
|
|
return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
|
|
|
|
-EFAULT : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ioctl to device function */
|
|
|
|
static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
|
|
|
|
{
|
2016-08-20 06:52:18 +08:00
|
|
|
struct phy_device *phydev = ndev->phydev;
|
2015-06-11 06:01:43 +08:00
|
|
|
|
|
|
|
if (!netif_running(ndev))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!phydev)
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case SIOCGHWTSTAMP:
|
|
|
|
return ravb_hwtstamp_get(ndev, req);
|
|
|
|
case SIOCSHWTSTAMP:
|
|
|
|
return ravb_hwtstamp_set(ndev, req);
|
|
|
|
}
|
|
|
|
|
|
|
|
return phy_mii_ioctl(phydev, req, cmd);
|
|
|
|
}
|
|
|
|
|
2018-02-17 00:10:08 +08:00
|
|
|
/* Change the interface MTU; the core has already validated new_mtu against
 * ndev->min_mtu/max_mtu set in probe.
 */
static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ndev->mtu = new_mtu;

	/* If the interface is up, re-initialize the E-MAC so the new maximum
	 * frame length takes effect; wait for any in-flight EMAC interrupt
	 * handler first so it does not race the re-init
	 */
	if (netif_running(ndev)) {
		synchronize_irq(priv->emac_irq);
		ravb_emac_init(ndev);
	}

	/* Feature set may depend on MTU — let the core re-evaluate it */
	netdev_update_features(ndev);

	return 0;
}
|
|
|
|
|
2017-10-04 15:54:27 +08:00
|
|
|
/* Toggle hardware RX checksum offload (ECMR.RCSC) with TX/RX quiesced */
static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 rcsc = enable ? ECMR_RCSC : 0;
	unsigned long irq_flags;

	spin_lock_irqsave(&priv->lock, irq_flags);

	/* Disable TX and RX */
	ravb_rcv_snd_disable(ndev);

	/* Modify RX Checksum setting */
	ravb_modify(ndev, ECMR, ECMR_RCSC, rcsc);

	/* Enable TX and RX */
	ravb_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&priv->lock, irq_flags);
}
|
|
|
|
|
|
|
|
static int ravb_set_features(struct net_device *ndev,
|
|
|
|
netdev_features_t features)
|
|
|
|
{
|
|
|
|
netdev_features_t changed = ndev->features ^ features;
|
|
|
|
|
|
|
|
if (changed & NETIF_F_RXCSUM)
|
|
|
|
ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
|
|
|
|
|
|
|
|
ndev->features = features;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-06-11 06:01:43 +08:00
|
|
|
/* net_device callbacks for the Ethernet AVB interface */
static const struct net_device_ops ravb_netdev_ops = {
	.ndo_open = ravb_open,
	.ndo_stop = ravb_close,
	.ndo_start_xmit = ravb_start_xmit,
	.ndo_select_queue = ravb_select_queue,
	.ndo_get_stats = ravb_get_stats,
	.ndo_set_rx_mode = ravb_set_rx_mode,
	.ndo_tx_timeout = ravb_tx_timeout,
	.ndo_do_ioctl = ravb_do_ioctl,	/* handles HW timestamping ioctls */
	.ndo_change_mtu = ravb_change_mtu,
	/* Generic helpers suffice for address validation/assignment */
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_set_features = ravb_set_features,	/* RX checksum offload toggle */
};
|
|
|
|
|
Revert "ravb: Fixed to be able to unload modules"
This reverts commit 1838d6c62f57836639bd3d83e7855e0ee4f6defc.
This commit moved the ravb_mdio_init() call (and thus the
of_mdiobus_register() call) from the ravb_probe() to the ravb_open()
call. This causes a regression during system resume (s2idle/s2ram), as
new PHY devices cannot be bound while suspended.
During boot, the Micrel PHY is detected like this:
Micrel KSZ9031 Gigabit PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Micrel KSZ9031 Gigabit PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=228)
ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off
During system suspend, (A) defer_all_probes is set to true, and (B)
usermodehelper_disabled is set to UMH_DISABLED, to avoid drivers being
probed while suspended.
A. If CONFIG_MODULES=n, phy_device_register() calling device_add()
merely adds the device, but does not probe it yet, as
really_probe() returns early due to defer_all_probes being set:
dpm_resume+0x128/0x4f8
device_resume+0xcc/0x1b0
dpm_run_callback+0x74/0x340
ravb_resume+0x190/0x1b8
ravb_open+0x84/0x770
of_mdiobus_register+0x1e0/0x468
of_mdiobus_register_phy+0x1b8/0x250
of_mdiobus_phy_device_register+0x178/0x1e8
phy_device_register+0x114/0x1b8
device_add+0x3d4/0x798
bus_probe_device+0x98/0xa0
device_initial_probe+0x10/0x18
__device_attach+0xe4/0x140
bus_for_each_drv+0x64/0xc8
__device_attach_driver+0xb8/0xe0
driver_probe_device.part.11+0xc4/0xd8
really_probe+0x32c/0x3b8
Later, phy_attach_direct() notices no PHY driver has been bound,
and falls back to the Generic PHY, leading to degraded operation:
Generic PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Generic PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=POLL)
ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off
B. If CONFIG_MODULES=y, request_module() returns early with -EBUSY due
to UMH_DISABLED, and MDIO initialization fails completely:
mdio_bus e6800000.ethernet-ffffffff:00: error -16 loading PHY driver module for ID 0x00221622
ravb e6800000.ethernet eth0: failed to initialize MDIO
PM: dpm_run_callback(): ravb_resume+0x0/0x1b8 returns -16
PM: Device e6800000.ethernet failed to resume: error -16
Ignoring -EBUSY in phy_request_driver_module(), like was done for
-ENOENT in commit 21e194425abd65b5 ("net: phy: fix issue with loading
PHY driver w/o initramfs"), would makes it fall back to the Generic
PHY, like in the CONFIG_MODULES=n case.
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: stable@vger.kernel.org
Reviewed-by: Sergei Shtylyov <sergei.shtylyov@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-09-22 15:29:31 +08:00
|
|
|
/* MDIO bus init function */
|
|
|
|
static int ravb_mdio_init(struct ravb_private *priv)
|
|
|
|
{
|
|
|
|
struct platform_device *pdev = priv->pdev;
|
|
|
|
struct device *dev = &pdev->dev;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
/* Bitbang init */
|
|
|
|
priv->mdiobb.ops = &bb_ops;
|
|
|
|
|
|
|
|
/* MII controller setting */
|
|
|
|
priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
|
|
|
|
if (!priv->mii_bus)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
/* Hook up MII support for ethtool */
|
|
|
|
priv->mii_bus->name = "ravb_mii";
|
|
|
|
priv->mii_bus->parent = dev;
|
|
|
|
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
|
|
|
|
pdev->name, pdev->id);
|
|
|
|
|
|
|
|
/* Register MDIO bus */
|
|
|
|
error = of_mdiobus_register(priv->mii_bus, dev->of_node);
|
|
|
|
if (error)
|
|
|
|
goto out_free_bus;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_free_bus:
|
|
|
|
free_mdio_bitbang(priv->mii_bus);
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* MDIO bus release function */
|
|
|
|
static int ravb_mdio_release(struct ravb_private *priv)
|
|
|
|
{
|
|
|
|
/* Unregister mdio bus */
|
|
|
|
mdiobus_unregister(priv->mii_bus);
|
|
|
|
|
|
|
|
/* Free bitbang info */
|
|
|
|
free_mdio_bitbang(priv->mii_bus);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-09-30 14:15:55 +08:00
|
|
|
/* Device-tree compatible strings, mapped to the SoC generation so probe can
 * select the Gen2/Gen3 code paths (IRQ layout, PTP handling, delay modes).
 */
static const struct of_device_id ravb_match_table[] = {
	{ .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
	{ .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
	{ .compatible = "renesas,etheravb-rcar-gen2", .data = (void *)RCAR_GEN2 },
	{ .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 },
	{ .compatible = "renesas,etheravb-rcar-gen3", .data = (void *)RCAR_GEN3 },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
|
|
|
|
|
2015-11-21 03:29:39 +08:00
|
|
|
static int ravb_set_gti(struct net_device *ndev)
|
|
|
|
{
|
2017-10-12 16:24:53 +08:00
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
2015-11-21 03:29:39 +08:00
|
|
|
struct device *dev = ndev->dev.parent;
|
|
|
|
unsigned long rate;
|
|
|
|
uint64_t inc;
|
|
|
|
|
2017-10-12 16:24:53 +08:00
|
|
|
rate = clk_get_rate(priv->clk);
|
2016-04-08 19:28:42 +08:00
|
|
|
if (!rate)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2015-11-21 03:29:39 +08:00
|
|
|
inc = 1000000000ULL << 20;
|
|
|
|
do_div(inc, rate);
|
|
|
|
|
|
|
|
if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
|
|
|
|
dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
|
|
|
|
inc, GTI_TIV_MIN, GTI_TIV_MAX);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
ravb_write(ndev, inc, GTI);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-08-03 21:56:47 +08:00
|
|
|
static void ravb_set_config_mode(struct net_device *ndev)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
|
|
|
|
if (priv->chip_id == RCAR_GEN2) {
|
|
|
|
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
|
|
|
|
/* Set CSEL value */
|
|
|
|
ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
|
|
|
|
} else {
|
|
|
|
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
|
|
|
|
CCC_GAC | CCC_CSEL_HPB);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-23 21:01:53 +08:00
|
|
|
/* SoCs on which TX clock internal delay mode must not be enabled; used by
 * ravb_parse_delay_mode() to WARN instead of setting the delay (the WARN
 * text there states the hardware revision does not support it).
 */
static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = {
	{ .soc_id = "r8a774c0" },
	{ .soc_id = "r8a77990" },
	{ .soc_id = "r8a77995" },
	{ /* sentinel */ }
};
|
|
|
|
|
2017-01-28 03:46:26 +08:00
|
|
|
/* Set tx and rx clock internal delay modes
 *
 * Preference order: explicit "rx-internal-delay-ps"/"tx-internal-delay-ps"
 * DT properties win; only when neither is present do we fall back to
 * deriving the delays from the rgmii-*id phy-mode, recording that fallback
 * in priv->rgmii_override.
 */
static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	bool explicit_delay = false;
	u32 delay;

	if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 1800, according to DT bindings */
		priv->rxcidm = !!delay;
		explicit_delay = true;
	}
	if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 2000, according to DT bindings */
		priv->txcidm = !!delay;
		explicit_delay = true;
	}

	if (explicit_delay)
		return;

	/* Fall back to legacy rgmii-*id behavior */
	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		priv->rxcidm = 1;
		priv->rgmii_override = 1;
	}

	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		/* On quirky SoCs (see ravb_delay_mode_quirk_match) TX delay
		 * mode is unsupported: warn and leave txcidm unset
		 */
		if (!WARN(soc_device_match(ravb_delay_mode_quirk_match),
			  "phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. Please update device tree",
			  phy_modes(priv->phy_interface))) {
			priv->txcidm = 1;
			priv->rgmii_override = 1;
		}
	}
}
|
|
|
|
|
|
|
|
static void ravb_set_delay_mode(struct net_device *ndev)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
u32 set = 0;
|
2017-01-28 03:46:26 +08:00
|
|
|
|
2020-10-01 18:10:07 +08:00
|
|
|
if (priv->rxcidm)
|
2021-01-07 04:31:37 +08:00
|
|
|
set |= APSR_RDM;
|
2020-10-01 18:10:07 +08:00
|
|
|
if (priv->txcidm)
|
2021-01-07 04:31:37 +08:00
|
|
|
set |= APSR_TDM;
|
|
|
|
ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
|
2017-01-28 03:46:26 +08:00
|
|
|
}
|
|
|
|
|
2015-06-11 06:01:43 +08:00
|
|
|
/* Probe: map registers, gather IRQs and clocks, configure the AVB-DMAC,
 * set up MDIO/NAPI and register the net device. Error paths unwind in
 * strict reverse order via the out_* labels.
 */
static int ravb_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ravb_private *priv;
	enum ravb_chip_id chip_id;
	struct net_device *ndev;
	int error, irq, q;
	struct resource *res;
	int i;

	if (!np) {
		dev_err(&pdev->dev,
			"this driver is required to be instantiated from device tree\n");
		return -EINVAL;
	}

	/* Get base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
				  NUM_TX_QUEUE, NUM_RX_QUEUE);
	if (!ndev)
		return -ENOMEM;

	/* RX checksum offload supported and enabled by default */
	ndev->features = NETIF_F_RXCSUM;
	ndev->hw_features = NETIF_F_RXCSUM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* The Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;

	/* Gen2 vs Gen3 selected via the OF match table */
	chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);

	/* Gen3 has named per-channel IRQs; Gen2 a single combined one */
	if (chip_id == RCAR_GEN3)
		irq = platform_get_irq_byname(pdev, "ch22");
	else
		irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		error = irq;
		goto out_release;
	}
	ndev->irq = irq;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->pdev = pdev;
	priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
	priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
	priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
	priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr)) {
		error = PTR_ERR(priv->addr);
		goto out_release;
	}

	spin_lock_init(&priv->lock);
	INIT_WORK(&priv->work, ravb_tx_timeout_work);

	/* -ENODEV (no phy-mode property) is tolerated; other errors abort */
	error = of_get_phy_mode(np, &priv->phy_interface);
	if (error && error != -ENODEV)
		goto out_release;

	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
	priv->avb_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	/* Gen3: collect the EMAC IRQ and the dedicated RX/TX queue IRQs */
	if (chip_id == RCAR_GEN3) {
		irq = platform_get_irq_byname(pdev, "ch24");
		if (irq < 0) {
			error = irq;
			goto out_release;
		}
		priv->emac_irq = irq;
		for (i = 0; i < NUM_RX_QUEUE; i++) {
			irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
			if (irq < 0) {
				error = irq;
				goto out_release;
			}
			priv->rx_irqs[i] = irq;
		}
		for (i = 0; i < NUM_TX_QUEUE; i++) {
			irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
			if (irq < 0) {
				error = irq;
				goto out_release;
			}
			priv->tx_irqs[i] = irq;
		}
	}

	priv->chip_id = chip_id;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		error = PTR_ERR(priv->clk);
		goto out_release;
	}

	/* Optional reference clock; NULL (a no-op for clk ops) if absent */
	priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
	if (IS_ERR(priv->refclk)) {
		error = PTR_ERR(priv->refclk);
		goto out_release;
	}
	clk_prepare_enable(priv->refclk);

	/* 2K frame buffers minus Ethernet framing overhead */
	ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
	ndev->min_mtu = ETH_MIN_MTU;

	priv->num_tx_desc = chip_id == RCAR_GEN2 ?
		NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3;

	/* Set function */
	ndev->netdev_ops = &ravb_netdev_ops;
	ndev->ethtool_ops = &ravb_ethtool_ops;

	/* Set AVB config mode */
	ravb_set_config_mode(ndev);

	/* Set GTI value */
	error = ravb_set_gti(ndev);
	if (error)
		goto out_disable_refclk;

	/* Request GTI loading */
	ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);

	/* Internal delay modes only exist on Gen3 hardware */
	if (priv->chip_id != RCAR_GEN2) {
		ravb_parse_delay_mode(np, ndev);
		ravb_set_delay_mode(ndev);
	}

	/* Allocate descriptor base address table */
	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
	priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
					    &priv->desc_bat_dma, GFP_KERNEL);
	if (!priv->desc_bat) {
		dev_err(&pdev->dev,
			"Cannot allocate desc base address table (size %d bytes)\n",
			priv->desc_bat_size);
		error = -ENOMEM;
		goto out_disable_refclk;
	}
	for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
		priv->desc_bat[q].die_dt = DT_EOS;
	ravb_write(ndev, priv->desc_bat_dma, DBAT);

	/* Initialise HW timestamp list */
	INIT_LIST_HEAD(&priv->ts_skb_list);

	/* Initialise PTP Clock driver
	 * (Gen3 only at probe time; Gen2 starts PTP in ravb_open)
	 */
	if (chip_id != RCAR_GEN2)
		ravb_ptp_init(ndev, pdev);

	/* Debug message level */
	priv->msg_enable = RAVB_DEF_MSG_ENABLE;

	/* Read and set MAC address */
	ravb_read_mac_address(np, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one\n");
		eth_hw_addr_random(ndev);
	}

	/* MDIO bus init */
	error = ravb_mdio_init(priv);
	if (error) {
		dev_err(&pdev->dev, "failed to initialize MDIO\n");
		goto out_dma_free;
	}

	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
	netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);

	/* Network device register */
	error = register_netdev(ndev);
	if (error)
		goto out_napi_del;

	device_set_wakeup_capable(&pdev->dev, 1);

	/* Print device information */
	netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return 0;

out_napi_del:
	netif_napi_del(&priv->napi[RAVB_NC]);
	netif_napi_del(&priv->napi[RAVB_BE]);
	ravb_mdio_release(priv);
out_dma_free:
	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);

	/* Stop PTP Clock driver */
	if (chip_id != RCAR_GEN2)
		ravb_ptp_stop(ndev);
out_disable_refclk:
	clk_disable_unprepare(priv->refclk);
out_release:
	free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return error;
}
|
|
|
|
|
|
|
|
static int ravb_remove(struct platform_device *pdev)
|
|
|
|
{
|
|
|
|
struct net_device *ndev = platform_get_drvdata(pdev);
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
|
2015-12-02 01:04:39 +08:00
|
|
|
/* Stop PTP Clock driver */
|
|
|
|
if (priv->chip_id != RCAR_GEN2)
|
|
|
|
ravb_ptp_stop(ndev);
|
|
|
|
|
2021-04-12 21:26:19 +08:00
|
|
|
clk_disable_unprepare(priv->refclk);
|
|
|
|
|
2015-09-30 14:15:53 +08:00
|
|
|
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
|
2015-06-11 06:01:43 +08:00
|
|
|
priv->desc_bat_dma);
|
|
|
|
/* Set reset mode */
|
|
|
|
ravb_write(ndev, CCC_OPC_RESET, CCC);
|
|
|
|
pm_runtime_put_sync(&pdev->dev);
|
|
|
|
unregister_netdev(ndev);
|
|
|
|
netif_napi_del(&priv->napi[RAVB_NC]);
|
|
|
|
netif_napi_del(&priv->napi[RAVB_BE]);
|
Revert "ravb: Fixed to be able to unload modules"
This reverts commit 1838d6c62f57836639bd3d83e7855e0ee4f6defc.
This commit moved the ravb_mdio_init() call (and thus the
of_mdiobus_register() call) from the ravb_probe() to the ravb_open()
call. This causes a regression during system resume (s2idle/s2ram), as
new PHY devices cannot be bound while suspended.
During boot, the Micrel PHY is detected like this:
Micrel KSZ9031 Gigabit PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Micrel KSZ9031 Gigabit PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=228)
ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off
During system suspend, (A) defer_all_probes is set to true, and (B)
usermodehelper_disabled is set to UMH_DISABLED, to avoid drivers being
probed while suspended.
A. If CONFIG_MODULES=n, phy_device_register() calling device_add()
merely adds the device, but does not probe it yet, as
really_probe() returns early due to defer_all_probes being set:
dpm_resume+0x128/0x4f8
device_resume+0xcc/0x1b0
dpm_run_callback+0x74/0x340
ravb_resume+0x190/0x1b8
ravb_open+0x84/0x770
of_mdiobus_register+0x1e0/0x468
of_mdiobus_register_phy+0x1b8/0x250
of_mdiobus_phy_device_register+0x178/0x1e8
phy_device_register+0x114/0x1b8
device_add+0x3d4/0x798
bus_probe_device+0x98/0xa0
device_initial_probe+0x10/0x18
__device_attach+0xe4/0x140
bus_for_each_drv+0x64/0xc8
__device_attach_driver+0xb8/0xe0
driver_probe_device.part.11+0xc4/0xd8
really_probe+0x32c/0x3b8
Later, phy_attach_direct() notices no PHY driver has been bound,
and falls back to the Generic PHY, leading to degraded operation:
Generic PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Generic PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=POLL)
ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off
B. If CONFIG_MODULES=y, request_module() returns early with -EBUSY due
to UMH_DISABLED, and MDIO initialization fails completely:
mdio_bus e6800000.ethernet-ffffffff:00: error -16 loading PHY driver module for ID 0x00221622
ravb e6800000.ethernet eth0: failed to initialize MDIO
PM: dpm_run_callback(): ravb_resume+0x0/0x1b8 returns -16
PM: Device e6800000.ethernet failed to resume: error -16
Ignoring -EBUSY in phy_request_driver_module(), like was done for
-ENOENT in commit 21e194425abd65b5 ("net: phy: fix issue with loading
PHY driver w/o initramfs"), would makes it fall back to the Generic
PHY, like in the CONFIG_MODULES=n case.
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: stable@vger.kernel.org
Reviewed-by: Sergei Shtylyov <sergei.shtylyov@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-09-22 15:29:31 +08:00
|
|
|
ravb_mdio_release(priv);
|
2015-06-11 06:01:43 +08:00
|
|
|
pm_runtime_disable(&pdev->dev);
|
|
|
|
free_netdev(ndev);
|
|
|
|
platform_set_drvdata(pdev, NULL);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-08-01 18:14:36 +08:00
|
|
|
static int ravb_wol_setup(struct net_device *ndev)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
|
|
|
|
/* Disable interrupts by clearing the interrupt masks. */
|
|
|
|
ravb_write(ndev, 0, RIC0);
|
|
|
|
ravb_write(ndev, 0, RIC2);
|
|
|
|
ravb_write(ndev, 0, TIC);
|
|
|
|
|
|
|
|
/* Only allow ECI interrupts */
|
|
|
|
synchronize_irq(priv->emac_irq);
|
|
|
|
napi_disable(&priv->napi[RAVB_NC]);
|
|
|
|
napi_disable(&priv->napi[RAVB_BE]);
|
|
|
|
ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
|
|
|
|
|
|
|
|
/* Enable MagicPacket */
|
|
|
|
ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
|
|
|
|
|
|
|
|
return enable_irq_wake(priv->emac_irq);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ravb_wol_restore(struct net_device *ndev)
|
|
|
|
{
|
|
|
|
struct ravb_private *priv = netdev_priv(ndev);
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
napi_enable(&priv->napi[RAVB_NC]);
|
|
|
|
napi_enable(&priv->napi[RAVB_BE]);
|
|
|
|
|
|
|
|
/* Disable MagicPacket */
|
|
|
|
ravb_modify(ndev, ECMR, ECMR_MPDE, 0);
|
|
|
|
|
|
|
|
ret = ravb_close(ndev);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
return disable_irq_wake(priv->emac_irq);
|
|
|
|
}
|
|
|
|
|
2016-08-26 23:30:29 +08:00
|
|
|
/* System-sleep suspend callback.
 *
 * A stopped interface needs no work.  Otherwise the device is detached
 * from the stack, then either armed for Wake-on-LAN or fully closed,
 * depending on the user's WoL configuration.
 */
static int __maybe_unused ravb_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	netif_device_detach(ndev);

	if (priv->wol_enabled)
		ret = ravb_wol_setup(ndev);
	else
		ret = ravb_close(ndev);

	return ret;
}
|
|
|
|
|
2016-08-26 23:30:29 +08:00
|
|
|
/* System-sleep resume callback.
 *
 * Re-initializes the controller registers lost over suspend (config
 * mode, GTI, delay mode, descriptor base address table) and, if the
 * interface was running, restores WoL state and reopens the device.
 */
static int __maybe_unused ravb_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);
	int ret = 0;

	/* If WoL is enabled, set reset mode to rearm the WoL logic. */
	if (priv->wol_enabled)
		ravb_write(ndev, CCC_OPC_RESET, CCC);

	/* All registers are back at their reset defaults here; restore
	 * everything that was set up at probe time, then reopen the device
	 * if it was running when the system suspended.
	 */

	/* Set AVB config mode. */
	ravb_set_config_mode(ndev);

	/* Set GTI value. */
	ret = ravb_set_gti(ndev);
	if (ret)
		return ret;

	/* Request GTI loading. */
	ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);

	if (priv->chip_id != RCAR_GEN2)
		ravb_set_delay_mode(ndev);

	/* Restore descriptor base address table. */
	ravb_write(ndev, priv->desc_bat_dma, DBAT);

	if (netif_running(ndev)) {
		if (priv->wol_enabled) {
			ret = ravb_wol_restore(ndev);
			if (ret)
				return ret;
		}
		ret = ravb_open(ndev);
		if (ret < 0)
			return ret;
		netif_device_attach(ndev);
	}

	return ret;
}
|
|
|
|
|
2016-08-26 23:30:29 +08:00
|
|
|
/* Shared runtime PM callback for both ->runtime_suspend() and
 * ->runtime_resume().  Intentionally a no-op: the driver fully
 * re-initializes every register after pm_runtime_get_sync(), so there
 * is nothing to save or restore here.
 */
static int __maybe_unused ravb_runtime_nop(struct device *dev)
{
	return 0;
}
|
|
|
|
|
|
|
|
static const struct dev_pm_ops ravb_dev_pm_ops = {
|
2016-08-10 19:09:49 +08:00
|
|
|
SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
|
2016-05-30 04:25:43 +08:00
|
|
|
SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
|
2015-06-11 06:01:43 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
static struct platform_driver ravb_driver = {
|
|
|
|
.probe = ravb_probe,
|
|
|
|
.remove = ravb_remove,
|
|
|
|
.driver = {
|
|
|
|
.name = "ravb",
|
2016-08-26 23:30:29 +08:00
|
|
|
.pm = &ravb_dev_pm_ops,
|
2015-06-11 06:01:43 +08:00
|
|
|
.of_match_table = ravb_match_table,
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
module_platform_driver(ravb_driver);
|
|
|
|
|
|
|
|
MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
|
|
|
|
MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
|
|
|
|
MODULE_LICENSE("GPL v2");
|