net: systemport: log RX buffer allocation and RX/TX DMA failures
To help troubleshoot heavy memory pressure conditions, add a set of statistics counters to log RX buffer allocation and RX/TX DMA mapping failures. These are reported like any other counters through the ethtool stats interface. Signed-off-by: Florian Fainelli <f.fainelli@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
9c7077622d
commit
60b4ea1781
|
@ -274,6 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
|
||||||
/* RBUF misc statistics */
|
/* RBUF misc statistics */
|
||||||
STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
|
STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
|
||||||
STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
|
STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
|
||||||
|
STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
|
||||||
|
STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
|
||||||
|
STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
|
||||||
};
|
};
|
||||||
|
|
||||||
#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
|
#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
|
||||||
|
@ -477,6 +480,7 @@ static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
|
||||||
RX_BUF_LENGTH, DMA_FROM_DEVICE);
|
RX_BUF_LENGTH, DMA_FROM_DEVICE);
|
||||||
ret = dma_mapping_error(kdev, mapping);
|
ret = dma_mapping_error(kdev, mapping);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
|
priv->mib.rx_dma_failed++;
|
||||||
bcm_sysport_free_cb(cb);
|
bcm_sysport_free_cb(cb);
|
||||||
netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
|
netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -526,6 +530,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
|
||||||
unsigned int p_index;
|
unsigned int p_index;
|
||||||
u16 len, status;
|
u16 len, status;
|
||||||
struct bcm_rsb *rsb;
|
struct bcm_rsb *rsb;
|
||||||
|
int ret;
|
||||||
|
|
||||||
/* Determine how much we should process since last call */
|
/* Determine how much we should process since last call */
|
||||||
p_index = rdma_readl(priv, RDMA_PROD_INDEX);
|
p_index = rdma_readl(priv, RDMA_PROD_INDEX);
|
||||||
|
@ -620,7 +625,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
|
||||||
|
|
||||||
napi_gro_receive(&priv->napi, skb);
|
napi_gro_receive(&priv->napi, skb);
|
||||||
refill:
|
refill:
|
||||||
bcm_sysport_rx_refill(priv, cb);
|
ret = bcm_sysport_rx_refill(priv, cb);
|
||||||
|
if (ret)
|
||||||
|
priv->mib.alloc_rx_buff_failed++;
|
||||||
}
|
}
|
||||||
|
|
||||||
return processed;
|
return processed;
|
||||||
|
@ -973,6 +980,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
|
||||||
|
|
||||||
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
|
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(kdev, mapping)) {
|
if (dma_mapping_error(kdev, mapping)) {
|
||||||
|
priv->mib.tx_dma_failed++;
|
||||||
netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
|
netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
|
||||||
skb->data, skb_len);
|
skb->data, skb_len);
|
||||||
ret = NETDEV_TX_OK;
|
ret = NETDEV_TX_OK;
|
||||||
|
|
|
@ -557,6 +557,9 @@ struct bcm_sysport_mib {
|
||||||
u32 rxchk_other_pkt_disc;
|
u32 rxchk_other_pkt_disc;
|
||||||
u32 rbuf_ovflow_cnt;
|
u32 rbuf_ovflow_cnt;
|
||||||
u32 rbuf_err_cnt;
|
u32 rbuf_err_cnt;
|
||||||
|
u32 alloc_rx_buff_failed;
|
||||||
|
u32 rx_dma_failed;
|
||||||
|
u32 tx_dma_failed;
|
||||||
};
|
};
|
||||||
|
|
||||||
/* HW maintains a large list of counters */
|
/* HW maintains a large list of counters */
|
||||||
|
|
Loading…
Reference in New Issue