stmmac: fix memory barriers

Fix up the memory barriers in the stmmac driver. They are meant to
protect against the DMA engine, so the smp_ variants are certainly
wrong, and the dma_ variants are preferable.

Signed-off-by: Pavel Machek <pavel@denx.de>
Tested-by: Niklas Cassel <niklas.cassel@axis.com>
Acked-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit ad688cdbb0 (parent 162809dfc2)
Author: Pavel Machek, 2016-12-18 21:38:12 +01:00
Committed-by: David S. Miller
3 changed files with 7 additions and 7 deletions
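
Before the diffs, a minimal sketch of the publication pattern the tx
hunks below are fixing. The helper name publish_tx_desc() is
illustrative, not the driver's actual API; the des3/OWN handover and
the dma_wmb() placement follow the hunks:

/* Sketch: handing a tx descriptor to the DMA engine.
 *
 * Every field of the descriptor must be visible to the device before
 * the OWN bit is transferred. smp_wmb() only orders CPU-vs-CPU
 * accesses (and compiles away on !CONFIG_SMP), while plain wmb() is
 * heavier than needed; dma_wmb() is the barrier that orders CPU
 * writes to DMA-coherent memory against the device.
 */
static void publish_tx_desc(struct dma_desc *p, u32 tdes3_with_own)
{
	/* ... fill buffer address, length and flag fields of *p ... */

	dma_wmb();				/* fields visible before OWN */
	p->des3 = cpu_to_le32(tdes3_with_own);	/* device owns it now */
}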

--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c

@@ -334,7 +334,7 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
 	 * descriptors for the same frame has to be set before, to
 	 * avoid race condition.
 	 */
-	wmb();
+	dma_wmb();
 	p->des3 = cpu_to_le32(tdes3);
 }
@@ -377,7 +377,7 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
 	 * descriptors for the same frame has to be set before, to
 	 * avoid race condition.
 	 */
-	wmb();
+	dma_wmb();
 	p->des3 = cpu_to_le32(tdes3);
 }

--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c

@@ -350,7 +350,7 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
 	 * descriptors for the same frame has to be set before, to
 	 * avoid race condition.
 	 */
-	wmb();
+	dma_wmb();
 	p->des0 = cpu_to_le32(tdes0);
 }

--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

@@ -2125,7 +2125,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * descriptor and then barrier is needed to make sure that
 	 * all is coherent before granting the DMA engine.
 	 */
-	smp_wmb();
+	dma_wmb();
 	if (netif_msg_pktdata(priv)) {
 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
@@ -2338,7 +2338,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		 * descriptor and then barrier is needed to make sure that
 		 * all is coherent before granting the DMA engine.
 		 */
-		smp_wmb();
+		dma_wmb();
 	}
 	netdev_sent_queue(dev, skb->len);
@@ -2443,14 +2443,14 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 			netif_dbg(priv, rx_status, priv->dev,
 				  "refill entry #%d\n", entry);
 		}
-		wmb();
+		dma_wmb();
 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
 			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
 		else
 			priv->hw->desc->set_rx_owner(p);
-		wmb();
+		dma_wmb();
 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
 	}
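
The receive side in the last hunk needs two barriers because there are
two ordering points: the refilled buffer address must be visible before
the OWN bit is handed back (inside init_rx_desc()/set_rx_owner()), and
the ownership flip must be visible before any later write that tells
the hardware to look, such as a tail-pointer update. A minimal sketch
of that shape; refill_one() and the bare des2 assignment are
illustrative simplifications of the refill loop:

/* Sketch: the two-barrier rx refill shape from the hunk above. */
static void refill_one(struct stmmac_priv *priv, struct dma_desc *p,
		       dma_addr_t buf)
{
	p->des2 = cpu_to_le32(buf);	/* publish the new buffer address */

	dma_wmb();	/* address visible before OWN is handed over */
	priv->hw->desc->set_rx_owner(p);	/* device owns the descriptor */
	dma_wmb();	/* ownership visible before any later doorbell write */
}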