mirror of https://gitee.com/openkylin/linux.git
stmmac: fix dma api misuse
With DMA_API_DEBUG enabled, warnings are reported at runtime because the driver frees DMA memory with the wrong functions and does not call dma_mapping_error() after mapping DMA memory. The first problem is fixed by introducing a flag that keeps track of which mapping technique was used, so that the matching API can be called at unmap time. This approach was inspired by the e1000 driver, which uses a similar technique. The second problem is fixed by checking every mapping with dma_mapping_error().

Signed-off-by: Andre Draszik <andre.draszik@st.com>
Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Reviewed-by: Denis Kirjanov <kda@linux-powerpc.org>
Cc: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 5566401f2f
commit 362b37be01
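The substance of the patch is the pattern sketched below: each TX buffer mapping is stored together with a flag recording how it was mapped, every mapping is checked with dma_mapping_error(), and the unmap path calls dma_unmap_page() or dma_unmap_single() depending on that flag. This is a simplified, hypothetical sketch rather than the driver's actual code: struct stmmac_tx_info and the DMA API calls come from the patch itself, but the helpers example_map_linear() and example_unmap() are invented for illustration.

/* Simplified sketch of the pattern introduced by this patch.
 * example_map_linear()/example_unmap() are hypothetical helpers,
 * not functions from the stmmac driver.
 */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct stmmac_tx_info {
	dma_addr_t buf;
	bool map_as_page;	/* true if mapped with skb_frag_dma_map() */
};

/* Map the linear part of an skb and record how it was mapped. */
static int example_map_linear(struct device *dev, struct stmmac_tx_info *info,
			      struct sk_buff *skb, unsigned int len)
{
	dma_addr_t addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);

	/* Check the mapping before handing the address to the hardware. */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	info->buf = addr;
	info->map_as_page = false;
	return 0;
}

/* Release a mapping with the API that matches how it was created. */
static void example_unmap(struct device *dev, struct stmmac_tx_info *info,
			  unsigned int len)
{
	if (!info->buf)
		return;

	if (info->map_as_page)
		dma_unmap_page(dev, info->buf, len, DMA_TO_DEVICE);
	else
		dma_unmap_single(dev, info->buf, len, DMA_TO_DEVICE);

	info->buf = 0;
	info->map_as_page = false;
}

Fragments mapped with skb_frag_dma_map() are page mappings underneath, so they must be released with dma_unmap_page(); unmapping everything with dma_unmap_single(), as the driver did before this patch, is exactly what DMA_API_DEBUG reports at runtime.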
@@ -28,7 +28,7 @@
 
 #include "stmmac.h"
 
-static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 {
 	struct stmmac_priv *priv = (struct stmmac_priv *)p;
 	unsigned int txsize = priv->dma_tx_size;
@@ -47,7 +47,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
 		desc->des2 = dma_map_single(priv->device, skb->data,
 					    bmax, DMA_TO_DEVICE);
-		priv->tx_skbuff_dma[entry] = desc->des2;
+		if (dma_mapping_error(priv->device, desc->des2))
+			return -1;
+		priv->tx_skbuff_dma[entry].buf = desc->des2;
 		priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
 
 		while (len != 0) {
@@ -59,7 +61,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 			desc->des2 = dma_map_single(priv->device,
 						    (skb->data + bmax * i),
 						    bmax, DMA_TO_DEVICE);
-			priv->tx_skbuff_dma[entry] = desc->des2;
+			if (dma_mapping_error(priv->device, desc->des2))
+				return -1;
+			priv->tx_skbuff_dma[entry].buf = desc->des2;
 			priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
 							STMMAC_CHAIN_MODE);
 			priv->hw->desc->set_tx_owner(desc);
@@ -69,7 +73,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 			desc->des2 = dma_map_single(priv->device,
 						    (skb->data + bmax * i), len,
 						    DMA_TO_DEVICE);
-			priv->tx_skbuff_dma[entry] = desc->des2;
+			if (dma_mapping_error(priv->device, desc->des2))
+				return -1;
+			priv->tx_skbuff_dma[entry].buf = desc->des2;
 			priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
 							STMMAC_CHAIN_MODE);
 			priv->hw->desc->set_tx_owner(desc);
@@ -425,7 +425,7 @@ struct stmmac_mode_ops {
 	void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
 		      unsigned int extend_desc);
 	unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
-	unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+	int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum);
 	int (*set_16kib_bfsize)(int mtu);
 	void (*init_desc3)(struct dma_desc *p);
 	void (*refill_desc3) (void *priv, struct dma_desc *p);
@@ -28,7 +28,7 @@
 
 #include "stmmac.h"
 
-static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 {
 	struct stmmac_priv *priv = (struct stmmac_priv *)p;
 	unsigned int txsize = priv->dma_tx_size;
@@ -53,7 +53,10 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
 		desc->des2 = dma_map_single(priv->device, skb->data,
 					    bmax, DMA_TO_DEVICE);
-		priv->tx_skbuff_dma[entry] = desc->des2;
+		if (dma_mapping_error(priv->device, desc->des2))
+			return -1;
+
+		priv->tx_skbuff_dma[entry].buf = desc->des2;
 		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
 		priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
 						STMMAC_RING_MODE);
@@ -68,7 +71,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
 		desc->des2 = dma_map_single(priv->device, skb->data + bmax,
 					    len, DMA_TO_DEVICE);
-		priv->tx_skbuff_dma[entry] = desc->des2;
+		if (dma_mapping_error(priv->device, desc->des2))
+			return -1;
+		priv->tx_skbuff_dma[entry].buf = desc->des2;
 		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
 						STMMAC_RING_MODE);
@@ -77,7 +82,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 	} else {
 		desc->des2 = dma_map_single(priv->device, skb->data,
 					    nopaged_len, DMA_TO_DEVICE);
-		priv->tx_skbuff_dma[entry] = desc->des2;
+		if (dma_mapping_error(priv->device, desc->des2))
+			return -1;
+		priv->tx_skbuff_dma[entry].buf = desc->des2;
 		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
 		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
 						STMMAC_RING_MODE);
@@ -34,6 +34,11 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/reset.h>
 
+struct stmmac_tx_info {
+	dma_addr_t buf;
+	bool map_as_page;
+};
+
 struct stmmac_priv {
 	/* Frequently used values are kept adjacent for cache effect */
 	struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -45,7 +50,7 @@ struct stmmac_priv {
 	u32 tx_count_frames;
 	u32 tx_coal_frames;
 	u32 tx_coal_timer;
-	dma_addr_t *tx_skbuff_dma;
+	struct stmmac_tx_info *tx_skbuff_dma;
 	dma_addr_t dma_tx_phy;
 	int tx_coalesce;
 	int hwts_tx_en;
@@ -1073,7 +1073,8 @@ static int init_dma_desc_rings(struct net_device *dev)
 		else
 			p = priv->dma_tx + i;
 		p->des2 = 0;
-		priv->tx_skbuff_dma[i] = 0;
+		priv->tx_skbuff_dma[i].buf = 0;
+		priv->tx_skbuff_dma[i].map_as_page = false;
 		priv->tx_skbuff[i] = NULL;
 	}
 
@@ -1112,17 +1113,24 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
 		else
 			p = priv->dma_tx + i;
 
-		if (priv->tx_skbuff_dma[i]) {
-			dma_unmap_single(priv->device,
-					 priv->tx_skbuff_dma[i],
-					 priv->hw->desc->get_tx_len(p),
-					 DMA_TO_DEVICE);
-			priv->tx_skbuff_dma[i] = 0;
+		if (priv->tx_skbuff_dma[i].buf) {
+			if (priv->tx_skbuff_dma[i].map_as_page)
+				dma_unmap_page(priv->device,
+					       priv->tx_skbuff_dma[i].buf,
+					       priv->hw->desc->get_tx_len(p),
+					       DMA_TO_DEVICE);
+			else
+				dma_unmap_single(priv->device,
+						 priv->tx_skbuff_dma[i].buf,
+						 priv->hw->desc->get_tx_len(p),
+						 DMA_TO_DEVICE);
 		}
 
 		if (priv->tx_skbuff[i] != NULL) {
 			dev_kfree_skb_any(priv->tx_skbuff[i]);
 			priv->tx_skbuff[i] = NULL;
+			priv->tx_skbuff_dma[i].buf = 0;
+			priv->tx_skbuff_dma[i].map_as_page = false;
 		}
 	}
 }
@@ -1143,7 +1151,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
 	if (!priv->rx_skbuff)
 		goto err_rx_skbuff;
 
-	priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+	priv->tx_skbuff_dma = kmalloc_array(txsize,
+					    sizeof(*priv->tx_skbuff_dma),
 					    GFP_KERNEL);
 	if (!priv->tx_skbuff_dma)
 		goto err_tx_skbuff_dma;
@@ -1305,12 +1314,19 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 			pr_debug("%s: curr %d, dirty %d\n", __func__,
 				 priv->cur_tx, priv->dirty_tx);
 
-		if (likely(priv->tx_skbuff_dma[entry])) {
-			dma_unmap_single(priv->device,
-					 priv->tx_skbuff_dma[entry],
-					 priv->hw->desc->get_tx_len(p),
-					 DMA_TO_DEVICE);
-			priv->tx_skbuff_dma[entry] = 0;
+		if (likely(priv->tx_skbuff_dma[entry].buf)) {
+			if (priv->tx_skbuff_dma[entry].map_as_page)
+				dma_unmap_page(priv->device,
+					       priv->tx_skbuff_dma[entry].buf,
+					       priv->hw->desc->get_tx_len(p),
+					       DMA_TO_DEVICE);
+			else
+				dma_unmap_single(priv->device,
+						 priv->tx_skbuff_dma[entry].buf,
+						 priv->hw->desc->get_tx_len(p),
+						 DMA_TO_DEVICE);
+			priv->tx_skbuff_dma[entry].buf = 0;
+			priv->tx_skbuff_dma[entry].map_as_page = false;
 		}
 		priv->hw->mode->clean_desc3(priv, p);
 
@@ -1905,12 +1921,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (likely(!is_jumbo)) {
 		desc->des2 = dma_map_single(priv->device, skb->data,
 					    nopaged_len, DMA_TO_DEVICE);
-		priv->tx_skbuff_dma[entry] = desc->des2;
+		if (dma_mapping_error(priv->device, desc->des2))
+			goto dma_map_err;
+		priv->tx_skbuff_dma[entry].buf = desc->des2;
 		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
 						csum_insertion, priv->mode);
 	} else {
 		desc = first;
 		entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+		if (unlikely(entry < 0))
+			goto dma_map_err;
 	}
 
 	for (i = 0; i < nfrags; i++) {
@@ -1926,7 +1946,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
 					      DMA_TO_DEVICE);
-		priv->tx_skbuff_dma[entry] = desc->des2;
+		if (dma_mapping_error(priv->device, desc->des2))
+			goto dma_map_err; /* should reuse desc w/o issues */
+
+		priv->tx_skbuff_dma[entry].buf = desc->des2;
+		priv->tx_skbuff_dma[entry].map_as_page = true;
 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
 						priv->mode);
 		wmb();
@@ -1993,7 +2017,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	priv->hw->dma->enable_dma_transmission(priv->ioaddr);
 
 	spin_unlock(&priv->tx_lock);
 	return NETDEV_TX_OK;
+
+dma_map_err:
+	dev_err(priv->device, "Tx dma map failed\n");
+	dev_kfree_skb(skb);
+	priv->dev->stats.tx_dropped++;
+	return NETDEV_TX_OK;
 }
 
@@ -2046,7 +2075,12 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 			priv->rx_skbuff_dma[entry] =
 			    dma_map_single(priv->device, skb->data, bfsize,
 					   DMA_FROM_DEVICE);
+
+			if (dma_mapping_error(priv->device,
+					      priv->rx_skbuff_dma[entry])) {
+				dev_err(priv->device, "Rx dma map failed\n");
+				dev_kfree_skb(skb);
+				break;
+			}
 			p->des2 = priv->rx_skbuff_dma[entry];
 
 			priv->hw->mode->refill_desc3(priv, p);