net: stmmac: use correct DMA buffer size in the RX descriptor

We always program the maximum DMA buffer size into the receive descriptor,
even though the allocated buffer may be smaller. For example, with the
default MTU we allocate only 1536 bytes. If somebody sends us a bigger
frame, memory beyond the allocation may get corrupted.

Fix by programming the exact buffer sizes into the descriptors.

Signed-off-by: Aaro Koskinen <aaro.koskinen@nokia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Aaro Koskinen <aaro.koskinen@nokia.com>, 2019-03-27 22:35:35 +02:00
Committer: David S. Miller <davem@davemloft.net>
Commit:    583e636141 (parent 288ac524cf)
7 changed files with 35 additions and 19 deletions
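The change threads the actual allocation size (bfsize) from the core driver,
through the init_rx_desc callback in stmmac_desc_ops, down to each descriptor
implementation, so the size fields written into the descriptor can never
exceed the allocated buffer. The failure mode is easiest to see with a toy
calculation; the numbers below are illustrative and this is not driver code:

#include <stdio.h>

/*
 * The RX descriptor's size fields tell the DMA engine how many bytes it may
 * write, independently of how much memory was actually allocated for the skb.
 */
int main(void)
{
        int allocated  = 1536;        /* actual allocation with the default MTU */
        int programmed = 2047 + 2047; /* old code: maximum buffer 1 + buffer 2 sizes */
        int frame_len  = 3000;        /* hypothetical oversized incoming frame */

        int written = frame_len < programmed ? frame_len : programmed;

        if (written > allocated)
                printf("overrun: hardware may write %d bytes into a %d-byte buffer\n",
                       written, allocated);

        /* after the fix, the programmed size is clamped to the allocation */
        programmed = allocated;
        written = frame_len < programmed ? frame_len : programmed;
        printf("with the fix the engine stops after %d bytes\n", written);
        return 0;
}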

drivers/net/ethernet/stmicro/stmmac/descs_com.h

@@ -29,11 +29,13 @@
 /* Specific functions used for Ring mode */
 
 /* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
+                                           int bfsize)
 {
-        p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
-                        << ERDES1_BUFFER2_SIZE_SHIFT)
-                   & ERDES1_BUFFER2_SIZE_MASK);
+        if (bfsize == BUF_SIZE_16KiB)
+                p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+                                << ERDES1_BUFFER2_SIZE_SHIFT)
+                           & ERDES1_BUFFER2_SIZE_MASK);
 
         if (end)
                 p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
 }
 
 /* Normal descriptors */
-static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
 {
-        p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
-                        << RDES1_BUFFER2_SIZE_SHIFT)
-                   & RDES1_BUFFER2_SIZE_MASK);
+        if (bfsize >= BUF_SIZE_2KiB) {
+                int bfsize2;
+
+                bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
+                p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
+                            & RDES1_BUFFER2_SIZE_MASK);
+        }
 
         if (end)
                 p->des1 |= cpu_to_le32(RDES1_END_RING);
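For normal descriptors the second buffer is now used only when the allocation
is at least BUF_SIZE_2KiB, because buffer 1 can describe at most
BUF_SIZE_2KiB - 1 bytes (the buffer 1 clamp appears in ndesc_init_rx_desc
further down). A standalone check of the arithmetic, with the constants
copied from the patch, purely as a worked example:

#include <stdio.h>

#define BUF_SIZE_2KiB 2048

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
        int sizes[] = { 1536, 2048, 4096 };

        for (int i = 0; i < 3; i++) {
                int bfsize  = sizes[i];
                int bfsize1 = min_int(bfsize, BUF_SIZE_2KiB - 1);  /* buffer 1 size field */
                int bfsize2 = 0;

                if (bfsize >= BUF_SIZE_2KiB)  /* buffer 2 only when buffer 1 is not enough */
                        bfsize2 = min_int(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);

                printf("bfsize %5d -> buf1 %4d, buf2 %4d (covers %d bytes)\n",
                       bfsize, bfsize1, bfsize2, bfsize1 + bfsize2);
        }
        return 0;
}

For the default 1536-byte allocation nothing is written to the buffer 2 field
at all, which is exactly what stops the descriptor from advertising space the
driver never allocated.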

drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c

@@ -296,7 +296,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
 }
 
 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                   int mode, int end)
+                                   int mode, int end, int bfsize)
 {
         dwmac4_set_rx_owner(p, disable_rx_ic);
 }

drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c

@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
 }
 
 static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                  int mode, int end)
+                                  int mode, int end, int bfsize)
 {
         dwxgmac2_set_rx_owner(p, disable_rx_ic);
 }

drivers/net/ethernet/stmicro/stmmac/enh_desc.c

@@ -259,15 +259,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                  int mode, int end)
+                                  int mode, int end, int bfsize)
 {
+        int bfsize1;
+
         p->des0 |= cpu_to_le32(RDES0_OWN);
-        p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
+
+        bfsize1 = min(bfsize, BUF_SIZE_8KiB);
+        p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
 
         if (mode == STMMAC_CHAIN_MODE)
                 ehn_desc_rx_set_on_chain(p);
         else
-                ehn_desc_rx_set_on_ring(p, end);
+                ehn_desc_rx_set_on_ring(p, end, bfsize);
 
         if (disable_rx_ic)
                 p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);

drivers/net/ethernet/stmicro/stmmac/hwif.h

@@ -33,7 +33,7 @@ struct dma_extended_desc;
 struct stmmac_desc_ops {
         /* DMA RX descriptor ring initialization */
         void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
-                        int end);
+                        int end, int bfsize);
         /* DMA TX descriptor ring initialization */
         void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
         /* Invoked by the xmit function to prepare the tx descriptor */
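init_rx_desc is only ever reached through this ops table, which is why the
new bfsize parameter has to be added to every descriptor implementation
(enhanced, normal, dwmac4, xgmac2) in the same patch. A minimal sketch of
that dispatch pattern, using made-up names rather than the actual stmmac hwif
plumbing:

#include <stdio.h>

struct dma_desc { unsigned int des0, des1, des2, des3; };

/* one callback set per descriptor format, selected when the MAC is probed */
struct desc_ops {
        void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
                             int end, int bfsize);
};

static void normal_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
                                int mode, int end, int bfsize)
{
        printf("normal descriptor format, bfsize %d\n", bfsize);
}

static const struct desc_ops normal_ops = {
        .init_rx_desc = normal_init_rx_desc,
};

int main(void)
{
        struct dma_desc d = { 0 };
        const struct desc_ops *ops = &normal_ops;  /* chosen per hardware variant */

        /* every call site must now pass the real allocation size */
        ops->init_rx_desc(&d, 0, 0, 0, 1536);
        return 0;
}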

drivers/net/ethernet/stmicro/stmmac/norm_desc.c

@@ -135,15 +135,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
-                               int end)
+                               int end, int bfsize)
 {
+        int bfsize1;
+
         p->des0 |= cpu_to_le32(RDES0_OWN);
-        p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
+
+        bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
+        p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
 
         if (mode == STMMAC_CHAIN_MODE)
                 ndesc_rx_set_on_chain(p, end);
         else
-                ndesc_rx_set_on_ring(p, end);
+                ndesc_rx_set_on_ring(p, end, bfsize);
 
         if (disable_rx_ic)
                 p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);

drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
                 if (priv->extend_desc)
                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
                                         priv->use_riwt, priv->mode,
-                                        (i == DMA_RX_SIZE - 1));
+                                        (i == DMA_RX_SIZE - 1),
+                                        priv->dma_buf_sz);
                 else
                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
                                         priv->use_riwt, priv->mode,
-                                        (i == DMA_RX_SIZE - 1));
+                                        (i == DMA_RX_SIZE - 1),
+                                        priv->dma_buf_sz);
         }
 
 /**
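The priv->dma_buf_sz value passed above is the per-packet RX allocation the
driver derives from the MTU; the 1536 bytes mentioned in the commit message
is the default for a standard MTU. A sketch of that relationship, assuming
the usual stmmac size buckets (the exact helper and thresholds in the driver
may differ):

#include <stdio.h>

#define DEFAULT_BUFSIZE 1536
#define BUF_SIZE_4KiB   4096
#define BUF_SIZE_8KiB   8192
#define BUF_SIZE_16KiB  16384

/* assumed MTU-to-RX-buffer mapping; illustrative only */
static int rx_buf_size(int mtu)
{
        if (mtu >= BUF_SIZE_8KiB)
                return BUF_SIZE_16KiB;
        if (mtu >= BUF_SIZE_4KiB)
                return BUF_SIZE_8KiB;
        if (mtu > DEFAULT_BUFSIZE)
                return BUF_SIZE_4KiB;
        return DEFAULT_BUFSIZE;  /* a standard 1500-byte MTU lands here */
}

int main(void)
{
        int mtus[] = { 1500, 4000, 9000 };

        for (int i = 0; i < 3; i++)
                printf("mtu %5d -> rx buffer %5d bytes\n",
                       mtus[i], rx_buf_size(mtus[i]));
        return 0;
}

Whatever size ends up in dma_buf_sz, the descriptor code above now never
advertises more than that to the DMA engine.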