mirror of https://gitee.com/openkylin/linux.git
net: stmmac: Let TX and RX interrupts be independently enabled/disabled
By using this mechanism we can get rid of the not so nice method of
scheduling TX NAPI when the RX was scheduled. No bandwidth reduction was
seen with this change.

Changes from v1:
- Remove useless comment (Jakub)
- Do not bind the TX clean to NAPI budget (Jakub)

Signed-off-by: Jose Abreu <Jose.Abreu@synopsys.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7d0b447a3f
commit 021bd5e369
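Every DMA backend below gets the same mechanical treatment: the old helpers wrote a fixed default mask (or zero) to the interrupt-enable register, while the new ones read it back and set or clear only the RX or TX bits the caller asked for. Here is a minimal, compilable user-space model of that read-modify-write pattern; the MODEL_* names are invented for illustration, while the real helpers use readl()/writel() on per-IP registers such as EMAC_INT_EN or DMA_CHAN_INTR_ENA(chan).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit layout standing in for EMAC_RX_INT/EMAC_TX_INT etc. */
#define MODEL_RX_INT (1u << 0)
#define MODEL_TX_INT (1u << 1)

static uint32_t model_int_en;   /* stands in for the interrupt-enable register */

static void model_enable_dma_irq(bool rx, bool tx)
{
        uint32_t value = model_int_en;  /* readl() */

        if (rx)
                value |= MODEL_RX_INT;  /* touch only the RX sources */
        if (tx)
                value |= MODEL_TX_INT;  /* touch only the TX sources */

        model_int_en = value;           /* writel() */
}

static void model_disable_dma_irq(bool rx, bool tx)
{
        uint32_t value = model_int_en;

        if (rx)
                value &= ~MODEL_RX_INT;
        if (tx)
                value &= ~MODEL_TX_INT;

        model_int_en = value;
}

int main(void)
{
        model_enable_dma_irq(true, true);   /* open path: both directions on */
        model_disable_dma_irq(false, true); /* TX NAPI scheduled: mask TX only */
        printf("RX still enabled: %s\n",
               (model_int_en & MODEL_RX_INT) ? "yes" : "no");
        return 0;
}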
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -335,14 +335,30 @@ static void sun8i_dwmac_dump_mac_regs(struct mac_device_info *hw,
 	}
 }
 
-static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan,
+				       bool rx, bool tx)
 {
-	writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
+	u32 value = readl(ioaddr + EMAC_INT_EN);
+
+	if (rx)
+		value |= EMAC_RX_INT;
+	if (tx)
+		value |= EMAC_TX_INT;
+
+	writel(value, ioaddr + EMAC_INT_EN);
 }
 
-static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan,
+					bool rx, bool tx)
 {
-	writel(0, ioaddr + EMAC_INT_EN);
+	u32 value = readl(ioaddr + EMAC_INT_EN);
+
+	if (rx)
+		value &= ~EMAC_RX_INT;
+	if (tx)
+		value &= ~EMAC_TX_INT;
+
+	writel(value, ioaddr + EMAC_INT_EN);
 }
 
 static void sun8i_dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -168,6 +168,8 @@
 /* DMA default interrupt mask for 4.00 */
 #define DMA_CHAN_INTR_DEFAULT_MASK	(DMA_CHAN_INTR_NORMAL | \
 					 DMA_CHAN_INTR_ABNORMAL)
+#define DMA_CHAN_INTR_DEFAULT_RX	(DMA_CHAN_INTR_ENA_RIE)
+#define DMA_CHAN_INTR_DEFAULT_TX	(DMA_CHAN_INTR_ENA_TIE)
 
 #define DMA_CHAN_INTR_NORMAL_4_10	(DMA_CHAN_INTR_ENA_NIE_4_10 | \
 					 DMA_CHAN_INTR_ENA_RIE | \
@@ -178,6 +180,8 @@
 /* DMA default interrupt mask for 4.10a */
 #define DMA_CHAN_INTR_DEFAULT_MASK_4_10	(DMA_CHAN_INTR_NORMAL_4_10 | \
 					 DMA_CHAN_INTR_ABNORMAL_4_10)
+#define DMA_CHAN_INTR_DEFAULT_RX_4_10	(DMA_CHAN_INTR_ENA_RIE)
+#define DMA_CHAN_INTR_DEFAULT_TX_4_10	(DMA_CHAN_INTR_ENA_TIE)
 
 /* channel 0 specific fields */
 #define DMA_CHAN0_DBG_STAT_TPS		GENMASK(15, 12)
@@ -186,9 +190,10 @@
 #define DMA_CHAN0_DBG_STAT_RPS_SHIFT	8
 
 int dwmac4_dma_reset(void __iomem *ioaddr);
-void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
-void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
-void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
 void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan);
 void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
 void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -97,21 +97,52 @@ void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
 	writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
 }
 
-void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
 {
-	writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
-	       DMA_CHAN_INTR_ENA(chan));
+	u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+	if (rx)
+		value |= DMA_CHAN_INTR_DEFAULT_RX;
+	if (tx)
+		value |= DMA_CHAN_INTR_DEFAULT_TX;
+
+	writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
 }
 
-void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
 {
-	writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
-	       ioaddr + DMA_CHAN_INTR_ENA(chan));
+	u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+	if (rx)
+		value |= DMA_CHAN_INTR_DEFAULT_RX_4_10;
+	if (tx)
+		value |= DMA_CHAN_INTR_DEFAULT_TX_4_10;
+
+	writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
 }
 
-void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
 {
-	writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan));
+	u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+	if (rx)
+		value &= ~DMA_CHAN_INTR_DEFAULT_RX;
+	if (tx)
+		value &= ~DMA_CHAN_INTR_DEFAULT_TX;
+
+	writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
+}
+
+void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+{
+	u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+	if (rx)
+		value &= ~DMA_CHAN_INTR_DEFAULT_RX_4_10;
+	if (tx)
+		value &= ~DMA_CHAN_INTR_DEFAULT_TX_4_10;
+
+	writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
 }
 
 int dwmac4_dma_interrupt(void __iomem *ioaddr,
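A detail worth flagging in this file: before the change, writing 0 masked everything regardless of bit layout, so a single dwmac4_disable_dma_irq() could serve both the 4.00 and 4.10a variants. Once disabling only clears the per-direction default bits, the 4.10a masks need their own helper, which is presumably why dwmac410_disable_dma_irq() is introduced from scratch here rather than modified.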
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -96,6 +96,8 @@
 
 /* DMA default interrupt mask */
 #define DMA_INTR_DEFAULT_MASK	(DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+#define DMA_INTR_DEFAULT_RX	(DMA_INTR_ENA_RIE)
+#define DMA_INTR_DEFAULT_TX	(DMA_INTR_ENA_TIE)
 
 /* DMA Status register defines */
 #define DMA_STATUS_GLPII	0x40000000	/* GMAC LPI interrupt */
@@ -130,8 +132,8 @@
 #define NUM_DWMAC1000_DMA_REGS	23
 
 void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
-void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
 void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
 void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
 void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -37,14 +37,28 @@ void dwmac_enable_dma_transmission(void __iomem *ioaddr)
 	writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
 }
 
-void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
 {
-	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+	u32 value = readl(ioaddr + DMA_INTR_ENA);
+
+	if (rx)
+		value |= DMA_INTR_DEFAULT_RX;
+	if (tx)
+		value |= DMA_INTR_DEFAULT_TX;
+
+	writel(value, ioaddr + DMA_INTR_ENA);
 }
 
-void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
 {
-	writel(0, ioaddr + DMA_INTR_ENA);
+	u32 value = readl(ioaddr + DMA_INTR_ENA);
+
+	if (rx)
+		value &= ~DMA_INTR_DEFAULT_RX;
+	if (tx)
+		value &= ~DMA_INTR_DEFAULT_TX;
+
+	writel(value, ioaddr + DMA_INTR_ENA);
 }
 
 void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -361,6 +361,8 @@
 #define XGMAC_TIE			BIT(0)
 #define XGMAC_DMA_INT_DEFAULT_EN	(XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \
 					 XGMAC_RIE | XGMAC_TIE)
+#define XGMAC_DMA_INT_DEFAULT_RX	(XGMAC_RBUE | XGMAC_RIE)
+#define XGMAC_DMA_INT_DEFAULT_TX	(XGMAC_TIE)
 #define XGMAC_DMA_CH_Rx_WATCHDOG(x)	(0x0000313c + (0x80 * (x)))
 #define XGMAC_RWT			GENMASK(7, 0)
 #define XGMAC_DMA_CH_STATUS(x)		(0x00003160 + (0x80 * (x)))
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -248,14 +248,30 @@ static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
 	writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
 }
 
-static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan,
+				    bool rx, bool tx)
 {
-	writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+	u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+
+	if (rx)
+		value |= XGMAC_DMA_INT_DEFAULT_RX;
+	if (tx)
+		value |= XGMAC_DMA_INT_DEFAULT_TX;
+
+	writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
 }
 
-static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan,
+				     bool rx, bool tx)
 {
-	writel(0, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+	u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+
+	if (rx)
+		value &= ~XGMAC_DMA_INT_DEFAULT_RX;
+	if (tx)
+		value &= ~XGMAC_DMA_INT_DEFAULT_TX;
+
+	writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
 }
 
 static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan)
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -187,8 +187,10 @@ struct stmmac_dma_ops {
 	void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
 				   void __iomem *ioaddr);
 	void (*enable_dma_transmission) (void __iomem *ioaddr);
-	void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
-	void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
+	void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan,
+			       bool rx, bool tx);
+	void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan,
+				bool rx, bool tx);
 	void (*start_tx)(void __iomem *ioaddr, u32 chan);
 	void (*stop_tx)(void __iomem *ioaddr, u32 chan);
 	void (*start_rx)(void __iomem *ioaddr, u32 chan);
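These two function-pointer slots are the only contract the core driver sees; stmmac_main.c reaches them through wrapper macros in hwif.h (stmmac_enable_dma_irq()/stmmac_disable_dma_irq()) that forward their arguments to whichever backend is bound, so widening the signature here widens it for every MAC variant at once. A toy model of that dispatch, with made-up names standing in for the driver's actual plumbing:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Miniature model of stmmac_dma_ops dispatch: callers see one entry
 * point, and the bound backend decides how rx/tx map onto its own
 * registers. Illustrative only. */
struct model_dma_ops {
        void (*disable_dma_irq)(uint32_t chan, bool rx, bool tx);
};

static void model_dwmac4_disable(uint32_t chan, bool rx, bool tx)
{
        printf("dwmac4 backend: chan %u rx=%d tx=%d\n", chan, rx, tx);
}

static const struct model_dma_ops model_ops = {
        .disable_dma_irq = model_dwmac4_disable,
};

int main(void)
{
        /* Roughly what stmmac_disable_dma_irq(priv, ioaddr, chan, 0, 1)
         * boils down to: forward the flags to the bound backend. */
        model_ops.disable_dma_irq(0, false, true);
        return 0;
}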
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -88,6 +88,7 @@ struct stmmac_channel {
 	struct napi_struct rx_napi ____cacheline_aligned_in_smp;
 	struct napi_struct tx_napi ____cacheline_aligned_in_smp;
 	struct stmmac_priv *priv_data;
+	spinlock_t lock;
 	u32 index;
 };
 
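This single new field is the heart of the locking story: the RX NAPI poller, the TX NAPI poller, the TX coalescing timer, and the hard interrupt handler can now each read-modify-write the same per-channel interrupt-enable register, and an unserialized RMW lets one context overwrite the bits another just changed. A compilable user-space model of why the lock is needed, with a pthread mutex standing in for ch->lock (build with -lpthread; illustrative, not driver code):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define RX_BIT (1u << 0)
#define TX_BIT (1u << 1)

static uint32_t int_en;                 /* shared interrupt-enable "register" */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;  /* models ch->lock */

static void *rx_poll_done(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);      /* spin_lock_irqsave() in the driver */
        int_en |= RX_BIT;               /* re-enable RX only */
        pthread_mutex_unlock(&lock);
        return NULL;
}

static void *tx_poll_done(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        int_en |= TX_BIT;               /* re-enable TX only */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        int_en = 0;                     /* both directions currently masked */
        pthread_create(&a, NULL, rx_poll_done, NULL);
        pthread_create(&b, NULL, tx_poll_done, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        /* Without the mutex, one RMW could clobber the other's bit. */
        printf("int_en = 0x%x (expect 0x3)\n", (unsigned int)int_en);
        return 0;
}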
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2069,17 +2069,25 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
 						 &priv->xstats, chan);
 	struct stmmac_channel *ch = &priv->channel[chan];
+	unsigned long flags;
 
 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
 		if (napi_schedule_prep(&ch->rx_napi)) {
-			stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+			spin_lock_irqsave(&ch->lock, flags);
+			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+			spin_unlock_irqrestore(&ch->lock, flags);
 			__napi_schedule_irqoff(&ch->rx_napi);
-			status |= handle_tx;
 		}
 	}
 
-	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use))
-		napi_schedule_irqoff(&ch->tx_napi);
+	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
+		if (napi_schedule_prep(&ch->tx_napi)) {
+			spin_lock_irqsave(&ch->lock, flags);
+			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+			spin_unlock_irqrestore(&ch->lock, flags);
+			__napi_schedule_irqoff(&ch->tx_napi);
+		}
+	}
 
 	return status;
 }
@@ -2274,14 +2282,14 @@ static void stmmac_tx_timer(struct timer_list *t)
 
 	ch = &priv->channel[tx_q->queue_index];
 
-	/*
-	 * If NAPI is already running we can miss some events. Let's rearm
-	 * the timer and try again.
-	 */
-	if (likely(napi_schedule_prep(&ch->tx_napi)))
+	if (likely(napi_schedule_prep(&ch->tx_napi))) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&ch->lock, flags);
+		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
+		spin_unlock_irqrestore(&ch->lock, flags);
 		__napi_schedule(&ch->tx_napi);
-	else
-		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
+	}
 }
 
 /**
@@ -3751,8 +3759,14 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
 	priv->xstats.napi_poll++;
 
 	work_done = stmmac_rx(priv, budget, chan);
-	if (work_done < budget && napi_complete_done(napi, work_done))
-		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+	if (work_done < budget && napi_complete_done(napi, work_done)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&ch->lock, flags);
+		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+		spin_unlock_irqrestore(&ch->lock, flags);
+	}
+
 	return work_done;
 }
 
@@ -3761,7 +3775,6 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
 	struct stmmac_channel *ch =
 		container_of(napi, struct stmmac_channel, tx_napi);
 	struct stmmac_priv *priv = ch->priv_data;
-	struct stmmac_tx_queue *tx_q;
 	u32 chan = ch->index;
 	int work_done;
 
@@ -3770,15 +3783,12 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
 	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
 	work_done = min(work_done, budget);
 
-	if (work_done < budget)
-		napi_complete_done(napi, work_done);
+	if (work_done < budget && napi_complete_done(napi, work_done)) {
+		unsigned long flags;
 
-	/* Force transmission restart */
-	tx_q = &priv->tx_queue[chan];
-	if (tx_q->cur_tx != tx_q->dirty_tx) {
-		stmmac_enable_dma_transmission(priv, priv->ioaddr);
-		stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
-				       chan);
+		spin_lock_irqsave(&ch->lock, flags);
+		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+		spin_unlock_irqrestore(&ch->lock, flags);
 	}
 
 	return work_done;
@@ -4714,6 +4724,7 @@ int stmmac_dvr_probe(struct device *device,
 	for (queue = 0; queue < maxq; queue++) {
 		struct stmmac_channel *ch = &priv->channel[queue];
 
+		spin_lock_init(&ch->lock);
 		ch->priv_data = priv;
 		ch->index = queue;
 
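Taken together, the stmmac_main.c changes give each direction a self-contained lifecycle: the interrupt (or TX coalescing timer) masks only its own direction under ch->lock, schedules only its own NAPI instance, and the poll routine unmasks only its own direction once it completes under budget. This is what lets the old "status |= handle_tx" forced-TX-scheduling go away. A compact model of that flow (illustrative only; the real transitions happen in hardirq and NAPI context under the channel lock):

#include <stdint.h>
#include <stdio.h>

#define RX_BIT (1u << 0)
#define TX_BIT (1u << 1)

static uint32_t int_en = RX_BIT | TX_BIT;

static void irq_event(uint32_t dir_bit, const char *name)
{
        int_en &= ~dir_bit;     /* stmmac_disable_dma_irq(..., rx, tx) */
        printf("%s irq: masked -> int_en=0x%x, own NAPI scheduled\n",
               name, (unsigned int)int_en);
}

static void napi_done(uint32_t dir_bit, const char *name)
{
        int_en |= dir_bit;      /* stmmac_enable_dma_irq(..., rx, tx) */
        printf("%s poll done: unmasked -> int_en=0x%x\n",
               name, (unsigned int)int_en);
}

int main(void)
{
        irq_event(RX_BIT, "rx");        /* TX stays armed while RX polls */
        irq_event(TX_BIT, "tx");
        napi_done(TX_BIT, "tx");        /* each side re-arms independently */
        napi_done(RX_BIT, "rx");
        return 0;
}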