spi: spi-geni-qcom: Mo' betta locking

If you added a bit of a delay (like a trace_printk) into the ISR for
the spi-geni-qcom driver, you would suddenly start seeing some errors
spit out. The problem was that, though the ISR itself held a lock,
other parts of the driver didn't always grab the lock.
One example race was this:

  CPU0                                       CPU1
  ----                                       ----
  spi_geni_set_cs()
   mas->cur_mcmd = CMD_CS;
   geni_se_setup_m_cmd(...)
   wait_for_completion_timeout(&xfer_done);
                                             <INTERRUPT>
                                             geni_spi_isr()
                                              complete(&xfer_done);
  <wakeup>
  pm_runtime_put(mas->dev);
  ... // back to SPI core
  spi_geni_transfer_one()
   setup_fifo_xfer()
    mas->cur_mcmd = CMD_XFER;
                                              mas->cur_mcmd = CMD_NONE; // bad!
                                             return IRQ_HANDLED;

Let's fix this. Before we start messing with hardware, we'll grab the
lock to make sure that the IRQ handler from some previous command has
really finished. We don't need to hold the lock unless we're in a
state where more interrupts can come in, but we at least need to make
sure the previous IRQ is done. This lock is used exclusively to
prevent the IRQ handler and the non-IRQ code from stomping on each other. The
SPI core handles all other mutual exclusion.
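
In code form, the pattern boils down to something like the sketch below (the
function name is made up for illustration; the fields, helpers, and CMD_*
values are the driver's own, and the real change is in the diff further down):

static void start_xfer_sketch(struct spi_geni_master *mas, u32 m_cmd)
{
        /*
         * Lock and immediately unlock: once we own the lock, any IRQ
         * handler still running for a previous command has finished,
         * so it can no longer stomp on our state.  We can't keep
         * holding the lock because the setup that follows may sleep
         * (e.g. clk_set_rate()).
         */
        spin_lock_irq(&mas->lock);
        spin_unlock_irq(&mas->lock);

        /* ... program word length, clocks, FIFO configuration, ... */
        mas->cur_mcmd = CMD_XFER;

        /*
         * Take the lock again only around kicking off the command,
         * since the next interrupt can arrive any time after that.
         */
        spin_lock_irq(&mas->lock);
        geni_se_setup_m_cmd(&mas->se, m_cmd, FRAGMENTATION);
        spin_unlock_irq(&mas->lock);
}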
As part of this, we change the way that the IRQ handler detects
spurious interrupts. Previously we checked for our state variable
being set to CMD_NONE, but that was done outside the spinlock. We
could move it into the spinlock, but instead let's just change it to
look for the lack of any IRQ status bits being set. This can be done
outside the lock--the hardware certainly isn't grabbing or looking at
the spinlock when it updates its status register.
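
Condensed, the resulting ISR skeleton looks roughly like this (shape only;
the real hunks are in geni_spi_isr() below):

static irqreturn_t geni_spi_isr_sketch(int irq, void *data)
{
        struct spi_geni_master *mas = data;
        struct geni_se *se = &mas->se;
        u32 m_irq;

        /*
         * Read the status register before taking the lock; the
         * hardware neither knows nor cares about our spinlock, and
         * no bits set means the interrupt wasn't ours.
         */
        m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
        if (!m_irq)
                return IRQ_NONE;

        spin_lock(&mas->lock);

        /* ... handle RX/TX watermarks and command-done conditions ... */

        writel(m_irq, se->base + SE_GENI_M_IRQ_CLEAR);
        spin_unlock(&mas->lock);

        return IRQ_HANDLED;
}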
It's possible that this will fix real (but very rare) errors seen in
the field that look like:
irq ...: nobody cared (try booting with the "irqpoll" option)
NOTE: an alternate strategy considered here was to always make the
complete() / spi_finalize_current_transfer() the very last thing in
our IRQ handler. With such a change you could argue that we could go
"lockless". In that case, though, we'd have to be very careful with
memory barriers to make sure we didn't end up with bugs on weakly
ordered memory. Using spinlocks makes the driver much easier to
understand.
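
For reference, that rejected alternative would have needed something along
these lines (purely hypothetical sketch, not part of this patch; the helper
names are invented):

/* ISR side: publish per-transfer state, then complete() as the very
 * last thing the handler does. */
static void lockless_isr_tail(struct spi_geni_master *mas)
{
        /* Release: all earlier stores are visible before CMD_NONE is. */
        smp_store_release(&mas->cur_mcmd, CMD_NONE);
        complete(&mas->xfer_done);
}

/* Waiter side, after wait_for_completion_timeout() returns: */
static bool lockless_isr_done(struct spi_geni_master *mas)
{
        /* Acquire: pairs with the release above on weakly ordered CPUs. */
        return smp_load_acquire(&mas->cur_mcmd) == CMD_NONE;
}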
Fixes: 561de45f72 ("spi: spi-geni-qcom: Add SPI driver support for GENI based QUP")
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Reviewed-by: Stephen Boyd <swboyd@chromium.org>
Link: https://lore.kernel.org/r/20200618080459.v4.2.I752ebdcfd5e8bf0de06d66e767b8974932b3620e@changeid
Signed-off-by: Mark Brown <broonie@kernel.org>
commit 2ee471a1e2
parent 539afdf969
drivers/spi/spi-geni-qcom.c

@@ -151,16 +151,18 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
 	struct geni_se *se = &mas->se;
 	unsigned long time_left;
 
-	reinit_completion(&mas->xfer_done);
 	pm_runtime_get_sync(mas->dev);
 	if (!(slv->mode & SPI_CS_HIGH))
 		set_flag = !set_flag;
 
+	spin_lock_irq(&mas->lock);
+	reinit_completion(&mas->xfer_done);
 	mas->cur_mcmd = CMD_CS;
 	if (set_flag)
 		geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
 	else
 		geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0);
+	spin_unlock_irq(&mas->lock);
 
 	time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
 	if (!time_left)
@@ -307,6 +309,21 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
 	u32 spi_tx_cfg, len;
 	struct geni_se *se = &mas->se;
 
+	/*
+	 * Ensure that our interrupt handler isn't still running from some
+	 * prior command before we start messing with the hardware behind
+	 * its back. We don't need to _keep_ the lock here since we're only
+	 * worried about racing with our interrupt handler. The SPI core
+	 * already handles making sure that we're not trying to do two
+	 * transfers at once or setting a chip select and doing a transfer
+	 * concurrently.
+	 *
+	 * NOTE: we actually _can't_ hold the lock here because possibly we
+	 * might call clk_set_rate() which needs to be able to sleep.
+	 */
+	spin_lock_irq(&mas->lock);
+	spin_unlock_irq(&mas->lock);
+
 	spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
 	if (xfer->bits_per_word != mas->cur_bits_per_word) {
 		spi_setup_word_len(mas, mode, xfer->bits_per_word);
@@ -367,6 +384,12 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
 	}
 	writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
 	mas->cur_mcmd = CMD_XFER;
+
+	/*
+	 * Lock around right before we start the transfer since our
+	 * interrupt could come in at any time now.
+	 */
+	spin_lock_irq(&mas->lock);
 	geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
 
 	/*
@@ -376,6 +399,7 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
 	 */
 	if (m_cmd & SPI_TX_ONLY)
 		writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
+	spin_unlock_irq(&mas->lock);
 }
 
 static int spi_geni_transfer_one(struct spi_master *spi,
@@ -478,11 +502,11 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
 	struct geni_se *se = &mas->se;
 	u32 m_irq;
 
-	if (mas->cur_mcmd == CMD_NONE)
+	m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+	if (!m_irq)
 		return IRQ_NONE;
 
 	spin_lock(&mas->lock);
-	m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
 
 	if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
 		geni_spi_handle_rx(mas);
@@ -522,8 +546,23 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
 		complete(&mas->xfer_done);
 	}
 
+	/*
+	 * It's safe or a good idea to Ack all of our interrupts at the
+	 * end of the function. Specifically:
+	 * - M_CMD_DONE_EN / M_RX_FIFO_LAST_EN: Edge triggered interrupts and
+	 *   clearing Acks. Clearing at the end relies on nobody else having
+	 *   started a new transfer yet or else we could be clearing _their_
+	 *   done bit, but everyone grabs the spinlock before starting a new
+	 *   transfer.
+	 * - M_RX_FIFO_WATERMARK_EN / M_TX_FIFO_WATERMARK_EN: These appear
+	 *   to be "latched level" interrupts so it's important to clear them
+	 *   _after_ you've handled the condition and always safe to do so
+	 *   since they'll re-assert if they're still happening.
+	 */
 	writel(m_irq, se->base + SE_GENI_M_IRQ_CLEAR);
+
 	spin_unlock(&mas->lock);
+
 	return IRQ_HANDLED;
 }