2018-08-22 06:02:23 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2012-03-07 13:46:25 +08:00
|
|
|
/*
|
|
|
|
* SH RSPI driver
|
|
|
|
*
|
2014-01-24 16:43:58 +08:00
|
|
|
* Copyright (C) 2012, 2013 Renesas Solutions Corp.
|
2014-01-30 16:43:50 +08:00
|
|
|
* Copyright (C) 2014 Glider bvba
|
2012-03-07 13:46:25 +08:00
|
|
|
*
|
|
|
|
* Based on spi-sh.c:
|
|
|
|
* Copyright (C) 2011 Renesas Solutions Corp.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/platform_device.h>
|
|
|
|
#include <linux/io.h>
|
|
|
|
#include <linux/clk.h>
|
2012-04-20 13:50:36 +08:00
|
|
|
#include <linux/dmaengine.h>
|
|
|
|
#include <linux/dma-mapping.h>
|
2014-01-28 17:21:38 +08:00
|
|
|
#include <linux/of_device.h>
|
2014-03-11 17:59:12 +08:00
|
|
|
#include <linux/pm_runtime.h>
|
2012-04-20 13:50:36 +08:00
|
|
|
#include <linux/sh_dma.h>
|
2012-03-07 13:46:25 +08:00
|
|
|
#include <linux/spi/spi.h>
|
2012-04-20 13:50:36 +08:00
|
|
|
#include <linux/spi/rspi.h>
|
2012-03-07 13:46:25 +08:00
|
|
|
|
2014-01-12 18:27:37 +08:00
|
|
|
#define RSPI_SPCR 0x00 /* Control Register */
|
|
|
|
#define RSPI_SSLP 0x01 /* Slave Select Polarity Register */
|
|
|
|
#define RSPI_SPPCR 0x02 /* Pin Control Register */
|
|
|
|
#define RSPI_SPSR 0x03 /* Status Register */
|
|
|
|
#define RSPI_SPDR 0x04 /* Data Register */
|
|
|
|
#define RSPI_SPSCR 0x08 /* Sequence Control Register */
|
|
|
|
#define RSPI_SPSSR 0x09 /* Sequence Status Register */
|
|
|
|
#define RSPI_SPBR 0x0a /* Bit Rate Register */
|
|
|
|
#define RSPI_SPDCR 0x0b /* Data Control Register */
|
|
|
|
#define RSPI_SPCKD 0x0c /* Clock Delay Register */
|
|
|
|
#define RSPI_SSLND 0x0d /* Slave Select Negation Delay Register */
|
|
|
|
#define RSPI_SPND 0x0e /* Next-Access Delay Register */
|
2014-01-24 16:43:59 +08:00
|
|
|
#define RSPI_SPCR2 0x0f /* Control Register 2 (SH only) */
|
2014-01-12 18:27:37 +08:00
|
|
|
#define RSPI_SPCMD0 0x10 /* Command Register 0 */
|
|
|
|
#define RSPI_SPCMD1 0x12 /* Command Register 1 */
|
|
|
|
#define RSPI_SPCMD2 0x14 /* Command Register 2 */
|
|
|
|
#define RSPI_SPCMD3 0x16 /* Command Register 3 */
|
|
|
|
#define RSPI_SPCMD4 0x18 /* Command Register 4 */
|
|
|
|
#define RSPI_SPCMD5 0x1a /* Command Register 5 */
|
|
|
|
#define RSPI_SPCMD6 0x1c /* Command Register 6 */
|
|
|
|
#define RSPI_SPCMD7 0x1e /* Command Register 7 */
|
2014-01-30 16:43:50 +08:00
|
|
|
#define RSPI_SPCMD(i) (RSPI_SPCMD0 + (i) * 2)
|
|
|
|
#define RSPI_NUM_SPCMD 8
|
|
|
|
#define RSPI_RZ_NUM_SPCMD 4
|
|
|
|
#define QSPI_NUM_SPCMD 4
|
2014-01-24 16:43:59 +08:00
|
|
|
|
|
|
|
/* RSPI on RZ only */
|
2014-01-12 18:27:37 +08:00
|
|
|
#define RSPI_SPBFCR 0x20 /* Buffer Control Register */
|
|
|
|
#define RSPI_SPBFDR 0x22 /* Buffer Data Count Setting Register */
|
2012-03-07 13:46:25 +08:00
|
|
|
|
2014-01-24 16:43:59 +08:00
|
|
|
/* QSPI only */
|
2014-01-12 18:27:38 +08:00
|
|
|
#define QSPI_SPBFCR 0x18 /* Buffer Control Register */
|
|
|
|
#define QSPI_SPBDCR 0x1a /* Buffer Data Count Register */
|
|
|
|
#define QSPI_SPBMUL0 0x1c /* Transfer Data Length Multiplier Setting Register 0 */
|
|
|
|
#define QSPI_SPBMUL1 0x20 /* Transfer Data Length Multiplier Setting Register 1 */
|
|
|
|
#define QSPI_SPBMUL2 0x24 /* Transfer Data Length Multiplier Setting Register 2 */
|
|
|
|
#define QSPI_SPBMUL3 0x28 /* Transfer Data Length Multiplier Setting Register 3 */
|
2014-01-30 16:43:50 +08:00
|
|
|
#define QSPI_SPBMUL(i) (QSPI_SPBMUL0 + (i) * 4)
|
2013-09-03 12:10:26 +08:00
|
|
|
|
2014-01-12 18:27:37 +08:00
|
|
|
/* SPCR - Control Register */
|
|
|
|
#define SPCR_SPRIE 0x80 /* Receive Interrupt Enable */
|
|
|
|
#define SPCR_SPE 0x40 /* Function Enable */
|
|
|
|
#define SPCR_SPTIE 0x20 /* Transmit Interrupt Enable */
|
|
|
|
#define SPCR_SPEIE 0x10 /* Error Interrupt Enable */
|
|
|
|
#define SPCR_MSTR 0x08 /* Master/Slave Mode Select */
|
|
|
|
#define SPCR_MODFEN 0x04 /* Mode Fault Error Detection Enable */
|
|
|
|
/* RSPI on SH only */
|
|
|
|
#define SPCR_TXMD 0x02 /* TX Only Mode (vs. Full Duplex) */
|
|
|
|
#define SPCR_SPMS 0x01 /* 3-wire Mode (vs. 4-wire) */
|
2014-08-28 16:10:19 +08:00
|
|
|
/* QSPI on R-Car Gen2 only */
|
2014-01-12 18:27:38 +08:00
|
|
|
#define SPCR_WSWAP 0x02 /* Word Swap of read-data for DMAC */
|
|
|
|
#define SPCR_BSWAP 0x01 /* Byte Swap of read-data for DMAC */
|
2014-01-12 18:27:37 +08:00
|
|
|
|
|
|
|
/* SSLP - Slave Select Polarity Register */
|
|
|
|
#define SSLP_SSL1P 0x02 /* SSL1 Signal Polarity Setting */
|
|
|
|
#define SSLP_SSL0P 0x01 /* SSL0 Signal Polarity Setting */
|
|
|
|
|
|
|
|
/* SPPCR - Pin Control Register */
|
|
|
|
#define SPPCR_MOIFE 0x20 /* MOSI Idle Value Fixing Enable */
|
|
|
|
#define SPPCR_MOIFV 0x10 /* MOSI Idle Fixed Value */
|
2012-03-07 13:46:25 +08:00
|
|
|
#define SPPCR_SPOM 0x04
|
2014-01-12 18:27:37 +08:00
|
|
|
#define SPPCR_SPLP2 0x02 /* Loopback Mode 2 (non-inverting) */
|
|
|
|
#define SPPCR_SPLP 0x01 /* Loopback Mode (inverting) */
|
|
|
|
|
2014-01-12 18:27:38 +08:00
|
|
|
#define SPPCR_IO3FV	0x04	/* Single-/Dual-SPI Mode IO3 Output Fixed Value */
#define SPPCR_IO2FV	0x02	/* Single-/Dual-SPI Mode IO2 Output Fixed Value */
|
|
|
|
|
2014-01-12 18:27:37 +08:00
|
|
|
/* SPSR - Status Register */
|
|
|
|
#define SPSR_SPRF 0x80 /* Receive Buffer Full Flag */
|
|
|
|
#define SPSR_TEND 0x40 /* Transmit End */
|
|
|
|
#define SPSR_SPTEF 0x20 /* Transmit Buffer Empty Flag */
|
|
|
|
#define SPSR_PERF 0x08 /* Parity Error Flag */
|
|
|
|
#define SPSR_MODF 0x04 /* Mode Fault Error Flag */
|
|
|
|
#define SPSR_IDLNF 0x02 /* RSPI Idle Flag */
|
2014-01-24 16:43:59 +08:00
|
|
|
#define SPSR_OVRF 0x01 /* Overrun Error Flag (RSPI only) */
|
2014-01-12 18:27:37 +08:00
|
|
|
|
|
|
|
/* SPSCR - Sequence Control Register */
|
|
|
|
#define SPSCR_SPSLN_MASK 0x07 /* Sequence Length Specification */
|
|
|
|
|
|
|
|
/* SPSSR - Sequence Status Register */
|
|
|
|
#define SPSSR_SPECM_MASK 0x70 /* Command Error Mask */
|
|
|
|
#define SPSSR_SPCP_MASK 0x07 /* Command Pointer Mask */
|
|
|
|
|
|
|
|
/* SPDCR - Data Control Register */
|
|
|
|
#define SPDCR_TXDMY 0x80 /* Dummy Data Transmission Enable */
|
|
|
|
#define SPDCR_SPLW1 0x40 /* Access Width Specification (RZ) */
|
|
|
|
#define SPDCR_SPLW0 0x20 /* Access Width Specification (RZ) */
|
|
|
|
#define SPDCR_SPLLWORD (SPDCR_SPLW1 | SPDCR_SPLW0)
|
|
|
|
#define SPDCR_SPLWORD SPDCR_SPLW1
|
|
|
|
#define SPDCR_SPLBYTE SPDCR_SPLW0
|
|
|
|
#define SPDCR_SPLW 0x20 /* Access Width Specification (SH) */
|
2014-01-24 16:43:59 +08:00
|
|
|
#define SPDCR_SPRDTD 0x10 /* Receive Transmit Data Select (SH) */
|
2012-03-07 13:46:25 +08:00
|
|
|
#define SPDCR_SLSEL1 0x08
|
|
|
|
#define SPDCR_SLSEL0 0x04
|
2014-01-24 16:43:59 +08:00
|
|
|
#define SPDCR_SLSEL_MASK 0x0c /* SSL1 Output Select (SH) */
|
2012-03-07 13:46:25 +08:00
|
|
|
#define SPDCR_SPFC1 0x02
|
|
|
|
#define SPDCR_SPFC0 0x01
|
2014-01-24 16:43:59 +08:00
|
|
|
#define SPDCR_SPFC_MASK 0x03 /* Frame Count Setting (1-4) (SH) */
|
2012-03-07 13:46:25 +08:00
|
|
|
|
2014-01-12 18:27:37 +08:00
|
|
|
/* SPCKD - Clock Delay Register */
|
|
|
|
#define SPCKD_SCKDL_MASK 0x07 /* Clock Delay Setting (1-8) */
|
2012-03-07 13:46:25 +08:00
|
|
|
|
2014-01-12 18:27:37 +08:00
|
|
|
/* SSLND - Slave Select Negation Delay Register */
|
|
|
|
#define SSLND_SLNDL_MASK 0x07 /* SSL Negation Delay Setting (1-8) */
|
2012-03-07 13:46:25 +08:00
|
|
|
|
2014-01-12 18:27:37 +08:00
|
|
|
/* SPND - Next-Access Delay Register */
|
|
|
|
#define SPND_SPNDL_MASK 0x07 /* Next-Access Delay Setting (1-8) */
|
2012-03-07 13:46:25 +08:00
|
|
|
|
2014-01-12 18:27:37 +08:00
|
|
|
/* SPCR2 - Control Register 2 */
|
|
|
|
#define SPCR2_PTE 0x08 /* Parity Self-Test Enable */
|
|
|
|
#define SPCR2_SPIE 0x04 /* Idle Interrupt Enable */
|
|
|
|
#define SPCR2_SPOE 0x02 /* Odd Parity Enable (vs. Even) */
|
|
|
|
#define SPCR2_SPPE 0x01 /* Parity Enable */
|
2012-03-07 13:46:25 +08:00
|
|
|
|
2014-01-12 18:27:37 +08:00
|
|
|
/* SPCMDn - Command Registers */
|
|
|
|
#define SPCMD_SCKDEN 0x8000 /* Clock Delay Setting Enable */
|
|
|
|
#define SPCMD_SLNDEN 0x4000 /* SSL Negation Delay Setting Enable */
|
|
|
|
#define SPCMD_SPNDEN 0x2000 /* Next-Access Delay Enable */
|
|
|
|
#define SPCMD_LSBF 0x1000 /* LSB First */
|
|
|
|
#define SPCMD_SPB_MASK 0x0f00 /* Data Length Setting */
|
2012-03-07 13:46:25 +08:00
|
|
|
#define SPCMD_SPB_8_TO_16(bit) (((bit - 1) << 8) & SPCMD_SPB_MASK)
|
2014-01-30 16:43:50 +08:00
|
|
|
#define SPCMD_SPB_8BIT 0x0000 /* QSPI only */
|
2013-09-03 12:10:26 +08:00
|
|
|
#define SPCMD_SPB_16BIT 0x0100
|
2012-03-07 13:46:25 +08:00
|
|
|
#define SPCMD_SPB_20BIT 0x0000
|
|
|
|
#define SPCMD_SPB_24BIT 0x0100
|
|
|
|
#define SPCMD_SPB_32BIT 0x0200
|
2014-01-12 18:27:37 +08:00
|
|
|
#define SPCMD_SSLKP 0x0080 /* SSL Signal Level Keeping */
|
2014-01-12 18:27:38 +08:00
|
|
|
#define SPCMD_SPIMOD_MASK 0x0060 /* SPI Operating Mode (QSPI only) */
|
|
|
|
#define SPCMD_SPIMOD1 0x0040
|
|
|
|
#define SPCMD_SPIMOD0 0x0020
|
|
|
|
#define SPCMD_SPIMOD_SINGLE 0
|
|
|
|
#define SPCMD_SPIMOD_DUAL SPCMD_SPIMOD0
|
|
|
|
#define SPCMD_SPIMOD_QUAD SPCMD_SPIMOD1
|
|
|
|
#define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */
|
2014-01-12 18:27:37 +08:00
|
|
|
#define SPCMD_SSLA_MASK 0x0030 /* SSL Assert Signal Setting (RSPI) */
|
|
|
|
#define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */
|
|
|
|
#define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */
|
|
|
|
#define SPCMD_CPHA 0x0001 /* Clock Phase Setting */
|
|
|
|
|
|
|
|
/* SPBFCR - Buffer Control Register */
|
2014-01-24 16:43:59 +08:00
|
|
|
#define SPBFCR_TXRST 0x80 /* Transmit Buffer Data Reset */
|
|
|
|
#define SPBFCR_RXRST 0x40 /* Receive Buffer Data Reset */
|
2014-01-12 18:27:37 +08:00
|
|
|
#define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */
|
|
|
|
#define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */
|
2014-10-23 11:14:13 +08:00
|
|
|
/* QSPI on R-Car Gen2 */
|
|
|
|
#define SPBFCR_TXTRG_1B 0x00 /* 31 bytes (1 byte available) */
|
|
|
|
#define SPBFCR_TXTRG_32B 0x30 /* 0 byte (32 bytes available) */
|
|
|
|
#define SPBFCR_RXTRG_1B 0x00 /* 1 byte (31 bytes available) */
|
|
|
|
#define SPBFCR_RXTRG_32B 0x07 /* 32 bytes (0 byte available) */
|
|
|
|
|
|
|
|
#define QSPI_BUFFER_SIZE 32u
|
2013-09-03 12:10:26 +08:00
|
|
|
|
2012-03-07 13:46:25 +08:00
|
|
|
/* Per-controller driver state */
struct rspi_data {
	void __iomem *addr;		/* ioremapped register base */
	u32 max_speed_hz;		/* maximum bit rate of the attached device */
	struct spi_controller *ctlr;
	wait_queue_head_t wait;		/* woken by IRQ handler / DMA completion */
	struct clk *clk;
	u16 spcmd;			/* cached value written to RSPI_SPCMD0 */
	u8 spsr;			/* last RSPI_SPSR snapshot (see rspi_wait_for_interrupt) */
	u8 sppcr;			/* pin-control setup written to RSPI_SPPCR */
	int rx_irq, tx_irq;		/* may be the same IRQ on some variants */
	const struct spi_ops *ops;	/* variant-specific hooks (RSPI/RZ/QSPI) */

	unsigned dma_callbacked:1;	/* set by rspi_dma_complete() */
	unsigned byte_access:1;		/* 8-bit (vs. 16-bit) data register access */
};
|
|
|
|
|
2013-12-24 17:49:32 +08:00
|
|
|
/* Write an 8-bit value to the register at @offset */
static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
{
	iowrite8(data, rspi->addr + offset);
}
|
|
|
|
|
2013-12-24 17:49:32 +08:00
|
|
|
/* Write a 16-bit value to the register at @offset */
static void rspi_write16(const struct rspi_data *rspi, u16 data, u16 offset)
{
	iowrite16(data, rspi->addr + offset);
}
|
|
|
|
|
2013-12-24 17:49:32 +08:00
|
|
|
/* Write a 32-bit value to the register at @offset */
static void rspi_write32(const struct rspi_data *rspi, u32 data, u16 offset)
{
	iowrite32(data, rspi->addr + offset);
}
|
|
|
|
|
2013-12-24 17:49:32 +08:00
|
|
|
/* Read an 8-bit value from the register at @offset */
static u8 rspi_read8(const struct rspi_data *rspi, u16 offset)
{
	return ioread8(rspi->addr + offset);
}
|
|
|
|
|
2013-12-24 17:49:32 +08:00
|
|
|
/* Read a 16-bit value from the register at @offset */
static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
{
	return ioread16(rspi->addr + offset);
}
|
|
|
|
|
2014-01-24 16:43:53 +08:00
|
|
|
/* Store one word in the data register, honouring the configured access width */
static void rspi_write_data(const struct rspi_data *rspi, u16 data)
{
	if (!rspi->byte_access) {
		/* 16-bit access */
		rspi_write16(rspi, data, RSPI_SPDR);
		return;
	}
	rspi_write8(rspi, data, RSPI_SPDR);
}
|
|
|
|
|
|
|
|
static u16 rspi_read_data(const struct rspi_data *rspi)
|
|
|
|
{
|
|
|
|
if (rspi->byte_access)
|
|
|
|
return rspi_read8(rspi, RSPI_SPDR);
|
|
|
|
else /* 16 bit */
|
|
|
|
return rspi_read16(rspi, RSPI_SPDR);
|
|
|
|
}
|
|
|
|
|
2013-09-03 12:10:26 +08:00
|
|
|
/* Variant-specific operations and capabilities (RSPI on SH, RSPI on RZ, QSPI) */
struct spi_ops {
	/* one-time controller setup for the given word access size */
	int (*set_config_register)(struct rspi_data *rspi, int access_size);
	/* per-transfer handler, used as spi_controller transfer_one */
	int (*transfer_one)(struct spi_controller *ctlr,
			    struct spi_device *spi, struct spi_transfer *xfer);
	u16 mode_bits;		/* SPI mode bits supported by this variant */
	u16 flags;		/* spi_controller flags */
	u16 fifo_size;		/* FIFO depth; larger transfers may use DMA */
};
|
|
|
|
|
|
|
|
/*
|
2014-01-24 16:43:59 +08:00
|
|
|
* functions for RSPI on legacy SH
|
2013-09-03 12:10:26 +08:00
|
|
|
*/
|
2014-01-24 16:43:53 +08:00
|
|
|
/*
 * Program the controller for a transfer with the given word size (in bits).
 * Leaves the controller in master mode with 16-bit data-register access.
 * Always returns 0.
 */
static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
{
	int spbr;

	/* Sets output mode, MOSI signal, and (optionally) loopback */
	rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);

	/* Sets transfer bit rate */
	spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
			    2 * rspi->max_speed_hz) - 1;
	rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);

	/* Disable dummy transmission, set 16-bit word access, 1 frame */
	rspi_write8(rspi, 0, RSPI_SPDCR);
	rspi->byte_access = 0;

	/* Sets RSPCK, SSL, next-access delay value */
	rspi_write8(rspi, 0x00, RSPI_SPCKD);
	rspi_write8(rspi, 0x00, RSPI_SSLND);
	rspi_write8(rspi, 0x00, RSPI_SPND);

	/* Sets parity, interrupt mask */
	rspi_write8(rspi, 0x00, RSPI_SPCR2);

	/* Resets sequencer */
	rspi_write8(rspi, 0, RSPI_SPSCR);
	/* Fold the data length into the cached command word and program it */
	rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
	rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);

	/* Sets RSPI mode */
	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);

	return 0;
}
|
|
|
|
|
2014-01-24 16:43:59 +08:00
|
|
|
/*
|
|
|
|
* functions for RSPI on RZ
|
|
|
|
*/
|
|
|
|
/*
 * Program the RZ variant of the controller for a transfer with the given
 * word size (in bits).  Unlike the SH variant, RZ uses byte-wide data
 * register access and an extra clock pre-divider.  Always returns 0.
 */
static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
{
	int spbr;
	int div = 0;
	unsigned long clksrc;

	/* Sets output mode, MOSI signal, and (optionally) loopback */
	rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);

	/*
	 * Pick the smallest power-of-two pre-divider (up to /8) that keeps
	 * the requested speed reachable by the 8-bit bit-rate register.
	 */
	clksrc = clk_get_rate(rspi->clk);
	while (div < 3) {
		if (rspi->max_speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */
			break;
		div++;
		clksrc /= 2;
	}

	/* Sets transfer bit rate */
	spbr = DIV_ROUND_UP(clksrc, 2 * rspi->max_speed_hz) - 1;
	rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
	/* Divider goes into the BRDV field (bits 2-3) of SPCMD */
	rspi->spcmd |= div << 2;

	/* Disable dummy transmission, set byte access */
	rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR);
	rspi->byte_access = 1;

	/* Sets RSPCK, SSL, next-access delay value */
	rspi_write8(rspi, 0x00, RSPI_SPCKD);
	rspi_write8(rspi, 0x00, RSPI_SSLND);
	rspi_write8(rspi, 0x00, RSPI_SPND);

	/* Resets sequencer */
	rspi_write8(rspi, 0, RSPI_SPSCR);
	/* Fold the data length into the cached command word and program it */
	rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
	rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);

	/* Sets RSPI mode */
	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);

	return 0;
}
|
|
|
|
|
2013-09-03 12:10:26 +08:00
|
|
|
/*
|
|
|
|
* functions for QSPI
|
|
|
|
*/
|
2014-01-24 16:43:53 +08:00
|
|
|
/*
 * Program the QSPI variant of the controller for a transfer with the given
 * word size (8, 16, or anything else meaning 32 bits).  Also resets the
 * transfer-length multiplier, the FIFOs, and the sequencer.
 * Always returns 0.
 */
static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
{
	int spbr;

	/* Sets output mode, MOSI signal, and (optionally) loopback */
	rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);

	/* Sets transfer bit rate */
	spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->max_speed_hz);
	rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);

	/* Disable dummy transmission, set byte access */
	rspi_write8(rspi, 0, RSPI_SPDCR);
	rspi->byte_access = 1;

	/* Sets RSPCK, SSL, next-access delay value */
	rspi_write8(rspi, 0x00, RSPI_SPCKD);
	rspi_write8(rspi, 0x00, RSPI_SSLND);
	rspi_write8(rspi, 0x00, RSPI_SPND);

	/* Data Length Setting */
	if (access_size == 8)
		rspi->spcmd |= SPCMD_SPB_8BIT;
	else if (access_size == 16)
		rspi->spcmd |= SPCMD_SPB_16BIT;
	else
		rspi->spcmd |= SPCMD_SPB_32BIT;

	/* Enable the clock / SSL-negation / next-access delay settings */
	rspi->spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SPNDEN;

	/* Resets transfer data length */
	rspi_write32(rspi, 0, QSPI_SPBMUL0);

	/* Resets transmit and receive buffer */
	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
	/* Sets buffer to allow normal operation */
	rspi_write8(rspi, 0x00, QSPI_SPBFCR);

	/* Resets sequencer */
	rspi_write8(rspi, 0, RSPI_SPSCR);
	rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);

	/* Sets RSPI mode */
	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);

	return 0;
}
|
|
|
|
|
2014-10-23 11:14:13 +08:00
|
|
|
/* Read-modify-write helper: replace the bits selected by @mask with @val */
static void qspi_update(const struct rspi_data *rspi, u8 mask, u8 val, u8 reg)
{
	u8 data = rspi_read8(rspi, reg);

	data = (data & ~mask) | (val & mask);
	rspi_write8(rspi, data, reg);
}
|
|
|
|
|
2015-06-23 21:04:29 +08:00
|
|
|
static unsigned int qspi_set_send_trigger(struct rspi_data *rspi,
|
|
|
|
unsigned int len)
|
2014-10-23 11:14:13 +08:00
|
|
|
{
|
|
|
|
unsigned int n;
|
|
|
|
|
|
|
|
n = min(len, QSPI_BUFFER_SIZE);
|
|
|
|
|
|
|
|
if (len >= QSPI_BUFFER_SIZE) {
|
|
|
|
/* sets triggering number to 32 bytes */
|
|
|
|
qspi_update(rspi, SPBFCR_TXTRG_MASK,
|
|
|
|
SPBFCR_TXTRG_32B, QSPI_SPBFCR);
|
|
|
|
} else {
|
|
|
|
/* sets triggering number to 1 byte */
|
|
|
|
qspi_update(rspi, SPBFCR_TXTRG_MASK,
|
|
|
|
SPBFCR_TXTRG_1B, QSPI_SPBFCR);
|
|
|
|
}
|
|
|
|
|
|
|
|
return n;
|
|
|
|
}
|
|
|
|
|
2016-11-04 16:38:54 +08:00
|
|
|
static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
|
2014-10-23 11:14:13 +08:00
|
|
|
{
|
|
|
|
unsigned int n;
|
|
|
|
|
|
|
|
n = min(len, QSPI_BUFFER_SIZE);
|
|
|
|
|
|
|
|
if (len >= QSPI_BUFFER_SIZE) {
|
|
|
|
/* sets triggering number to 32 bytes */
|
|
|
|
qspi_update(rspi, SPBFCR_RXTRG_MASK,
|
|
|
|
SPBFCR_RXTRG_32B, QSPI_SPBFCR);
|
|
|
|
} else {
|
|
|
|
/* sets triggering number to 1 byte */
|
|
|
|
qspi_update(rspi, SPBFCR_RXTRG_MASK,
|
|
|
|
SPBFCR_RXTRG_1B, QSPI_SPBFCR);
|
|
|
|
}
|
2016-11-04 16:38:54 +08:00
|
|
|
return n;
|
2014-10-23 11:14:13 +08:00
|
|
|
}
|
|
|
|
|
2013-09-03 12:10:26 +08:00
|
|
|
#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
|
|
|
|
|
2013-12-24 17:49:32 +08:00
|
|
|
/* Set the given interrupt-enable bits (SPCR_SP?IE) in the control register */
static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
}
|
|
|
|
|
2013-12-24 17:49:32 +08:00
|
|
|
/* Clear the given interrupt-enable bits (SPCR_SP?IE) in the control register */
static void rspi_disable_irq(const struct rspi_data *rspi, u8 disable)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
}
|
|
|
|
|
|
|
|
/*
 * Wait until one of the status bits in @wait_mask is set in SPSR.
 *
 * Returns immediately when the condition is already satisfied, so no
 * interrupt is generated when there is space in the output FIFO or data in
 * the input FIFO; otherwise @enable_bit is enabled in SPCR and we sleep
 * until the IRQ handler (which refreshes rspi->spsr) wakes us.
 *
 * Returns 0 on success, or -ETIMEDOUT after HZ jiffies without progress.
 */
static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
				   u8 enable_bit)
{
	int ret;

	rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
	if (rspi->spsr & wait_mask)
		return 0;

	rspi_enable_irq(rspi, enable_bit);
	ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
	if (ret == 0 && !(rspi->spsr & wait_mask))
		return -ETIMEDOUT;

	return 0;
}
|
|
|
|
|
2014-06-02 21:38:03 +08:00
|
|
|
/* Wait for the transmit buffer empty flag; 0 on success, -ETIMEDOUT otherwise */
static inline int rspi_wait_for_tx_empty(struct rspi_data *rspi)
{
	return rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
}
|
|
|
|
|
|
|
|
/* Wait for the receive buffer full flag; 0 on success, -ETIMEDOUT otherwise */
static inline int rspi_wait_for_rx_full(struct rspi_data *rspi)
{
	return rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE);
}
|
|
|
|
|
2014-01-24 16:43:54 +08:00
|
|
|
/* Send one word once the transmit buffer is empty; 0 on success */
static int rspi_data_out(struct rspi_data *rspi, u8 data)
{
	int ret = rspi_wait_for_tx_empty(rspi);

	if (ret < 0) {
		dev_err(&rspi->ctlr->dev, "transmit timeout\n");
		return ret;
	}

	rspi_write_data(rspi, data);
	return 0;
}
|
|
|
|
|
|
|
|
static int rspi_data_in(struct rspi_data *rspi)
|
|
|
|
{
|
2014-06-02 21:38:03 +08:00
|
|
|
int error;
|
2014-01-24 16:43:54 +08:00
|
|
|
u8 data;
|
|
|
|
|
2014-06-02 21:38:03 +08:00
|
|
|
error = rspi_wait_for_rx_full(rspi);
|
|
|
|
if (error < 0) {
|
2019-02-08 17:09:07 +08:00
|
|
|
dev_err(&rspi->ctlr->dev, "receive timeout\n");
|
2014-06-02 21:38:03 +08:00
|
|
|
return error;
|
2014-01-24 16:43:54 +08:00
|
|
|
}
|
|
|
|
data = rspi_read_data(rspi);
|
|
|
|
return data;
|
|
|
|
}
|
|
|
|
|
2014-06-02 21:38:07 +08:00
|
|
|
/*
 * Programmed-I/O transfer of @n words.  Either @tx or @rx may be NULL for
 * unidirectional transfers.  Returns 0 on success or a negative error code.
 */
static int rspi_pio_transfer(struct rspi_data *rspi, const u8 *tx, u8 *rx,
			     unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		int ret;

		if (tx) {
			ret = rspi_data_out(rspi, tx[i]);
			if (ret < 0)
				return ret;
		}
		if (rx) {
			ret = rspi_data_in(rspi);
			if (ret < 0)
				return ret;
			rx[i] = ret;
		}
	}

	return 0;
}
|
|
|
|
|
2012-04-20 13:50:36 +08:00
|
|
|
/* DMA completion callback: flag completion and wake the waiting transfer */
static void rspi_dma_complete(void *arg)
{
	struct rspi_data *rspi = arg;

	rspi->dma_callbacked = 1;
	wake_up_interruptible(&rspi->wait);
}
|
|
|
|
|
2014-06-02 21:38:15 +08:00
|
|
|
/*
 * Perform one transfer using DMA for TX and/or RX; either scatterlist may
 * be NULL for a unidirectional transfer.
 *
 * The descriptors are prepared and submitted first, as that is the step
 * that may fail; -EAGAIN tells the caller to fall back to PIO.  While the
 * DMA runs, the CPU-side IRQ lines are disabled because the DMAC needs the
 * SPxIE enable bits set in SPCR (see comment below).
 *
 * Returns 0 on success or a negative error code.
 */
static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
			     struct sg_table *rx)
{
	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
	u8 irq_mask = 0;
	unsigned int other_irq = 0;
	dma_cookie_t cookie;
	int ret;

	/* First prepare and submit the DMA request(s), as this may fail */
	if (rx) {
		desc_rx = dmaengine_prep_slave_sg(rspi->ctlr->dma_rx, rx->sgl,
					rx->nents, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_rx) {
			ret = -EAGAIN;
			goto no_dma_rx;
		}

		desc_rx->callback = rspi_dma_complete;
		desc_rx->callback_param = rspi;
		cookie = dmaengine_submit(desc_rx);
		if (dma_submit_error(cookie)) {
			ret = cookie;
			goto no_dma_rx;
		}

		irq_mask |= SPCR_SPRIE;
	}

	if (tx) {
		desc_tx = dmaengine_prep_slave_sg(rspi->ctlr->dma_tx, tx->sgl,
					tx->nents, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_tx) {
			ret = -EAGAIN;
			goto no_dma_tx;
		}

		if (rx) {
			/* No callback: completion is signalled by the RX side */
			desc_tx->callback = NULL;
		} else {
			desc_tx->callback = rspi_dma_complete;
			desc_tx->callback_param = rspi;
		}
		cookie = dmaengine_submit(desc_tx);
		if (dma_submit_error(cookie)) {
			ret = cookie;
			goto no_dma_tx;
		}

		irq_mask |= SPCR_SPTIE;
	}

	/*
	 * DMAC needs SPxIE, but if SPxIE is set, the IRQ routine will be
	 * called. So, this driver disables the IRQ while DMA transfer.
	 */
	if (tx)
		disable_irq(other_irq = rspi->tx_irq);
	if (rx && rspi->rx_irq != other_irq)
		disable_irq(rspi->rx_irq);

	rspi_enable_irq(rspi, irq_mask);
	rspi->dma_callbacked = 0;

	/* Now start DMA */
	if (rx)
		dma_async_issue_pending(rspi->ctlr->dma_rx);
	if (tx)
		dma_async_issue_pending(rspi->ctlr->dma_tx);

	ret = wait_event_interruptible_timeout(rspi->wait,
					       rspi->dma_callbacked, HZ);
	if (ret > 0 && rspi->dma_callbacked) {
		ret = 0;
	} else {
		if (!ret) {
			dev_err(&rspi->ctlr->dev, "DMA timeout\n");
			ret = -ETIMEDOUT;
		}
		/* Timed out or interrupted: cancel the in-flight DMA */
		if (tx)
			dmaengine_terminate_all(rspi->ctlr->dma_tx);
		if (rx)
			dmaengine_terminate_all(rspi->ctlr->dma_rx);
	}

	rspi_disable_irq(rspi, irq_mask);

	if (tx)
		enable_irq(rspi->tx_irq);
	if (rx && rspi->rx_irq != other_irq)
		enable_irq(rspi->rx_irq);

	return ret;

no_dma_tx:
	/* TX prep/submit failed after RX was already submitted: undo RX */
	if (rx)
		dmaengine_terminate_all(rspi->ctlr->dma_rx);
no_dma_rx:
	if (ret == -EAGAIN) {
		pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
			     dev_driver_string(&rspi->ctlr->dev),
			     dev_name(&rspi->ctlr->dev));
	}
	return ret;
}
|
|
|
|
|
2013-12-24 17:49:32 +08:00
|
|
|
/* Drain a stale word from the receive buffer and clear any overrun flag */
static void rspi_receive_init(const struct rspi_data *rspi)
{
	u8 spsr;

	spsr = rspi_read8(rspi, RSPI_SPSR);
	if (spsr & SPSR_SPRF)
		rspi_read_data(rspi);	/* dummy read */
	if (spsr & SPSR_OVRF)
		rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
			    RSPI_SPSR);
}
|
|
|
|
|
2014-01-24 16:43:59 +08:00
|
|
|
/* RZ variant: common receive init plus a reset of both FIFO buffers */
static void rspi_rz_receive_init(const struct rspi_data *rspi)
{
	rspi_receive_init(rspi);
	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, RSPI_SPBFCR);
	rspi_write8(rspi, 0, RSPI_SPBFCR);	/* back to normal operation */
}
|
|
|
|
|
2013-12-24 17:49:32 +08:00
|
|
|
/* QSPI variant: drain a stale word, then reset and re-enable both FIFOs */
static void qspi_receive_init(const struct rspi_data *rspi)
{
	u8 spsr;

	spsr = rspi_read8(rspi, RSPI_SPSR);
	if (spsr & SPSR_SPRF)
		rspi_read_data(rspi);	/* dummy read */
	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
	rspi_write8(rspi, 0, QSPI_SPBFCR);	/* back to normal operation */
}
|
|
|
|
|
2014-06-02 21:38:12 +08:00
|
|
|
/* DMA is only worthwhile for transfers larger than the variant's FIFO */
static bool __rspi_can_dma(const struct rspi_data *rspi,
			   const struct spi_transfer *xfer)
{
	return xfer->len > rspi->ops->fifo_size;
}
|
2012-04-20 13:50:36 +08:00
|
|
|
|
2019-02-08 17:09:07 +08:00
|
|
|
/* spi_controller can_dma hook: defer to the driver-internal size check */
static bool rspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi,
			 struct spi_transfer *xfer)
{
	return __rspi_can_dma(spi_controller_get_devdata(ctlr), xfer);
}
|
|
|
|
|
2014-10-23 11:14:13 +08:00
|
|
|
/*
 * Try a DMA transfer for @xfer.  Returns -EAGAIN when DMA is unavailable or
 * not worthwhile, so the caller can fall back to PIO; otherwise returns the
 * result of the DMA transfer.
 */
static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
					struct spi_transfer *xfer)
{
	if (!rspi->ctlr->can_dma || !__rspi_can_dma(rspi, xfer))
		return -EAGAIN;

	/* rx_buf can be NULL on RSPI on SH in TX-only Mode */
	return rspi_dma_transfer(rspi, &xfer->tx_sg,
				xfer->rx_buf ? &xfer->rx_sg : NULL);
}
|
|
|
|
|
|
|
|
/*
 * Transfer path shared by the RSPI and RZ variants: attempt DMA first
 * and fall back to PIO when DMA declines with -EAGAIN.
 */
static int rspi_common_transfer(struct rspi_data *rspi,
				struct spi_transfer *xfer)
{
	int ret;

	ret = rspi_dma_check_then_transfer(rspi, xfer);
	if (ret != -EAGAIN)
		return ret;	/* DMA handled it, or failed hard */

	ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
	if (ret < 0)
		return ret;

	/* Wait for the last transmission */
	rspi_wait_for_tx_empty(rspi);

	return 0;
}
|
|
|
|
|
2019-02-08 17:09:07 +08:00
|
|
|
/*
 * .transfer_one for the legacy SH RSPI variant.  Selects full-duplex or
 * TX-only mode in SPCR based on whether a receive buffer was supplied,
 * then hands off to the common DMA/PIO transfer path.
 */
static int rspi_transfer_one(struct spi_controller *ctlr,
			     struct spi_device *spi, struct spi_transfer *xfer)
{
	struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
	u8 spcr;

	spcr = rspi_read8(rspi, RSPI_SPCR);
	if (xfer->rx_buf) {
		rspi_receive_init(rspi);
		spcr &= ~SPCR_TXMD;	/* full-duplex */
	} else {
		spcr |= SPCR_TXMD;	/* TX-only */
	}
	rspi_write8(rspi, spcr, RSPI_SPCR);

	return rspi_common_transfer(rspi, xfer);
}
|
|
|
|
|
2019-02-08 17:09:07 +08:00
|
|
|
/*
 * .transfer_one for the RZ/A1H RSPI variant: reset the FIFOs, then use
 * the common DMA/PIO transfer path.
 */
static int rspi_rz_transfer_one(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct rspi_data *rspi = spi_controller_get_devdata(ctlr);

	rspi_rz_receive_init(rspi);

	return rspi_common_transfer(rspi, xfer);
}
|
|
|
|
|
2015-05-22 17:59:36 +08:00
|
|
|
/*
 * Full-duplex PIO transfer on QSPI, chunked by the FIFO trigger level.
 * Each pass programs both the send and receive triggers for the bytes
 * remaining, waits until the TX FIFO drains, writes one chunk, waits for
 * the RX FIFO to reach the trigger, and reads the same number of bytes
 * back.  The send trigger's return value (n) fixes the chunk size for
 * both directions in that pass.
 *
 * Returns 0 on success or a negative error code on FIFO wait timeout.
 */
static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
					u8 *rx, unsigned int len)
{
	unsigned int i, n;
	int ret;

	while (len > 0) {
		n = qspi_set_send_trigger(rspi, len);
		qspi_set_receive_trigger(rspi, len);
		ret = rspi_wait_for_tx_empty(rspi);
		if (ret < 0) {
			dev_err(&rspi->ctlr->dev, "transmit timeout\n");
			return ret;
		}
		for (i = 0; i < n; i++)
			rspi_write_data(rspi, *tx++);

		ret = rspi_wait_for_rx_full(rspi);
		if (ret < 0) {
			dev_err(&rspi->ctlr->dev, "receive timeout\n");
			return ret;
		}
		for (i = 0; i < n; i++)
			*rx++ = rspi_read_data(rspi);

		len -= n;
	}

	return 0;
}
|
|
|
|
|
2014-01-24 16:43:57 +08:00
|
|
|
/*
 * Single-mode (full-duplex) QSPI transfer: initialize reception, try
 * DMA, and fall back to trigger-based PIO when DMA declines (-EAGAIN).
 */
static int qspi_transfer_out_in(struct rspi_data *rspi,
				struct spi_transfer *xfer)
{
	int ret;

	qspi_receive_init(rspi);

	ret = rspi_dma_check_then_transfer(rspi, xfer);
	if (ret != -EAGAIN)
		return ret;

	return qspi_trigger_transfer_out_in(rspi, xfer->tx_buf,
					    xfer->rx_buf, xfer->len);
}
|
|
|
|
|
2014-01-30 16:43:50 +08:00
|
|
|
/*
 * TX-only QSPI transfer (Dual/Quad SPI write).  Tries DMA first; on
 * -EAGAIN falls back to PIO, writing chunks sized by the FIFO send
 * trigger and waiting for the FIFO to drain before each chunk.
 */
static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
{
	const u8 *tx = xfer->tx_buf;
	unsigned int n = xfer->len;
	unsigned int i, len;
	int ret;

	if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
		ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
		if (ret != -EAGAIN)
			return ret;
	}

	while (n > 0) {
		len = qspi_set_send_trigger(rspi, n);
		ret = rspi_wait_for_tx_empty(rspi);
		if (ret < 0) {
			dev_err(&rspi->ctlr->dev, "transmit timeout\n");
			return ret;
		}
		for (i = 0; i < len; i++)
			rspi_write_data(rspi, *tx++);

		n -= len;
	}

	/* Wait for the last transmission */
	rspi_wait_for_tx_empty(rspi);

	return 0;
}
|
|
|
|
|
|
|
|
static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
|
|
|
|
{
|
2016-11-08 21:46:12 +08:00
|
|
|
u8 *rx = xfer->rx_buf;
|
|
|
|
unsigned int n = xfer->len;
|
|
|
|
unsigned int i, len;
|
|
|
|
int ret;
|
|
|
|
|
2019-02-08 17:09:07 +08:00
|
|
|
if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
|
2014-07-09 18:26:22 +08:00
|
|
|
int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
|
|
|
|
if (ret != -EAGAIN)
|
|
|
|
return ret;
|
|
|
|
}
|
2014-06-02 21:38:17 +08:00
|
|
|
|
2016-11-08 21:46:12 +08:00
|
|
|
while (n > 0) {
|
|
|
|
len = qspi_set_receive_trigger(rspi, n);
|
spi: rspi: Fix handling of QSPI code when transmit and receive
Process handling QSPI when transmit/receive at qspi_trigger_transfer_out_in() as follows:
Setting the trigger, is the number of bytes in the FIFO buffer to determine
when there is an interrupt. Then check if the value of triggering number is
32-bytes or 1-byte, there will be corresponding processing
Handling (if (n == QSPI_BUFFER_SIZE) esle) this is unnecessary, leads to the
same processing of data transmission or reception, The difference here are with
ret = rspi_wait_for_tx_empty(rspi);
ret = rspi_wait_for_rx_full(rspi);
When the nummber trigger is 32 bytes, we only write into FIFO when the FIFO is completely empty
(interrupt transmission), and only receive if FIFO is full of 32 bytes of data.
In the case of a nummber trigger that is 1 byte, in principle we still need to process
rspi_wait_for_tx_empty/full so that FIFO is empty only with the amount of data we need to write to
or equal to the number of bytes we need to receive, There is currently no processing of this.
And in the current case with this patch, at this time it only needs at least 1 byte received in
FIFO that has interrupt received, or FIFO at least 1bytes free can be written into FIFO,
This patch therefore does not affect this processing.
So we need to eliminate unnecessary waste processing (if (n == QSPI_BUFFER_SIZE) esle),
more precisely in waiting for FIFO status.
The same with handling in qspi_transfer_out()/qspi_transfer_in().
Signed-off-by: Hoan Nguyen An <na-hoan@jinso.co.jp>
Signed-off-by: Mark Brown <broonie@kernel.org>
2019-04-23 17:19:21 +08:00
|
|
|
ret = rspi_wait_for_rx_full(rspi);
|
|
|
|
if (ret < 0) {
|
|
|
|
dev_err(&rspi->ctlr->dev, "receive timeout\n");
|
|
|
|
return ret;
|
2016-11-08 21:46:12 +08:00
|
|
|
}
|
spi: rspi: Fix handling of QSPI code when transmit and receive
Process handling QSPI when transmit/receive at qspi_trigger_transfer_out_in() as follows:
Setting the trigger, is the number of bytes in the FIFO buffer to determine
when there is an interrupt. Then check if the value of triggering number is
32-bytes or 1-byte, there will be corresponding processing
Handling (if (n == QSPI_BUFFER_SIZE) esle) this is unnecessary, leads to the
same processing of data transmission or reception, The difference here are with
ret = rspi_wait_for_tx_empty(rspi);
ret = rspi_wait_for_rx_full(rspi);
When the nummber trigger is 32 bytes, we only write into FIFO when the FIFO is completely empty
(interrupt transmission), and only receive if FIFO is full of 32 bytes of data.
In the case of a nummber trigger that is 1 byte, in principle we still need to process
rspi_wait_for_tx_empty/full so that FIFO is empty only with the amount of data we need to write to
or equal to the number of bytes we need to receive, There is currently no processing of this.
And in the current case with this patch, at this time it only needs at least 1 byte received in
FIFO that has interrupt received, or FIFO at least 1bytes free can be written into FIFO,
This patch therefore does not affect this processing.
So we need to eliminate unnecessary waste processing (if (n == QSPI_BUFFER_SIZE) esle),
more precisely in waiting for FIFO status.
The same with handling in qspi_transfer_out()/qspi_transfer_in().
Signed-off-by: Hoan Nguyen An <na-hoan@jinso.co.jp>
Signed-off-by: Mark Brown <broonie@kernel.org>
2019-04-23 17:19:21 +08:00
|
|
|
for (i = 0; i < len; i++)
|
|
|
|
*rx++ = rspi_read_data(rspi);
|
|
|
|
|
2016-11-08 21:46:12 +08:00
|
|
|
n -= len;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2014-01-30 16:43:50 +08:00
|
|
|
}
|
|
|
|
|
2019-02-08 17:09:07 +08:00
|
|
|
static int qspi_transfer_one(struct spi_controller *ctlr,
|
|
|
|
struct spi_device *spi, struct spi_transfer *xfer)
|
2014-01-24 16:43:57 +08:00
|
|
|
{
|
2019-02-08 17:09:07 +08:00
|
|
|
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
|
2014-01-24 16:43:57 +08:00
|
|
|
|
2014-02-22 00:29:18 +08:00
|
|
|
if (spi->mode & SPI_LOOP) {
|
|
|
|
return qspi_transfer_out_in(rspi, xfer);
|
2014-06-02 21:38:06 +08:00
|
|
|
} else if (xfer->tx_nbits > SPI_NBITS_SINGLE) {
|
2014-01-30 16:43:50 +08:00
|
|
|
/* Quad or Dual SPI Write */
|
|
|
|
return qspi_transfer_out(rspi, xfer);
|
2014-06-02 21:38:06 +08:00
|
|
|
} else if (xfer->rx_nbits > SPI_NBITS_SINGLE) {
|
2014-01-30 16:43:50 +08:00
|
|
|
/* Quad or Dual SPI Read */
|
|
|
|
return qspi_transfer_in(rspi, xfer);
|
|
|
|
} else {
|
|
|
|
/* Single SPI Transfer */
|
|
|
|
return qspi_transfer_out_in(rspi, xfer);
|
|
|
|
}
|
2012-03-07 13:46:25 +08:00
|
|
|
}
|
|
|
|
|
2014-01-30 16:43:50 +08:00
|
|
|
static u16 qspi_transfer_mode(const struct spi_transfer *xfer)
|
|
|
|
{
|
|
|
|
if (xfer->tx_buf)
|
|
|
|
switch (xfer->tx_nbits) {
|
|
|
|
case SPI_NBITS_QUAD:
|
|
|
|
return SPCMD_SPIMOD_QUAD;
|
|
|
|
case SPI_NBITS_DUAL:
|
|
|
|
return SPCMD_SPIMOD_DUAL;
|
|
|
|
default:
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (xfer->rx_buf)
|
|
|
|
switch (xfer->rx_nbits) {
|
|
|
|
case SPI_NBITS_QUAD:
|
|
|
|
return SPCMD_SPIMOD_QUAD | SPCMD_SPRW;
|
|
|
|
case SPI_NBITS_DUAL:
|
|
|
|
return SPCMD_SPIMOD_DUAL | SPCMD_SPRW;
|
|
|
|
default:
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Program the QSPI command sequencer for a message that mixes transfer
 * modes (e.g. single-width command + quad-width data).  Consecutive
 * transfers with the same mode are merged into one sequencer slot; each
 * mode change starts a new SPCMD slot and closes the previous one by
 * writing its accumulated byte count to SPBMUL.  At most QSPI_NUM_SPCMD
 * distinct modes are supported.
 *
 * Returns 0 on success, -EINVAL if the message needs too many slots.
 */
static int qspi_setup_sequencer(struct rspi_data *rspi,
				const struct spi_message *msg)
{
	const struct spi_transfer *xfer;
	unsigned int i = 0, len = 0;
	u16 current_mode = 0xffff, mode;	/* 0xffff: no slot open yet */

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		mode = qspi_transfer_mode(xfer);
		if (mode == current_mode) {
			/* Same mode as previous transfer: merge lengths */
			len += xfer->len;
			continue;
		}

		/* Transfer mode change */
		if (i) {
			/* Set transfer data length of previous transfer */
			rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
		}

		if (i >= QSPI_NUM_SPCMD) {
			dev_err(&msg->spi->dev,
				"Too many different transfer modes");
			return -EINVAL;
		}

		/* Program transfer mode for this transfer */
		rspi_write16(rspi, rspi->spcmd | mode, RSPI_SPCMD(i));
		current_mode = mode;
		len = xfer->len;
		i++;
	}
	if (i) {
		/* Set final transfer data length and sequence length */
		rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
		rspi_write8(rspi, i - 1, RSPI_SPSCR);
	}

	return 0;
}
|
|
|
|
|
2019-02-08 17:09:07 +08:00
|
|
|
/*
 * .prepare_message callback: perform all per-message controller register
 * initialization here (not in .setup()), because the SPI core guarantees
 * the device is runtime-resumed when this is called.  Caches the clock
 * rate and the SPCMD/SPPCR settings derived from the SPI mode, programs
 * the config registers, sets up the QSPI sequencer when the message uses
 * Dual/Quad transfers, and finally enables the SPI function.
 */
static int rspi_prepare_message(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = msg->spi;
	int ret;

	rspi->max_speed_hz = spi->max_speed_hz;

	/* Keep SSL asserted between words; apply clock polarity/phase */
	rspi->spcmd = SPCMD_SSLKP;
	if (spi->mode & SPI_CPOL)
		rspi->spcmd |= SPCMD_CPOL;
	if (spi->mode & SPI_CPHA)
		rspi->spcmd |= SPCMD_CPHA;

	/* CMOS output mode and MOSI signal from previous transfer */
	rspi->sppcr = 0;
	if (spi->mode & SPI_LOOP)
		rspi->sppcr |= SPPCR_SPLP;

	set_config_register(rspi, 8);

	if (msg->spi->mode &
	    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
		/* Setup sequencer for messages with multiple transfer modes */
		ret = qspi_setup_sequencer(rspi, msg);
		if (ret < 0)
			return ret;
	}

	/* Enable SPI function in master mode */
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
	return 0;
}
|
|
|
|
|
2019-02-08 17:09:07 +08:00
|
|
|
/*
 * .unprepare_message callback: disable the SPI function and restore the
 * sequencer to its single-transfer default so a following non-sequenced
 * message starts from a clean state.
 */
static int rspi_unprepare_message(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	struct rspi_data *rspi = spi_controller_get_devdata(ctlr);

	/* Disable SPI function */
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);

	/* Reset sequencer for Single SPI Transfers */
	rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
	rspi_write8(rspi, 0, RSPI_SPSCR);
	return 0;
}
|
|
|
|
|
2014-01-24 16:43:58 +08:00
|
|
|
/*
 * Interrupt handler for controllers with a single muxed RX/TX IRQ line.
 * Latches the status register into rspi->spsr for the waiter, disables
 * whichever interrupt source(s) fired, and wakes the transfer thread.
 */
static irqreturn_t rspi_irq_mux(int irq, void *_sr)
{
	struct rspi_data *rspi = _sr;
	u8 spsr;
	irqreturn_t ret = IRQ_NONE;
	u8 disable_irq = 0;

	rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
	if (spsr & SPSR_SPRF)
		disable_irq |= SPCR_SPRIE;	/* RX buffer full */
	if (spsr & SPSR_SPTEF)
		disable_irq |= SPCR_SPTIE;	/* TX buffer empty */

	if (disable_irq) {
		ret = IRQ_HANDLED;
		rspi_disable_irq(rspi, disable_irq);
		wake_up(&rspi->wait);
	}

	return ret;
}
|
|
|
|
|
2014-01-24 16:43:58 +08:00
|
|
|
static irqreturn_t rspi_irq_rx(int irq, void *_sr)
|
|
|
|
{
|
|
|
|
struct rspi_data *rspi = _sr;
|
|
|
|
u8 spsr;
|
|
|
|
|
|
|
|
rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
|
|
|
|
if (spsr & SPSR_SPRF) {
|
|
|
|
rspi_disable_irq(rspi, SPCR_SPRIE);
|
|
|
|
wake_up(&rspi->wait);
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static irqreturn_t rspi_irq_tx(int irq, void *_sr)
|
|
|
|
{
|
|
|
|
struct rspi_data *rspi = _sr;
|
|
|
|
u8 spsr;
|
|
|
|
|
|
|
|
rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
|
|
|
|
if (spsr & SPSR_SPTEF) {
|
|
|
|
rspi_disable_irq(rspi, SPCR_SPTIE);
|
|
|
|
wake_up(&rspi->wait);
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-06-02 21:38:09 +08:00
|
|
|
/*
 * Request and configure one dmaengine channel for the given direction.
 * Works for both DT ("tx"/"rx" channel names) and legacy platform data
 * (shdma filter with a slave @id).  The slave is configured for 1-byte
 * accesses to the data register at @port_addr.
 *
 * Returns the configured channel, or NULL on any failure (the caller
 * treats DMA as unavailable).
 */
static struct dma_chan *rspi_request_dma_chan(struct device *dev,
					      enum dma_transfer_direction dir,
					      unsigned int id,
					      dma_addr_t port_addr)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_slave_config cfg;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
				(void *)(unsigned long)id, dev,
				dir == DMA_MEM_TO_DEV ? "tx" : "rx");
	if (!chan) {
		dev_warn(dev, "dma_request_slave_channel_compat failed\n");
		return NULL;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = dir;
	if (dir == DMA_MEM_TO_DEV) {
		/* Memory-to-device: data register is the destination */
		cfg.dst_addr = port_addr;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	} else {
		/* Device-to-memory: data register is the source */
		cfg.src_addr = port_addr;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}
|
|
|
|
|
2019-02-08 17:09:07 +08:00
|
|
|
/*
 * Set up the controller's TX and RX DMA channels.  Slave IDs come from
 * DT (resolved by the dmaengine core, so 0 here) or from platform data;
 * if neither provides them, the driver silently stays in PIO mode
 * (returns 0).  On partial failure the already-acquired TX channel is
 * released.  On success, installs the .can_dma callback.
 */
static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr,
			    const struct resource *res)
{
	const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
	unsigned int dma_tx_id, dma_rx_id;

	if (dev->of_node) {
		/* In the OF case we will get the slave IDs from the DT */
		dma_tx_id = 0;
		dma_rx_id = 0;
	} else if (rspi_pd && rspi_pd->dma_tx_id && rspi_pd->dma_rx_id) {
		dma_tx_id = rspi_pd->dma_tx_id;
		dma_rx_id = rspi_pd->dma_rx_id;
	} else {
		/* The driver assumes no error. */
		return 0;
	}

	ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
					     res->start + RSPI_SPDR);
	if (!ctlr->dma_tx)
		return -ENODEV;

	ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
					     res->start + RSPI_SPDR);
	if (!ctlr->dma_rx) {
		dma_release_channel(ctlr->dma_tx);
		ctlr->dma_tx = NULL;
		return -ENODEV;
	}

	ctlr->can_dma = rspi_can_dma;
	dev_info(dev, "DMA available");
	return 0;
}
|
|
|
|
|
2019-02-08 17:09:07 +08:00
|
|
|
static void rspi_release_dma(struct spi_controller *ctlr)
|
2012-04-20 13:50:36 +08:00
|
|
|
{
|
2019-02-08 17:09:07 +08:00
|
|
|
if (ctlr->dma_tx)
|
|
|
|
dma_release_channel(ctlr->dma_tx);
|
|
|
|
if (ctlr->dma_rx)
|
|
|
|
dma_release_channel(ctlr->dma_rx);
|
2012-04-20 13:50:36 +08:00
|
|
|
}
|
|
|
|
|
2012-12-08 00:57:14 +08:00
|
|
|
/*
 * Platform driver .remove: free DMA channels and disable runtime PM.
 * (Managed resources handle the rest of the teardown.)
 */
static int rspi_remove(struct platform_device *pdev)
{
	struct rspi_data *rspi = platform_get_drvdata(pdev);

	rspi_release_dma(rspi->ctlr);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
|
|
|
|
|
2014-01-28 17:21:38 +08:00
|
|
|
/* Variant ops: RSPI on legacy SH (8-byte FIFO, TX is mandatory) */
static const struct spi_ops rspi_ops = {
	.set_config_register =	rspi_set_config_register,
	.transfer_one =		rspi_transfer_one,
	.mode_bits =		SPI_CPHA | SPI_CPOL | SPI_LOOP,
	.flags =		SPI_CONTROLLER_MUST_TX,
	.fifo_size =		8,
};
|
|
|
|
|
|
|
|
/* Variant operations: RSPI on RZ/A1H (both RX and TX buffers always used) */
static const struct spi_ops rspi_rz_ops = {
	.set_config_register = rspi_rz_set_config_register,
	.transfer_one = rspi_rz_transfer_one,
	.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
	.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
	.fifo_size = 8, /* 8 for TX, 32 for RX */
};
|
|
|
|
|
|
|
|
/* Variant operations: QSPI on R-Car Gen2 (adds dual/quad transfer modes) */
static const struct spi_ops qspi_ops = {
	.set_config_register = qspi_set_config_register,
	.transfer_one = qspi_transfer_one,
	.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
		     SPI_TX_DUAL | SPI_TX_QUAD |
		     SPI_RX_DUAL | SPI_RX_QUAD,
	.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
	.fifo_size = 32,
};
|
|
|
|
|
|
|
|
#ifdef CONFIG_OF
|
|
|
|
/* OF match table; .data selects the per-variant spi_ops used by probe */
static const struct of_device_id rspi_of_match[] = {
	/* RSPI on legacy SH */
	{ .compatible = "renesas,rspi", .data = &rspi_ops },
	/* RSPI on RZ/A1H */
	{ .compatible = "renesas,rspi-rz", .data = &rspi_rz_ops },
	/* QSPI on R-Car Gen2 */
	{ .compatible = "renesas,qspi", .data = &qspi_ops },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, rspi_of_match);
|
|
|
|
|
2019-02-08 17:09:07 +08:00
|
|
|
static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
|
2014-01-28 17:21:38 +08:00
|
|
|
{
|
|
|
|
u32 num_cs;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
/* Parse DT properties */
|
|
|
|
error = of_property_read_u32(dev->of_node, "num-cs", &num_cs);
|
|
|
|
if (error) {
|
|
|
|
dev_err(dev, "of_property_read_u32 num-cs failed %d\n", error);
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2019-02-08 17:09:07 +08:00
|
|
|
ctlr->num_chipselect = num_cs;
|
2014-01-28 17:21:38 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#else
|
2014-02-03 09:43:46 +08:00
|
|
|
#define rspi_of_match NULL
|
2019-02-08 17:09:07 +08:00
|
|
|
/* No OF support in this build: DT-based configuration is unavailable */
static inline int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
{
	return -EINVAL;
}
|
|
|
|
#endif /* CONFIG_OF */
|
|
|
|
|
2014-01-24 16:43:58 +08:00
|
|
|
static int rspi_request_irq(struct device *dev, unsigned int irq,
|
|
|
|
irq_handler_t handler, const char *suffix,
|
|
|
|
void *dev_id)
|
|
|
|
{
|
2014-08-06 20:59:00 +08:00
|
|
|
const char *name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s",
|
|
|
|
dev_name(dev), suffix);
|
2014-01-24 16:43:58 +08:00
|
|
|
if (!name)
|
|
|
|
return -ENOMEM;
|
2014-08-06 20:59:00 +08:00
|
|
|
|
2014-01-24 16:43:58 +08:00
|
|
|
return devm_request_irq(dev, irq, handler, 0, name, dev_id);
|
|
|
|
}
|
|
|
|
|
2012-12-08 00:57:14 +08:00
|
|
|
static int rspi_probe(struct platform_device *pdev)
|
2012-03-07 13:46:25 +08:00
|
|
|
{
|
|
|
|
struct resource *res;
|
2019-02-08 17:09:07 +08:00
|
|
|
struct spi_controller *ctlr;
|
2012-03-07 13:46:25 +08:00
|
|
|
struct rspi_data *rspi;
|
2014-01-24 16:43:58 +08:00
|
|
|
int ret;
|
2014-01-28 17:21:38 +08:00
|
|
|
const struct rspi_plat_data *rspi_pd;
|
2013-09-03 12:10:26 +08:00
|
|
|
const struct spi_ops *ops;
|
2012-03-07 13:46:25 +08:00
|
|
|
|
2019-02-08 17:09:07 +08:00
|
|
|
ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
|
|
|
|
if (ctlr == NULL)
|
2012-03-07 13:46:25 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
|
2017-10-04 20:19:53 +08:00
|
|
|
ops = of_device_get_match_data(&pdev->dev);
|
|
|
|
if (ops) {
|
2019-02-08 17:09:07 +08:00
|
|
|
ret = rspi_parse_dt(&pdev->dev, ctlr);
|
2014-01-28 17:21:38 +08:00
|
|
|
if (ret)
|
|
|
|
goto error1;
|
|
|
|
} else {
|
|
|
|
ops = (struct spi_ops *)pdev->id_entry->driver_data;
|
|
|
|
rspi_pd = dev_get_platdata(&pdev->dev);
|
|
|
|
if (rspi_pd && rspi_pd->num_chipselect)
|
2019-02-08 17:09:07 +08:00
|
|
|
ctlr->num_chipselect = rspi_pd->num_chipselect;
|
2014-01-28 17:21:38 +08:00
|
|
|
else
|
2019-02-08 17:09:07 +08:00
|
|
|
ctlr->num_chipselect = 2; /* default */
|
2014-08-06 20:58:59 +08:00
|
|
|
}
|
2014-01-28 17:21:38 +08:00
|
|
|
|
|
|
|
/* ops parameter check */
|
|
|
|
if (!ops->set_config_register) {
|
|
|
|
dev_err(&pdev->dev, "there is no set_config_register\n");
|
|
|
|
ret = -ENODEV;
|
|
|
|
goto error1;
|
|
|
|
}
|
|
|
|
|
2019-02-08 17:09:07 +08:00
|
|
|
rspi = spi_controller_get_devdata(ctlr);
|
2013-05-23 18:20:40 +08:00
|
|
|
platform_set_drvdata(pdev, rspi);
|
2013-09-03 12:10:26 +08:00
|
|
|
rspi->ops = ops;
|
2019-02-08 17:09:07 +08:00
|
|
|
rspi->ctlr = ctlr;
|
2013-11-27 08:41:46 +08:00
|
|
|
|
|
|
|
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
|
|
rspi->addr = devm_ioremap_resource(&pdev->dev, res);
|
|
|
|
if (IS_ERR(rspi->addr)) {
|
|
|
|
ret = PTR_ERR(rspi->addr);
|
2012-03-07 13:46:25 +08:00
|
|
|
goto error1;
|
|
|
|
}
|
|
|
|
|
2014-01-24 16:44:02 +08:00
|
|
|
rspi->clk = devm_clk_get(&pdev->dev, NULL);
|
2012-03-07 13:46:25 +08:00
|
|
|
if (IS_ERR(rspi->clk)) {
|
|
|
|
dev_err(&pdev->dev, "cannot get clock\n");
|
|
|
|
ret = PTR_ERR(rspi->clk);
|
2013-11-27 08:41:46 +08:00
|
|
|
goto error1;
|
2012-03-07 13:46:25 +08:00
|
|
|
}
|
2014-01-24 16:44:01 +08:00
|
|
|
|
2014-03-11 17:59:12 +08:00
|
|
|
pm_runtime_enable(&pdev->dev);
|
2012-03-07 13:46:25 +08:00
|
|
|
|
|
|
|
init_waitqueue_head(&rspi->wait);
|
|
|
|
|
2019-02-08 17:09:07 +08:00
|
|
|
ctlr->bus_num = pdev->id;
|
|
|
|
ctlr->auto_runtime_pm = true;
|
|
|
|
ctlr->transfer_one = ops->transfer_one;
|
|
|
|
ctlr->prepare_message = rspi_prepare_message;
|
|
|
|
ctlr->unprepare_message = rspi_unprepare_message;
|
|
|
|
ctlr->mode_bits = ops->mode_bits;
|
|
|
|
ctlr->flags = ops->flags;
|
|
|
|
ctlr->dev.of_node = pdev->dev.of_node;
|
2012-03-07 13:46:25 +08:00
|
|
|
|
2019-10-16 22:31:01 +08:00
|
|
|
ret = platform_get_irq_byname_optional(pdev, "rx");
|
2014-01-24 16:43:58 +08:00
|
|
|
if (ret < 0) {
|
2019-10-16 22:31:01 +08:00
|
|
|
ret = platform_get_irq_byname_optional(pdev, "mux");
|
2014-01-24 16:43:58 +08:00
|
|
|
if (ret < 0)
|
|
|
|
ret = platform_get_irq(pdev, 0);
|
|
|
|
if (ret >= 0)
|
|
|
|
rspi->rx_irq = rspi->tx_irq = ret;
|
|
|
|
} else {
|
|
|
|
rspi->rx_irq = ret;
|
|
|
|
ret = platform_get_irq_byname(pdev, "tx");
|
|
|
|
if (ret >= 0)
|
|
|
|
rspi->tx_irq = ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rspi->rx_irq == rspi->tx_irq) {
|
|
|
|
/* Single multiplexed interrupt */
|
|
|
|
ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_mux,
|
|
|
|
"mux", rspi);
|
|
|
|
} else {
|
|
|
|
/* Multi-interrupt mode, only SPRI and SPTI are used */
|
|
|
|
ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_rx,
|
|
|
|
"rx", rspi);
|
|
|
|
if (!ret)
|
|
|
|
ret = rspi_request_irq(&pdev->dev, rspi->tx_irq,
|
|
|
|
rspi_irq_tx, "tx", rspi);
|
|
|
|
}
|
2012-03-07 13:46:25 +08:00
|
|
|
if (ret < 0) {
|
|
|
|
dev_err(&pdev->dev, "request_irq error\n");
|
2014-01-14 17:20:33 +08:00
|
|
|
goto error2;
|
2012-03-07 13:46:25 +08:00
|
|
|
}
|
|
|
|
|
2019-02-08 17:09:07 +08:00
|
|
|
ret = rspi_request_dma(&pdev->dev, ctlr, res);
|
2014-06-02 21:38:08 +08:00
|
|
|
if (ret < 0)
|
|
|
|
dev_warn(&pdev->dev, "DMA not available, using PIO\n");
|
2012-04-20 13:50:36 +08:00
|
|
|
|
2019-02-08 17:09:07 +08:00
|
|
|
ret = devm_spi_register_controller(&pdev->dev, ctlr);
|
2012-03-07 13:46:25 +08:00
|
|
|
if (ret < 0) {
|
2019-02-08 17:09:07 +08:00
|
|
|
dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
|
2014-01-14 17:20:33 +08:00
|
|
|
goto error3;
|
2012-03-07 13:46:25 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
dev_info(&pdev->dev, "probed\n");
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
2014-01-14 17:20:33 +08:00
|
|
|
error3:
|
2019-02-08 17:09:07 +08:00
|
|
|
rspi_release_dma(ctlr);
|
2014-01-14 17:20:33 +08:00
|
|
|
error2:
|
2014-03-11 17:59:12 +08:00
|
|
|
pm_runtime_disable(&pdev->dev);
|
2012-03-07 13:46:25 +08:00
|
|
|
error1:
|
2019-02-08 17:09:07 +08:00
|
|
|
spi_controller_put(ctlr);
|
2012-03-07 13:46:25 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2015-05-01 23:44:05 +08:00
|
|
|
/* Legacy (non-DT) platform matching; driver_data selects the variant ops */
static const struct platform_device_id spi_driver_ids[] = {
	{ "rspi", (kernel_ulong_t)&rspi_ops },
	{ "rspi-rz", (kernel_ulong_t)&rspi_rz_ops },
	{ "qspi", (kernel_ulong_t)&qspi_ops },
	{},
};

MODULE_DEVICE_TABLE(platform, spi_driver_ids);
|
|
|
|
|
2018-09-05 16:49:38 +08:00
|
|
|
#ifdef CONFIG_PM_SLEEP
|
|
|
|
/* System suspend: quiesce the controller's message queue */
static int rspi_suspend(struct device *dev)
{
	struct rspi_data *rspi = dev_get_drvdata(dev);

	return spi_controller_suspend(rspi->ctlr);
}
|
|
|
|
|
|
|
|
/* System resume: restart the controller's message queue */
static int rspi_resume(struct device *dev)
{
	struct rspi_data *rspi = dev_get_drvdata(dev);

	return spi_controller_resume(rspi->ctlr);
}
|
|
|
|
|
|
|
|
static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
|
|
|
|
#define DEV_PM_OPS &rspi_pm_ops
|
|
|
|
#else
|
|
|
|
#define DEV_PM_OPS NULL
|
|
|
|
#endif /* CONFIG_PM_SLEEP */
|
|
|
|
|
2012-03-07 13:46:25 +08:00
|
|
|
/* Platform driver glue; matches via DT (of_match_table) or id_table */
static struct platform_driver rspi_driver = {
	.probe = rspi_probe,
	.remove = rspi_remove,
	.id_table = spi_driver_ids,
	.driver = {
		.name = "renesas_spi",
		.pm = DEV_PM_OPS,
		.of_match_table = of_match_ptr(rspi_of_match),
	},
};
module_platform_driver(rspi_driver);

MODULE_DESCRIPTION("Renesas RSPI bus driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:rspi");
|