can: hi311x: remove custom DMA mapped buffer

There is no need to duplicate what SPI core already does, i.e. mapping buffers
for DMA capable transfers. This patch removes all related pieces of code.

Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
This commit is contained in:
Marc Kleine-Budde 2019-08-19 17:08:29 +02:00
parent 1f0dee39e3
commit 653ee35ce6
1 changed file with 10 additions and 49 deletions

View File

@@ -21,7 +21,6 @@
#include <linux/completion.h> #include <linux/completion.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/freezer.h> #include <linux/freezer.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/io.h> #include <linux/io.h>
@@ -126,10 +125,6 @@
#define DEVICE_NAME "hi3110" #define DEVICE_NAME "hi3110"
static int hi3110_enable_dma = 1; /* Enable SPI DMA. Default: 1 (On) */
module_param(hi3110_enable_dma, int, 0444);
MODULE_PARM_DESC(hi3110_enable_dma, "Enable SPI DMA. Default: 1 (On)");
static const struct can_bittiming_const hi3110_bittiming_const = { static const struct can_bittiming_const hi3110_bittiming_const = {
.name = DEVICE_NAME, .name = DEVICE_NAME,
.tseg1_min = 2, .tseg1_min = 2,
@@ -156,8 +151,6 @@ struct hi3110_priv {
u8 *spi_tx_buf; u8 *spi_tx_buf;
u8 *spi_rx_buf; u8 *spi_rx_buf;
dma_addr_t spi_tx_dma;
dma_addr_t spi_rx_dma;
struct sk_buff *tx_skb; struct sk_buff *tx_skb;
int tx_len; int tx_len;
@@ -217,13 +210,6 @@ static int hi3110_spi_trans(struct spi_device *spi, int len)
int ret; int ret;
spi_message_init(&m); spi_message_init(&m);
if (hi3110_enable_dma) {
t.tx_dma = priv->spi_tx_dma;
t.rx_dma = priv->spi_rx_dma;
m.is_dma_mapped = 1;
}
spi_message_add_tail(&t, &m); spi_message_add_tail(&t, &m);
ret = spi_sync(spi, &m); ret = spi_sync(spi, &m);
@@ -915,43 +901,18 @@ static int hi3110_can_probe(struct spi_device *spi)
priv->spi = spi; priv->spi = spi;
mutex_init(&priv->hi3110_lock); mutex_init(&priv->hi3110_lock);
/* If requested, allocate DMA buffers */ priv->spi_tx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN,
if (hi3110_enable_dma) { GFP_KERNEL);
spi->dev.coherent_dma_mask = ~0; if (!priv->spi_tx_buf) {
ret = -ENOMEM;
/* Minimum coherent DMA allocation is PAGE_SIZE, so allocate goto error_probe;
* that much and share it between Tx and Rx DMA buffers.
*/
priv->spi_tx_buf = dmam_alloc_coherent(&spi->dev,
PAGE_SIZE,
&priv->spi_tx_dma,
GFP_DMA);
if (priv->spi_tx_buf) {
priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
(PAGE_SIZE / 2));
} else {
/* Fall back to non-DMA */
hi3110_enable_dma = 0;
}
} }
priv->spi_rx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN,
GFP_KERNEL);
/* Allocate non-DMA buffers */ if (!priv->spi_rx_buf) {
if (!hi3110_enable_dma) { ret = -ENOMEM;
priv->spi_tx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN, goto error_probe;
GFP_KERNEL);
if (!priv->spi_tx_buf) {
ret = -ENOMEM;
goto error_probe;
}
priv->spi_rx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN,
GFP_KERNEL);
if (!priv->spi_rx_buf) {
ret = -ENOMEM;
goto error_probe;
}
} }
SET_NETDEV_DEV(net, &spi->dev); SET_NETDEV_DEV(net, &spi->dev);