/*
 * DMA support using SYS DMAC with the SDHI SD/SDIO controller
 *
 * Copyright (C) 2016-17 Renesas Electronics Corporation
 * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
 * Copyright (C) 2017 Horms Solutions, Simon Horman
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sys_soc.h>

#include "renesas_sdhi.h"
#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8

static const struct renesas_sdhi_of_data of_default_cfg = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
};

static const struct renesas_sdhi_of_data of_rz_compatible = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT |
		      TMIO_MMC_HAVE_CBSY,
	.tmio_ocr_mask = MMC_VDD_32_33,
	.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
};

static const struct renesas_sdhi_of_data of_rcar_gen1_compatible = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
		      TMIO_MMC_CLK_ACTUAL,
	.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
};
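
/*
 * Sampling-clock (SCC) tap tables: each entry pairs an SD clock rate
 * with its tap setting. An entry with .clk_rate = 0 presumably acts as
 * the catch-all default, given that it terminates both tables below
 * (an assumption based on how the common SDHI core walks these tables).
 */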

/* Definitions for sampling clocks */
static struct renesas_sdhi_scc rcar_gen2_scc_taps[] = {
	{
		.clk_rate = 156000000,
		.tap = 0x00000703,
	},
	{
		.clk_rate = 0,
		.tap = 0x00000300,
	},
};

static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
		      TMIO_MMC_CLK_ACTUAL | TMIO_MMC_HAVE_CBSY |
		      TMIO_MMC_MIN_RCAR2,
	.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
			MMC_CAP_CMD23,
	.dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES,
	.dma_rx_offset = 0x2000,
	.scc_offset = 0x0300,
	.taps = rcar_gen2_scc_taps,
	.taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
};

/* Definitions for sampling clocks */
static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
	{
		.clk_rate = 0,
		.tap = 0x00000300,
	},
};

static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
		      TMIO_MMC_CLK_ACTUAL | TMIO_MMC_HAVE_CBSY |
		      TMIO_MMC_MIN_RCAR2,
	.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
			MMC_CAP_CMD23,
	.bus_shift = 2,
	.scc_offset = 0x1000,
	.taps = rcar_gen3_scc_taps,
	.taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
};

static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
	{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
	{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
	{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
	{ .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, },
	{ .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
	{ .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
	{ .compatible = "renesas,sdhi-r8a7743", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7745", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
	{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
	{ .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, },
	{ .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
	{ .compatible = "renesas,sdhi-shmobile" },
	{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);
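
/*
 * Toggle DMA in the controller via the platform-provided hook. A no-op
 * unless both a Tx and an Rx channel were successfully requested: this
 * driver uses DMA for both directions or not at all.
 */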
static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host,
					     bool enable)
{
	if (!host->chan_tx || !host->chan_rx)
		return;

	if (host->dma->enable)
		host->dma->enable(host, enable);
}
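
/*
 * Abort any in-flight transfer by terminating both channels, then
 * re-enable DMA so the controller is ready for the next request.
 */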
static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
{
	renesas_sdhi_sys_dmac_enable_dma(host, false);

	if (host->chan_rx)
		dmaengine_terminate_all(host->chan_rx);
	if (host->chan_tx)
		dmaengine_terminate_all(host->chan_tx);

	renesas_sdhi_sys_dmac_enable_dma(host, true);
}
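
/*
 * Called on the DATAEND interrupt; signals the completion that the DMA
 * callback below waits on before finishing the request.
 */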
static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host)
{
	complete(&host->dma_dataend);
}
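
/*
 * DMA-done callback. DMA completion and the controller's DATAEND
 * interrupt can occur in either order, so after unmapping the
 * scatterlist we block on dma_dataend to be sure the SD access has
 * ended before handing the data stage back to the core.
 */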
static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
{
	struct tmio_mmc_host *host = arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	spin_unlock_irq(&host->lock);

	wait_for_completion(&host->dma_dataend);

	spin_lock_irq(&host->lock);
	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}
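
/*
 * Set up a DMA read. Unaligned or undersized requests cannot go through
 * the DMA engine: sub-TMIO_MMC_MIN_DMA_LEN transfers are forced to PIO,
 * a single misaligned element is routed through the bounce buffer, and
 * any other alignment violation falls back to PIO entirely.
 */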
static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_DEV_TO_MEM,
					       DMA_CTRL_ACK);

	if (desc) {
		reinit_completion(&host->dma_dataend);
		desc->callback = renesas_sdhi_sys_dmac_dma_callback;
		desc->callback_param = host;

		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		renesas_sdhi_sys_dmac_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}
}
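
/*
 * Set up a DMA write. Mirrors the read path above, except that an
 * unaligned element must be copied into the bounce buffer before the
 * descriptor is prepared.
 */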
static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);

		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_MEM_TO_DEV,
					       DMA_CTRL_ACK);

	if (desc) {
		reinit_completion(&host->dma_dataend);
		desc->callback = renesas_sdhi_sys_dmac_dma_callback;
		desc->callback_param = host;

		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		renesas_sdhi_sys_dmac_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}
}

static void renesas_sdhi_sys_dmac_start_dma(struct tmio_mmc_host *host,
					    struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			renesas_sdhi_sys_dmac_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			renesas_sdhi_sys_dmac_start_dma_tx(host);
	}
}
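
/*
 * Tasklet that actually kicks the DMA engine: it re-enables the DATAEND
 * interrupt and then issues whichever channel matches the current data
 * direction. Deferring to tasklet context keeps the issue out of the
 * interrupt handler that schedules it.
 */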
static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	if (host && host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}
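
/*
 * Request Tx/Rx channels and configure them to target the SD data port;
 * the Rx FIFO address is offset from the Tx one by dma_rx_offset. On DT
 * systems the channels come from the "tx"/"rx" dmas properties, with
 * the filter/chan_priv pair as the non-DT fallback. Also allocates the
 * one-page bounce buffer used for unaligned single-element transfers.
 */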
static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
					      struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!host->pdev->dev.of_node &&
	    (!pdata->chan_priv_tx || !pdata->chan_priv_rx))
		return;

	if (!host->chan_tx && !host->chan_rx) {
		struct resource *res = platform_get_resource(host->pdev,
							     IORESOURCE_MEM, 0);
		struct dma_slave_config cfg = {};
		dma_cap_mask_t mask;
		int ret;

		if (!res)
			return;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_slave_channel_compat(mask,
					host->dma->filter, pdata->chan_priv_tx,
					&host->pdev->dev, "tx");
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		cfg.direction = DMA_MEM_TO_DEV;
		cfg.dst_addr = res->start +
			(CTL_SD_DATA_PORT << host->bus_shift);
		cfg.dst_addr_width = host->dma->dma_buswidth;
		if (!cfg.dst_addr_width)
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.src_addr = 0;
		ret = dmaengine_slave_config(host->chan_tx, &cfg);
		if (ret < 0)
			goto ecfgtx;

		host->chan_rx = dma_request_slave_channel_compat(mask,
					host->dma->filter, pdata->chan_priv_rx,
					&host->pdev->dev, "rx");
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

		cfg.direction = DMA_DEV_TO_MEM;
		cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
		cfg.src_addr_width = host->dma->dma_buswidth;
		if (!cfg.src_addr_width)
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.dst_addr = 0;
		ret = dmaengine_slave_config(host->chan_rx, &cfg);
		if (ret < 0)
			goto ecfgrx;

		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		init_completion(&host->dma_dataend);
		tasklet_init(&host->dma_issue,
			     renesas_sdhi_sys_dmac_issue_tasklet_fn,
			     (unsigned long)host);
	}

	renesas_sdhi_sys_dmac_enable_dma(host, true);

	return;

ebouncebuf:
ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}
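
/*
 * Drop both channels and the bounce buffer; NULLing the pointers first
 * keeps the rest of the driver from treating DMA as still available.
 */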
static void renesas_sdhi_sys_dmac_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;

		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;

		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}

static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = {
	.start = renesas_sdhi_sys_dmac_start_dma,
	.enable = renesas_sdhi_sys_dmac_enable_dma,
	.request = renesas_sdhi_sys_dmac_request_dma,
	.release = renesas_sdhi_sys_dmac_release_dma,
	.abort = renesas_sdhi_sys_dmac_abort_dma,
	.dataend = renesas_sdhi_sys_dmac_dataend_dma,
};

/*
 * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC
 * implementation. Currently empty as all supported ES versions use
 * the internal DMAC.
 */
static const struct soc_device_attribute gen3_soc_whitelist[] = {
	{ /* sentinel */ }
};
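
/*
 * A minimal sketch of what a whitelist entry would look like, should a
 * Gen3 ES revision ever need the SYS DMAC (hypothetical values, for
 * illustration of the soc_device_match() format only):
 *
 *	{ .soc_id = "r8a7795", .revision = "ES1.*" },
 */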
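
/*
 * On R-Car Gen3 parts outside the (empty) whitelist, bail out with
 * -ENODEV so the internal-DMAC variant of the driver can bind instead;
 * otherwise hand off to the common SDHI probe with our DMA ops.
 */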
static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
{
	if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible &&
	    !soc_device_match(gen3_soc_whitelist))
		return -ENODEV;

	return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops);
}

static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
			   tmio_mmc_host_runtime_resume,
			   NULL)
};

static struct platform_driver renesas_sys_dmac_sdhi_driver = {
	.driver		= {
		.name	= "sh_mobile_sdhi",
		.pm	= &renesas_sdhi_sys_dmac_dev_pm_ops,
		.of_match_table = renesas_sdhi_sys_dmac_of_match,
	},
	.probe		= renesas_sdhi_sys_dmac_probe,
	.remove		= renesas_sdhi_remove,
};

module_platform_driver(renesas_sys_dmac_sdhi_driver);

MODULE_DESCRIPTION("Renesas SDHI driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sh_mobile_sdhi");