drm/tegra: Remove memory allocation from Falcon library

Having to provide allocator hooks to the Falcon library is somewhat
cumbersome and gives users of the library little flexibility in how
allocations are handled. Instead, remove the notion of Falcon
"operations" and let drivers deal with the memory allocations
themselves.
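
For example, a driver now allocates and owns the firmware buffer itself and
only hands the resulting addresses to the library (a condensed sketch drawn
from the vic.c changes below; error handling and the cache-maintenance path
for the shared IOMMU domain are omitted):

	err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
	if (err < 0)
		return err;

	size = vic->falcon.firmware.size;

	if (!client->group)
		/* no shared IOMMU domain: use coherent DMA memory */
		virt = dma_alloc_coherent(vic->dev, size, &phys, GFP_KERNEL);
	else
		/* attached to the shared IOMMU domain: use the DRM allocator */
		virt = tegra_drm_alloc(tegra, size, &phys);

	/* the library no longer allocates; it only consumes these */
	vic->falcon.firmware.vaddr = virt;
	vic->falcon.firmware.paddr = phys;

	err = falcon_load_firmware(&vic->falcon);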

Signed-off-by: Thierry Reding <treding@nvidia.com>

Author: Thierry Reding <treding@nvidia.com>
Date:   2019-10-28 13:37:15 +01:00
Commit: 20e7dce255 (parent: 06867a362d)
3 changed files with 68 additions and 84 deletions

diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c

@@ -59,26 +59,11 @@ static void falcon_copy_firmware_image(struct falcon *falcon,
const struct firmware *firmware)
{
u32 *firmware_vaddr = falcon->firmware.vaddr;
dma_addr_t daddr;
size_t i;
int err;
/* copy the whole thing taking into account endianness */
for (i = 0; i < firmware->size / sizeof(u32); i++)
firmware_vaddr[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
/* ensure that caches are flushed and falcon can see the firmware */
daddr = dma_map_single(falcon->dev, firmware_vaddr,
falcon->firmware.size, DMA_TO_DEVICE);
err = dma_mapping_error(falcon->dev, daddr);
if (err) {
dev_err(falcon->dev, "failed to map firmware: %d\n", err);
return;
}
dma_sync_single_for_device(falcon->dev, daddr,
falcon->firmware.size, DMA_TO_DEVICE);
dma_unmap_single(falcon->dev, daddr, falcon->firmware.size,
DMA_TO_DEVICE);
}
static int falcon_parse_firmware_image(struct falcon *falcon)
@@ -125,6 +110,8 @@ int falcon_read_firmware(struct falcon *falcon, const char *name)
if (err < 0)
return err;
falcon->firmware.size = falcon->firmware.firmware->size;
return 0;
}
@@ -133,16 +120,6 @@ int falcon_load_firmware(struct falcon *falcon)
const struct firmware *firmware = falcon->firmware.firmware;
int err;
falcon->firmware.size = firmware->size;
/* allocate iova space for the firmware */
falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
&falcon->firmware.paddr);
if (IS_ERR(falcon->firmware.vaddr)) {
dev_err(falcon->dev, "DMA memory mapping failed\n");
return PTR_ERR(falcon->firmware.vaddr);
}
/* copy firmware image into local area. this also ensures endianness */
falcon_copy_firmware_image(falcon, firmware);
@@ -150,27 +127,17 @@ int falcon_load_firmware(struct falcon *falcon)
err = falcon_parse_firmware_image(falcon);
if (err < 0) {
dev_err(falcon->dev, "failed to parse firmware image\n");
goto err_setup_firmware_image;
return err;
}
release_firmware(firmware);
falcon->firmware.firmware = NULL;
return 0;
err_setup_firmware_image:
falcon->ops->free(falcon, falcon->firmware.size,
falcon->firmware.paddr, falcon->firmware.vaddr);
return err;
}
int falcon_init(struct falcon *falcon)
{
/* check mandatory ops */
if (!falcon->ops || !falcon->ops->alloc || !falcon->ops->free)
return -EINVAL;
falcon->firmware.vaddr = NULL;
return 0;
@@ -178,17 +145,8 @@ int falcon_init(struct falcon *falcon)
void falcon_exit(struct falcon *falcon)
{
if (falcon->firmware.firmware) {
if (falcon->firmware.firmware)
release_firmware(falcon->firmware.firmware);
falcon->firmware.firmware = NULL;
}
if (falcon->firmware.vaddr) {
falcon->ops->free(falcon, falcon->firmware.size,
falcon->firmware.paddr,
falcon->firmware.vaddr);
falcon->firmware.vaddr = NULL;
}
}
int falcon_boot(struct falcon *falcon)

diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h

@@ -74,15 +74,6 @@ struct falcon_fw_os_header_v1 {
u32 data_size;
};
struct falcon;
struct falcon_ops {
void *(*alloc)(struct falcon *falcon, size_t size,
dma_addr_t *paddr);
void (*free)(struct falcon *falcon, size_t size,
dma_addr_t paddr, void *vaddr);
};
struct falcon_firmware_section {
unsigned long offset;
size_t size;
@@ -107,8 +98,6 @@ struct falcon {
/* Set by falcon client */
struct device *dev;
void __iomem *regs;
const struct falcon_ops *ops;
void *data;
struct falcon_firmware firmware;
};

diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c

@@ -158,27 +158,6 @@ static int vic_boot(struct vic *vic)
return 0;
}
static void *vic_falcon_alloc(struct falcon *falcon, size_t size,
dma_addr_t *iova)
{
struct tegra_drm *tegra = falcon->data;
return tegra_drm_alloc(tegra, size, iova);
}
static void vic_falcon_free(struct falcon *falcon, size_t size,
dma_addr_t iova, void *va)
{
struct tegra_drm *tegra = falcon->data;
return tegra_drm_free(tegra, size, va, iova);
}
static const struct falcon_ops vic_falcon_ops = {
.alloc = vic_falcon_alloc,
.free = vic_falcon_free
};
static int vic_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
@@ -246,6 +225,15 @@ static int vic_exit(struct host1x_client *client)
host1x_channel_put(vic->channel);
host1x_client_iommu_detach(client);
if (client->group)
tegra_drm_free(tegra, vic->falcon.firmware.size,
vic->falcon.firmware.vaddr,
vic->falcon.firmware.paddr);
else
dma_free_coherent(vic->dev, vic->falcon.firmware.size,
vic->falcon.firmware.vaddr,
vic->falcon.firmware.paddr);
return 0;
}
@@ -256,25 +244,75 @@ static const struct host1x_client_ops vic_client_ops = {
static int vic_load_firmware(struct vic *vic)
{
struct host1x_client *client = &vic->client.base;
struct tegra_drm *tegra = vic->client.drm;
dma_addr_t phys;
size_t size;
void *virt;
int err;
if (vic->falcon.data)
if (vic->falcon.firmware.vaddr)
return 0;
vic->falcon.data = vic->client.drm;
err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
if (err < 0)
goto cleanup;
return err;
size = vic->falcon.firmware.size;
if (!client->group) {
virt = dma_alloc_coherent(vic->dev, size, &phys, GFP_KERNEL);
err = dma_mapping_error(vic->dev, phys);
if (err < 0)
return err;
} else {
virt = tegra_drm_alloc(tegra, size, &phys);
}
vic->falcon.firmware.vaddr = virt;
vic->falcon.firmware.paddr = phys;
err = falcon_load_firmware(&vic->falcon);
if (err < 0)
goto cleanup;
/*
* In this case we have received an IOVA from the shared domain, so we
* need to make sure to get the physical address so that the DMA API
* knows what memory pages to flush the cache for.
*/
if (client->group) {
phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);
err = dma_mapping_error(vic->dev, phys);
if (err < 0)
goto cleanup;
/*
* If the DMA API mapped this through a bounce buffer, the
* dma_sync_single_for_device() call below will not be able
* to flush the caches for the right memory pages. Output a
* big warning in that case so that the DMA mask can be set
* properly and the bounce buffer avoided.
*/
WARN(phys != vic->falcon.firmware.paddr,
"check DMA mask setting for %s\n", dev_name(vic->dev));
}
dma_sync_single_for_device(vic->dev, phys, size, DMA_TO_DEVICE);
if (client->group)
dma_unmap_single(vic->dev, phys, size, DMA_TO_DEVICE);
return 0;
cleanup:
vic->falcon.data = NULL;
if (!client->group)
dma_free_coherent(vic->dev, size, virt, phys);
else
tegra_drm_free(tegra, size, virt, phys);
return err;
}
@@ -415,7 +453,6 @@ static int vic_probe(struct platform_device *pdev)
vic->falcon.dev = dev;
vic->falcon.regs = vic->regs;
vic->falcon.ops = &vic_falcon_ops;
err = falcon_init(&vic->falcon);
if (err < 0)