Merge branch 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux into drm-next
Most notable addition this time is the support for the GPU performance counters by Christian. This has been in the making for some time and it has matured a lot. Since this is adding UAPI, the corresponding WIP userspace can be found at [1] mesa/libdrm repos. I expect that Christian sends out the final userspace patches for this once you have pulled the kernel bits. Philipp optimized the probe path, so etnaviv gets out of the way for systems that want to boot real quick. I've done mostly cleanups, disentangling etnaviv from the IOMMU API, with some MMUv1 optimizations on the way. * 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux: (36 commits) drm/etnaviv: remove unnecessary clock stabilization delay drm/etnaviv: reduce reset delay drm/etnaviv: remove unused function etnaviv_gem_new drm/etnaviv: remove stale comment drm/etnaviv: submit supports performance monitor requests drm/etnaviv: enable debug registers on demand drm/etnaviv: need to disable clock gating when doing profiling drm/etnaviv: add MC perf domain drm/etnaviv: add TX perf domain drm/etnaviv: add RA perf domain drm/etnaviv: add SE perf domain drm/etnaviv: add PA perf domain drm/etnaviv: add SH perf domain drm/etnaviv: add PE perf domain drm/etnaviv: add HI perf domain drm/etnaviv: use 'sync points' for performance monitor requests drm/etnaviv: clear alloced event drm/etnaviv: add 'sync point' support drm/etnaviv: add performance monitor request processing drm/etnaviv: copy pmrs from userspace ...
This commit is contained in:
commit
787e1b74b7
|
@ -7,8 +7,6 @@ config DRM_ETNAVIV
|
|||
select SHMEM
|
||||
select SYNC_FILE
|
||||
select TMPFS
|
||||
select IOMMU_API
|
||||
select IOMMU_SUPPORT
|
||||
select WANT_DEV_COREDUMP
|
||||
select CMA if HAVE_DMA_CONTIGUOUS
|
||||
select DMA_CMA if HAVE_DMA_CONTIGUOUS
|
||||
|
|
|
@ -10,6 +10,7 @@ etnaviv-y := \
|
|||
etnaviv_gpu.o \
|
||||
etnaviv_iommu_v2.o \
|
||||
etnaviv_iommu.o \
|
||||
etnaviv_mmu.o
|
||||
etnaviv_mmu.o \
|
||||
etnaviv_perfmon.o
|
||||
|
||||
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o
|
||||
|
|
|
@ -250,6 +250,42 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
|
|||
}
|
||||
}
|
||||
|
||||
/* Append a 'sync point' to the ring buffer. */
|
||||
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
|
||||
{
|
||||
struct etnaviv_cmdbuf *buffer = gpu->buffer;
|
||||
unsigned int waitlink_offset = buffer->user_size - 16;
|
||||
u32 dwords, target;
|
||||
|
||||
/*
|
||||
* We need at most 3 dwords in the return target:
|
||||
* 1 event + 1 end + 1 wait + 1 link.
|
||||
*/
|
||||
dwords = 4;
|
||||
target = etnaviv_buffer_reserve(gpu, buffer, dwords);
|
||||
|
||||
/* Signal sync point event */
|
||||
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
|
||||
VIVS_GL_EVENT_FROM_PE);
|
||||
|
||||
/* Stop the FE to 'pause' the GPU */
|
||||
CMD_END(buffer);
|
||||
|
||||
/* Append waitlink */
|
||||
CMD_WAIT(buffer);
|
||||
CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
|
||||
buffer->user_size - 4);
|
||||
|
||||
/*
|
||||
* Kick off the 'sync point' command by replacing the previous
|
||||
* WAIT with a link to the address in the ring buffer.
|
||||
*/
|
||||
etnaviv_buffer_replace_wait(buffer, waitlink_offset,
|
||||
VIV_FE_LINK_HEADER_OP_LINK |
|
||||
VIV_FE_LINK_HEADER_PREFETCH(dwords),
|
||||
target);
|
||||
}
|
||||
|
||||
/* Append a command buffer to the ring buffer. */
|
||||
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
|
||||
struct etnaviv_cmdbuf *cmdbuf)
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include "etnaviv_cmdbuf.h"
|
||||
#include "etnaviv_gpu.h"
|
||||
#include "etnaviv_mmu.h"
|
||||
#include "etnaviv_perfmon.h"
|
||||
|
||||
#define SUBALLOC_SIZE SZ_256K
|
||||
#define SUBALLOC_GRANULE SZ_4K
|
||||
|
@ -87,9 +88,10 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
|
|||
|
||||
struct etnaviv_cmdbuf *
|
||||
etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
|
||||
size_t nr_bos)
|
||||
size_t nr_bos, size_t nr_pmrs)
|
||||
{
|
||||
struct etnaviv_cmdbuf *cmdbuf;
|
||||
struct etnaviv_perfmon_request *pmrs;
|
||||
size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
|
||||
sizeof(*cmdbuf));
|
||||
int granule_offs, order, ret;
|
||||
|
@ -98,6 +100,12 @@ etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
|
|||
if (!cmdbuf)
|
||||
return NULL;
|
||||
|
||||
sz = sizeof(*pmrs) * nr_pmrs;
|
||||
pmrs = kzalloc(sz, GFP_KERNEL);
|
||||
if (!pmrs)
|
||||
goto out_free_cmdbuf;
|
||||
|
||||
cmdbuf->pmrs = pmrs;
|
||||
cmdbuf->suballoc = suballoc;
|
||||
cmdbuf->size = size;
|
||||
|
||||
|
@ -124,6 +132,10 @@ etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
|
|||
cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
|
||||
|
||||
return cmdbuf;
|
||||
|
||||
out_free_cmdbuf:
|
||||
kfree(cmdbuf);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
|
||||
|
@ -139,6 +151,7 @@ void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
|
|||
suballoc->free_space = 1;
|
||||
mutex_unlock(&suballoc->lock);
|
||||
wake_up_all(&suballoc->free_event);
|
||||
kfree(cmdbuf->pmrs);
|
||||
kfree(cmdbuf);
|
||||
}
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
|
||||
struct etnaviv_gpu;
|
||||
struct etnaviv_cmdbuf_suballoc;
|
||||
struct etnaviv_perfmon_request;
|
||||
|
||||
struct etnaviv_cmdbuf {
|
||||
/* suballocator this cmdbuf is allocated from */
|
||||
|
@ -38,6 +39,9 @@ struct etnaviv_cmdbuf {
|
|||
u32 exec_state;
|
||||
/* per GPU in-flight list */
|
||||
struct list_head node;
|
||||
/* perfmon requests */
|
||||
unsigned int nr_pmrs;
|
||||
struct etnaviv_perfmon_request *pmrs;
|
||||
/* BOs attached to this command buffer */
|
||||
unsigned int nr_bos;
|
||||
struct etnaviv_vram_mapping *bo_map[0];
|
||||
|
@ -49,7 +53,7 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
|
|||
|
||||
struct etnaviv_cmdbuf *
|
||||
etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
|
||||
size_t nr_bos);
|
||||
size_t nr_bos, size_t nr_pmrs);
|
||||
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
|
||||
|
||||
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf);
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include "etnaviv_gpu.h"
|
||||
#include "etnaviv_gem.h"
|
||||
#include "etnaviv_mmu.h"
|
||||
#include "etnaviv_perfmon.h"
|
||||
|
||||
#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
|
||||
static bool reglog;
|
||||
|
@ -451,6 +452,40 @@ static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
|
||||
struct drm_file *file)
|
||||
{
|
||||
struct etnaviv_drm_private *priv = dev->dev_private;
|
||||
struct drm_etnaviv_pm_domain *args = data;
|
||||
struct etnaviv_gpu *gpu;
|
||||
|
||||
if (args->pipe >= ETNA_MAX_PIPES)
|
||||
return -EINVAL;
|
||||
|
||||
gpu = priv->gpu[args->pipe];
|
||||
if (!gpu)
|
||||
return -ENXIO;
|
||||
|
||||
return etnaviv_pm_query_dom(gpu, args);
|
||||
}
|
||||
|
||||
static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
|
||||
struct drm_file *file)
|
||||
{
|
||||
struct etnaviv_drm_private *priv = dev->dev_private;
|
||||
struct drm_etnaviv_pm_signal *args = data;
|
||||
struct etnaviv_gpu *gpu;
|
||||
|
||||
if (args->pipe >= ETNA_MAX_PIPES)
|
||||
return -EINVAL;
|
||||
|
||||
gpu = priv->gpu[args->pipe];
|
||||
if (!gpu)
|
||||
return -ENXIO;
|
||||
|
||||
return etnaviv_pm_query_sig(gpu, args);
|
||||
}
|
||||
|
||||
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
|
||||
#define ETNA_IOCTL(n, func, flags) \
|
||||
DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
|
||||
|
@ -463,6 +498,8 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
|
|||
ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
|
||||
ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW),
|
||||
ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
|
||||
ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_AUTH|DRM_RENDER_ALLOW),
|
||||
ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_AUTH|DRM_RENDER_ALLOW),
|
||||
};
|
||||
|
||||
static const struct vm_operations_struct vm_ops = {
|
||||
|
@ -513,7 +550,7 @@ static struct drm_driver etnaviv_drm_driver = {
|
|||
.desc = "etnaviv DRM",
|
||||
.date = "20151214",
|
||||
.major = 1,
|
||||
.minor = 1,
|
||||
.minor = 2,
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -26,7 +26,6 @@
|
|||
#include <linux/pm_runtime.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/iommu.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/sizes.h>
|
||||
|
||||
|
@ -92,15 +91,12 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
|
|||
void etnaviv_gem_free_object(struct drm_gem_object *obj);
|
||||
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
|
||||
u32 size, u32 flags, u32 *handle);
|
||||
struct drm_gem_object *etnaviv_gem_new_locked(struct drm_device *dev,
|
||||
u32 size, u32 flags);
|
||||
struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
|
||||
u32 size, u32 flags);
|
||||
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
|
||||
uintptr_t ptr, u32 size, u32 flags, u32 *handle);
|
||||
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
|
||||
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
|
||||
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
|
||||
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
|
||||
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
|
||||
struct etnaviv_cmdbuf *cmdbuf);
|
||||
void etnaviv_validate_init(void);
|
||||
|
|
|
@ -704,25 +704,6 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
|
|||
return ret;
|
||||
}
|
||||
|
||||
struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
|
||||
u32 size, u32 flags)
|
||||
{
|
||||
struct drm_gem_object *obj;
|
||||
int ret;
|
||||
|
||||
obj = __etnaviv_gem_new(dev, size, flags);
|
||||
if (IS_ERR(obj))
|
||||
return obj;
|
||||
|
||||
ret = etnaviv_gem_obj_add(dev, obj);
|
||||
if (ret < 0) {
|
||||
drm_gem_object_put_unlocked(obj);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
|
||||
struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
|
||||
struct etnaviv_gem_object **res)
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include "etnaviv_drv.h"
|
||||
#include "etnaviv_gpu.h"
|
||||
#include "etnaviv_gem.h"
|
||||
#include "etnaviv_perfmon.h"
|
||||
|
||||
/*
|
||||
* Cmdstream submission:
|
||||
|
@ -283,6 +284,54 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
|
||||
struct etnaviv_cmdbuf *cmdbuf,
|
||||
const struct drm_etnaviv_gem_submit_pmr *pmrs,
|
||||
u32 nr_pms)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
for (i = 0; i < nr_pms; i++) {
|
||||
const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
|
||||
struct etnaviv_gem_submit_bo *bo;
|
||||
int ret;
|
||||
|
||||
ret = submit_bo(submit, r->read_idx, &bo);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* at offset 0 a sequence number gets stored used for userspace sync */
|
||||
if (r->read_offset == 0) {
|
||||
DRM_ERROR("perfmon request: offset is 0");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (r->read_offset >= bo->obj->base.size - sizeof(u32)) {
|
||||
DRM_ERROR("perfmon request: offset %u outside object", i);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (r->flags & ~(ETNA_PM_PROCESS_PRE | ETNA_PM_PROCESS_POST)) {
|
||||
DRM_ERROR("perfmon request: flags are not valid");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (etnaviv_pm_req_validate(r, cmdbuf->exec_state)) {
|
||||
DRM_ERROR("perfmon request: domain or signal not valid");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
cmdbuf->pmrs[i].flags = r->flags;
|
||||
cmdbuf->pmrs[i].domain = r->domain;
|
||||
cmdbuf->pmrs[i].signal = r->signal;
|
||||
cmdbuf->pmrs[i].sequence = r->sequence;
|
||||
cmdbuf->pmrs[i].offset = r->read_offset;
|
||||
cmdbuf->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void submit_cleanup(struct etnaviv_gem_submit *submit)
|
||||
{
|
||||
unsigned i;
|
||||
|
@ -306,6 +355,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
|
|||
struct etnaviv_drm_private *priv = dev->dev_private;
|
||||
struct drm_etnaviv_gem_submit *args = data;
|
||||
struct drm_etnaviv_gem_submit_reloc *relocs;
|
||||
struct drm_etnaviv_gem_submit_pmr *pmrs;
|
||||
struct drm_etnaviv_gem_submit_bo *bos;
|
||||
struct etnaviv_gem_submit *submit;
|
||||
struct etnaviv_cmdbuf *cmdbuf;
|
||||
|
@ -347,11 +397,12 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
|
|||
*/
|
||||
bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
|
||||
relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
|
||||
pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
|
||||
stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
|
||||
cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
|
||||
ALIGN(args->stream_size, 8) + 8,
|
||||
args->nr_bos);
|
||||
if (!bos || !relocs || !stream || !cmdbuf) {
|
||||
args->nr_bos, args->nr_pmrs);
|
||||
if (!bos || !relocs || !pmrs || !stream || !cmdbuf) {
|
||||
ret = -ENOMEM;
|
||||
goto err_submit_cmds;
|
||||
}
|
||||
|
@ -373,6 +424,14 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
|
|||
goto err_submit_cmds;
|
||||
}
|
||||
|
||||
ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
|
||||
args->nr_pmrs * sizeof(*pmrs));
|
||||
if (ret) {
|
||||
ret = -EFAULT;
|
||||
goto err_submit_cmds;
|
||||
}
|
||||
cmdbuf->nr_pmrs = args->nr_pmrs;
|
||||
|
||||
ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
|
||||
args->stream_size);
|
||||
if (ret) {
|
||||
|
@ -441,6 +500,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
|
|||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = submit_perfmon_validate(submit, cmdbuf, pmrs, args->nr_pmrs);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
memcpy(cmdbuf->vaddr, stream, args->stream_size);
|
||||
cmdbuf->user_size = ALIGN(args->stream_size, 8);
|
||||
|
||||
|
@ -496,6 +559,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
|
|||
kvfree(bos);
|
||||
if (relocs)
|
||||
kvfree(relocs);
|
||||
if (pmrs)
|
||||
kvfree(pmrs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include "etnaviv_gpu.h"
|
||||
#include "etnaviv_gem.h"
|
||||
#include "etnaviv_mmu.h"
|
||||
#include "etnaviv_perfmon.h"
|
||||
#include "common.xml.h"
|
||||
#include "state.xml.h"
|
||||
#include "state_hi.xml.h"
|
||||
|
@ -420,9 +421,10 @@ static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
|
|||
gpu->base_rate_shader >> gpu->freq_scale);
|
||||
} else {
|
||||
unsigned int fscale = 1 << (6 - gpu->freq_scale);
|
||||
u32 clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
|
||||
VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
|
||||
u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
|
||||
|
||||
clock &= ~VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK;
|
||||
clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
|
||||
etnaviv_gpu_load_clock(gpu, clock);
|
||||
}
|
||||
}
|
||||
|
@ -433,24 +435,14 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
|
|||
unsigned long timeout;
|
||||
bool failed = true;
|
||||
|
||||
/* TODO
|
||||
*
|
||||
* - clock gating
|
||||
* - puls eater
|
||||
* - what about VG?
|
||||
*/
|
||||
|
||||
/* We hope that the GPU resets in under one second */
|
||||
timeout = jiffies + msecs_to_jiffies(1000);
|
||||
|
||||
while (time_is_after_jiffies(timeout)) {
|
||||
/* enable clock */
|
||||
etnaviv_gpu_update_clock(gpu);
|
||||
|
||||
control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
|
||||
|
||||
/* Wait for stable clock. Vivante's code waited for 1ms */
|
||||
usleep_range(1000, 10000);
|
||||
unsigned int fscale = 1 << (6 - gpu->freq_scale);
|
||||
control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
|
||||
etnaviv_gpu_load_clock(gpu, control);
|
||||
|
||||
/* isolate the GPU. */
|
||||
control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
|
||||
|
@ -461,7 +453,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
|
|||
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
|
||||
|
||||
/* wait for reset. */
|
||||
msleep(1);
|
||||
usleep_range(10, 20);
|
||||
|
||||
/* reset soft reset bit. */
|
||||
control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
|
||||
|
@ -490,6 +482,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
|
|||
continue;
|
||||
}
|
||||
|
||||
/* disable debug registers, as they are not normally needed */
|
||||
control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
|
||||
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
|
||||
|
||||
failed = false;
|
||||
break;
|
||||
}
|
||||
|
@ -721,7 +717,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
|
|||
}
|
||||
|
||||
/* Create buffer: */
|
||||
gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0);
|
||||
gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0, 0);
|
||||
if (!gpu->buffer) {
|
||||
ret = -ENOMEM;
|
||||
dev_err(gpu->dev, "could not create command buffer\n");
|
||||
|
@ -739,10 +735,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
|
|||
/* Setup event management */
|
||||
spin_lock_init(&gpu->event_spinlock);
|
||||
init_completion(&gpu->event_free);
|
||||
for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
|
||||
gpu->event[i].used = false;
|
||||
bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
|
||||
for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
|
||||
complete(&gpu->event_free);
|
||||
}
|
||||
|
||||
/* Now program the hardware */
|
||||
mutex_lock(&gpu->lock);
|
||||
|
@ -926,7 +921,7 @@ static void recover_worker(struct work_struct *work)
|
|||
struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
|
||||
recover_work);
|
||||
unsigned long flags;
|
||||
unsigned int i;
|
||||
unsigned int i = 0;
|
||||
|
||||
dev_err(gpu->dev, "hangcheck recover!\n");
|
||||
|
||||
|
@ -945,14 +940,12 @@ static void recover_worker(struct work_struct *work)
|
|||
|
||||
/* complete all events, the GPU won't do it after the reset */
|
||||
spin_lock_irqsave(&gpu->event_spinlock, flags);
|
||||
for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
|
||||
if (!gpu->event[i].used)
|
||||
continue;
|
||||
for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS) {
|
||||
dma_fence_signal(gpu->event[i].fence);
|
||||
gpu->event[i].fence = NULL;
|
||||
gpu->event[i].used = false;
|
||||
complete(&gpu->event_free);
|
||||
}
|
||||
bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
|
||||
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
|
||||
gpu->completed_fence = gpu->active_fence;
|
||||
|
||||
|
@ -1140,30 +1133,45 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
|
|||
* event management:
|
||||
*/
|
||||
|
||||
static unsigned int event_alloc(struct etnaviv_gpu *gpu)
|
||||
static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
|
||||
unsigned int *events)
|
||||
{
|
||||
unsigned long ret, flags;
|
||||
unsigned int i, event = ~0U;
|
||||
unsigned long flags, timeout = msecs_to_jiffies(10 * 10000);
|
||||
unsigned i, acquired = 0;
|
||||
|
||||
ret = wait_for_completion_timeout(&gpu->event_free,
|
||||
msecs_to_jiffies(10 * 10000));
|
||||
if (!ret)
|
||||
dev_err(gpu->dev, "wait_for_completion_timeout failed");
|
||||
for (i = 0; i < nr_events; i++) {
|
||||
unsigned long ret;
|
||||
|
||||
ret = wait_for_completion_timeout(&gpu->event_free, timeout);
|
||||
|
||||
if (!ret) {
|
||||
dev_err(gpu->dev, "wait_for_completion_timeout failed");
|
||||
goto out;
|
||||
}
|
||||
|
||||
acquired++;
|
||||
timeout = ret;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&gpu->event_spinlock, flags);
|
||||
|
||||
/* find first free event */
|
||||
for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
|
||||
if (gpu->event[i].used == false) {
|
||||
gpu->event[i].used = true;
|
||||
event = i;
|
||||
break;
|
||||
}
|
||||
for (i = 0; i < nr_events; i++) {
|
||||
int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
|
||||
|
||||
events[i] = event;
|
||||
memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
|
||||
set_bit(event, gpu->event_bitmap);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
|
||||
|
||||
return event;
|
||||
return 0;
|
||||
|
||||
out:
|
||||
for (i = 0; i < acquired; i++)
|
||||
complete(&gpu->event_free);
|
||||
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
|
||||
|
@ -1172,12 +1180,12 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
|
|||
|
||||
spin_lock_irqsave(&gpu->event_spinlock, flags);
|
||||
|
||||
if (gpu->event[event].used == false) {
|
||||
if (!test_bit(event, gpu->event_bitmap)) {
|
||||
dev_warn(gpu->dev, "event %u is already marked as free",
|
||||
event);
|
||||
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
|
||||
} else {
|
||||
gpu->event[event].used = false;
|
||||
clear_bit(event, gpu->event_bitmap);
|
||||
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
|
||||
|
||||
complete(&gpu->event_free);
|
||||
|
@ -1311,12 +1319,71 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
|
|||
pm_runtime_put_autosuspend(gpu->dev);
|
||||
}
|
||||
|
||||
static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
|
||||
struct etnaviv_event *event, unsigned int flags)
|
||||
{
|
||||
const struct etnaviv_cmdbuf *cmdbuf = event->cmdbuf;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < cmdbuf->nr_pmrs; i++) {
|
||||
const struct etnaviv_perfmon_request *pmr = cmdbuf->pmrs + i;
|
||||
|
||||
if (pmr->flags == flags)
|
||||
etnaviv_perfmon_process(gpu, pmr);
|
||||
}
|
||||
}
|
||||
|
||||
static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
|
||||
struct etnaviv_event *event)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
/* disable clock gating */
|
||||
val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
|
||||
val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
|
||||
gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
|
||||
|
||||
/* enable debug register */
|
||||
val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
|
||||
val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
|
||||
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
|
||||
|
||||
sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
|
||||
}
|
||||
|
||||
static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
|
||||
struct etnaviv_event *event)
|
||||
{
|
||||
const struct etnaviv_cmdbuf *cmdbuf = event->cmdbuf;
|
||||
unsigned int i;
|
||||
u32 val;
|
||||
|
||||
sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
|
||||
|
||||
for (i = 0; i < cmdbuf->nr_pmrs; i++) {
|
||||
const struct etnaviv_perfmon_request *pmr = cmdbuf->pmrs + i;
|
||||
|
||||
*pmr->bo_vma = pmr->sequence;
|
||||
}
|
||||
|
||||
/* disable debug register */
|
||||
val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
|
||||
val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
|
||||
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
|
||||
|
||||
/* enable clock gating */
|
||||
val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
|
||||
val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
|
||||
gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
|
||||
}
|
||||
|
||||
|
||||
/* add bo's to gpu's ring, and kick gpu: */
|
||||
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
|
||||
struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
|
||||
{
|
||||
struct dma_fence *fence;
|
||||
unsigned int event, i;
|
||||
unsigned int i, nr_events = 1, event[3];
|
||||
int ret;
|
||||
|
||||
ret = etnaviv_gpu_pm_get_sync(gpu);
|
||||
|
@ -1332,10 +1399,19 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
|
|||
*
|
||||
*/
|
||||
|
||||
event = event_alloc(gpu);
|
||||
if (unlikely(event == ~0U)) {
|
||||
DRM_ERROR("no free event\n");
|
||||
ret = -EBUSY;
|
||||
/*
|
||||
* if there are performance monitor requests we need to have
|
||||
* - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
|
||||
* requests.
|
||||
* - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
|
||||
* and update the sequence number for userspace.
|
||||
*/
|
||||
if (cmdbuf->nr_pmrs)
|
||||
nr_events = 3;
|
||||
|
||||
ret = event_alloc(gpu, nr_events, event);
|
||||
if (ret) {
|
||||
DRM_ERROR("no free events\n");
|
||||
goto out_pm_put;
|
||||
}
|
||||
|
||||
|
@ -1343,12 +1419,14 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
|
|||
|
||||
fence = etnaviv_gpu_fence_alloc(gpu);
|
||||
if (!fence) {
|
||||
event_free(gpu, event);
|
||||
for (i = 0; i < nr_events; i++)
|
||||
event_free(gpu, event[i]);
|
||||
|
||||
ret = -ENOMEM;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
gpu->event[event].fence = fence;
|
||||
gpu->event[event[0]].fence = fence;
|
||||
submit->fence = dma_fence_get(fence);
|
||||
gpu->active_fence = submit->fence->seqno;
|
||||
|
||||
|
@ -1358,7 +1436,19 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
|
|||
gpu->lastctx = cmdbuf->ctx;
|
||||
}
|
||||
|
||||
etnaviv_buffer_queue(gpu, event, cmdbuf);
|
||||
if (cmdbuf->nr_pmrs) {
|
||||
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
|
||||
gpu->event[event[1]].cmdbuf = cmdbuf;
|
||||
etnaviv_sync_point_queue(gpu, event[1]);
|
||||
}
|
||||
|
||||
etnaviv_buffer_queue(gpu, event[0], cmdbuf);
|
||||
|
||||
if (cmdbuf->nr_pmrs) {
|
||||
gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
|
||||
gpu->event[event[2]].cmdbuf = cmdbuf;
|
||||
etnaviv_sync_point_queue(gpu, event[2]);
|
||||
}
|
||||
|
||||
cmdbuf->fence = fence;
|
||||
list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);
|
||||
|
@ -1394,6 +1484,24 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void etnaviv_process_sync_point(struct etnaviv_gpu *gpu,
|
||||
struct etnaviv_event *event)
|
||||
{
|
||||
u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
|
||||
|
||||
event->sync_point(gpu, event);
|
||||
etnaviv_gpu_start_fe(gpu, addr + 2, 2);
|
||||
}
|
||||
|
||||
static void sync_point_worker(struct work_struct *work)
|
||||
{
|
||||
struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
|
||||
sync_point_work);
|
||||
|
||||
etnaviv_process_sync_point(gpu, &gpu->event[gpu->sync_point_event]);
|
||||
event_free(gpu, gpu->sync_point_event);
|
||||
}
|
||||
|
||||
/*
|
||||
* Init/Cleanup:
|
||||
*/
|
||||
|
@ -1440,7 +1548,15 @@ static irqreturn_t irq_handler(int irq, void *data)
|
|||
|
||||
dev_dbg(gpu->dev, "event %u\n", event);
|
||||
|
||||
if (gpu->event[event].sync_point) {
|
||||
gpu->sync_point_event = event;
|
||||
etnaviv_queue_work(gpu->drm, &gpu->sync_point_work);
|
||||
}
|
||||
|
||||
fence = gpu->event[event].fence;
|
||||
if (!fence)
|
||||
continue;
|
||||
|
||||
gpu->event[event].fence = NULL;
|
||||
dma_fence_signal(fence);
|
||||
|
||||
|
@ -1645,6 +1761,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
|
|||
|
||||
INIT_LIST_HEAD(&gpu->active_cmd_list);
|
||||
INIT_WORK(&gpu->retire_work, retire_worker);
|
||||
INIT_WORK(&gpu->sync_point_work, sync_point_worker);
|
||||
INIT_WORK(&gpu->recover_work, recover_worker);
|
||||
init_waitqueue_head(&gpu->fence_event);
|
||||
|
||||
|
|
|
@ -88,13 +88,17 @@ struct etnaviv_chip_identity {
|
|||
};
|
||||
|
||||
struct etnaviv_event {
|
||||
bool used;
|
||||
struct dma_fence *fence;
|
||||
struct etnaviv_cmdbuf *cmdbuf;
|
||||
|
||||
void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
|
||||
};
|
||||
|
||||
struct etnaviv_cmdbuf_suballoc;
|
||||
struct etnaviv_cmdbuf;
|
||||
|
||||
#define ETNA_NR_EVENTS 30
|
||||
|
||||
struct etnaviv_gpu {
|
||||
struct drm_device *drm;
|
||||
struct thermal_cooling_device *cooling;
|
||||
|
@ -112,7 +116,8 @@ struct etnaviv_gpu {
|
|||
u32 memory_base;
|
||||
|
||||
/* event management: */
|
||||
struct etnaviv_event event[30];
|
||||
DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
|
||||
struct etnaviv_event event[ETNA_NR_EVENTS];
|
||||
struct completion event_free;
|
||||
spinlock_t event_spinlock;
|
||||
|
||||
|
@ -133,6 +138,10 @@ struct etnaviv_gpu {
|
|||
/* worker for handling active-list retiring: */
|
||||
struct work_struct retire_work;
|
||||
|
||||
/* worker for handling 'sync' points: */
|
||||
struct work_struct sync_point_work;
|
||||
int sync_point_event;
|
||||
|
||||
void __iomem *mmio;
|
||||
int irq;
|
||||
|
||||
|
|
|
@ -14,7 +14,6 @@
|
|||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/iommu.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/sizes.h>
|
||||
#include <linux/slab.h>
|
||||
|
@ -31,174 +30,115 @@
|
|||
|
||||
#define GPU_MEM_START 0x80000000
|
||||
|
||||
struct etnaviv_iommu_domain_pgtable {
|
||||
u32 *pgtable;
|
||||
dma_addr_t paddr;
|
||||
struct etnaviv_iommuv1_domain {
|
||||
struct etnaviv_iommu_domain base;
|
||||
u32 *pgtable_cpu;
|
||||
dma_addr_t pgtable_dma;
|
||||
};
|
||||
|
||||
struct etnaviv_iommu_domain {
|
||||
struct iommu_domain domain;
|
||||
struct device *dev;
|
||||
void *bad_page_cpu;
|
||||
dma_addr_t bad_page_dma;
|
||||
struct etnaviv_iommu_domain_pgtable pgtable;
|
||||
spinlock_t map_lock;
|
||||
};
|
||||
|
||||
static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain)
|
||||
static struct etnaviv_iommuv1_domain *
|
||||
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
|
||||
{
|
||||
return container_of(domain, struct etnaviv_iommu_domain, domain);
|
||||
return container_of(domain, struct etnaviv_iommuv1_domain, base);
|
||||
}
|
||||
|
||||
static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
|
||||
size_t size)
|
||||
{
|
||||
pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL);
|
||||
if (!pgtable->pgtable)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
|
||||
size_t size)
|
||||
{
|
||||
dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr);
|
||||
}
|
||||
|
||||
static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
|
||||
unsigned long iova)
|
||||
{
|
||||
/* calcuate index into page table */
|
||||
unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
|
||||
phys_addr_t paddr;
|
||||
|
||||
paddr = pgtable->pgtable[index];
|
||||
|
||||
return paddr;
|
||||
}
|
||||
|
||||
static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
|
||||
unsigned long iova, phys_addr_t paddr)
|
||||
{
|
||||
/* calcuate index into page table */
|
||||
unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
|
||||
|
||||
pgtable->pgtable[index] = paddr;
|
||||
}
|
||||
|
||||
static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
|
||||
static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
|
||||
{
|
||||
u32 *p;
|
||||
int ret, i;
|
||||
int i;
|
||||
|
||||
etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
|
||||
SZ_4K,
|
||||
&etnaviv_domain->bad_page_dma,
|
||||
GFP_KERNEL);
|
||||
if (!etnaviv_domain->bad_page_cpu)
|
||||
etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
|
||||
etnaviv_domain->base.dev,
|
||||
SZ_4K,
|
||||
&etnaviv_domain->base.bad_page_dma,
|
||||
GFP_KERNEL);
|
||||
if (!etnaviv_domain->base.bad_page_cpu)
|
||||
return -ENOMEM;
|
||||
|
||||
p = etnaviv_domain->bad_page_cpu;
|
||||
p = etnaviv_domain->base.bad_page_cpu;
|
||||
for (i = 0; i < SZ_4K / 4; i++)
|
||||
*p++ = 0xdead55aa;
|
||||
|
||||
ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE);
|
||||
if (ret < 0) {
|
||||
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
|
||||
etnaviv_domain->bad_page_cpu,
|
||||
etnaviv_domain->bad_page_dma);
|
||||
return ret;
|
||||
etnaviv_domain->pgtable_cpu =
|
||||
dma_alloc_coherent(etnaviv_domain->base.dev, PT_SIZE,
|
||||
&etnaviv_domain->pgtable_dma,
|
||||
GFP_KERNEL);
|
||||
if (!etnaviv_domain->pgtable_cpu) {
|
||||
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
|
||||
etnaviv_domain->base.bad_page_cpu,
|
||||
etnaviv_domain->base.bad_page_dma);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
for (i = 0; i < PT_ENTRIES; i++)
|
||||
etnaviv_domain->pgtable.pgtable[i] =
|
||||
etnaviv_domain->bad_page_dma;
|
||||
|
||||
spin_lock_init(&etnaviv_domain->map_lock);
|
||||
etnaviv_domain->pgtable_cpu[i] =
|
||||
etnaviv_domain->base.bad_page_dma;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void etnaviv_domain_free(struct iommu_domain *domain)
|
||||
static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
|
||||
{
|
||||
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
|
||||
struct etnaviv_iommuv1_domain *etnaviv_domain =
|
||||
to_etnaviv_domain(domain);
|
||||
|
||||
pgtable_free(&etnaviv_domain->pgtable, PT_SIZE);
|
||||
dma_free_coherent(etnaviv_domain->base.dev, PT_SIZE,
|
||||
etnaviv_domain->pgtable_cpu,
|
||||
etnaviv_domain->pgtable_dma);
|
||||
|
||||
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
|
||||
etnaviv_domain->bad_page_cpu,
|
||||
etnaviv_domain->bad_page_dma);
|
||||
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
|
||||
etnaviv_domain->base.bad_page_cpu,
|
||||
etnaviv_domain->base.bad_page_dma);
|
||||
|
||||
kfree(etnaviv_domain);
|
||||
}
|
||||
|
||||
static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova,
|
||||
phys_addr_t paddr, size_t size, int prot)
|
||||
static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
|
||||
unsigned long iova, phys_addr_t paddr,
|
||||
size_t size, int prot)
|
||||
{
|
||||
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
|
||||
struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
|
||||
unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
|
||||
|
||||
if (size != SZ_4K)
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock(&etnaviv_domain->map_lock);
|
||||
pgtable_write(&etnaviv_domain->pgtable, iova, paddr);
|
||||
spin_unlock(&etnaviv_domain->map_lock);
|
||||
etnaviv_domain->pgtable_cpu[index] = paddr;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain,
|
||||
static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
|
||||
unsigned long iova, size_t size)
|
||||
{
|
||||
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
|
||||
struct etnaviv_iommuv1_domain *etnaviv_domain =
|
||||
to_etnaviv_domain(domain);
|
||||
unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
|
||||
|
||||
if (size != SZ_4K)
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock(&etnaviv_domain->map_lock);
|
||||
pgtable_write(&etnaviv_domain->pgtable, iova,
|
||||
etnaviv_domain->bad_page_dma);
|
||||
spin_unlock(&etnaviv_domain->map_lock);
|
||||
etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;
|
||||
|
||||
return SZ_4K;
|
||||
}
|
||||
|
||||
static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain,
|
||||
dma_addr_t iova)
|
||||
{
|
||||
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
|
||||
|
||||
return pgtable_read(&etnaviv_domain->pgtable, iova);
|
||||
}
|
||||
|
||||
static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain)
|
||||
static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
|
||||
{
|
||||
return PT_SIZE;
|
||||
}
|
||||
|
||||
static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
|
||||
static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
|
||||
{
|
||||
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
|
||||
struct etnaviv_iommuv1_domain *etnaviv_domain =
|
||||
to_etnaviv_domain(domain);
|
||||
|
||||
memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
|
||||
memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
|
||||
}
|
||||
|
||||
static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
|
||||
.ops = {
|
||||
.domain_free = etnaviv_domain_free,
|
||||
.map = etnaviv_iommuv1_map,
|
||||
.unmap = etnaviv_iommuv1_unmap,
|
||||
.iova_to_phys = etnaviv_iommu_iova_to_phys,
|
||||
.pgsize_bitmap = SZ_4K,
|
||||
},
|
||||
.dump_size = etnaviv_iommuv1_dump_size,
|
||||
.dump = etnaviv_iommuv1_dump,
|
||||
};
|
||||
|
||||
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
|
||||
{
|
||||
struct etnaviv_iommu_domain *etnaviv_domain =
|
||||
struct etnaviv_iommuv1_domain *etnaviv_domain =
|
||||
to_etnaviv_domain(gpu->mmu->domain);
|
||||
u32 pgtable;
|
||||
|
||||
|
@ -210,7 +150,7 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
|
|||
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
|
||||
|
||||
/* set page table address in MC */
|
||||
pgtable = (u32)etnaviv_domain->pgtable.paddr;
|
||||
pgtable = (u32)etnaviv_domain->pgtable_dma;
|
||||
|
||||
gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
|
||||
gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
|
||||
|
@ -219,28 +159,37 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
|
|||
gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
|
||||
}
|
||||
|
||||
struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
|
||||
const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
|
||||
.free = etnaviv_iommuv1_domain_free,
|
||||
.map = etnaviv_iommuv1_map,
|
||||
.unmap = etnaviv_iommuv1_unmap,
|
||||
.dump_size = etnaviv_iommuv1_dump_size,
|
||||
.dump = etnaviv_iommuv1_dump,
|
||||
};
|
||||
|
||||
struct etnaviv_iommu_domain *
|
||||
etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
|
||||
{
|
||||
struct etnaviv_iommu_domain *etnaviv_domain;
|
||||
struct etnaviv_iommuv1_domain *etnaviv_domain;
|
||||
struct etnaviv_iommu_domain *domain;
|
||||
int ret;
|
||||
|
||||
etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
|
||||
if (!etnaviv_domain)
|
||||
return NULL;
|
||||
|
||||
etnaviv_domain->dev = gpu->dev;
|
||||
domain = &etnaviv_domain->base;
|
||||
|
||||
etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
|
||||
etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
|
||||
etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
|
||||
etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
|
||||
etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
|
||||
domain->dev = gpu->dev;
|
||||
domain->base = GPU_MEM_START;
|
||||
domain->size = PT_ENTRIES * SZ_4K;
|
||||
domain->ops = &etnaviv_iommuv1_ops;
|
||||
|
||||
ret = __etnaviv_iommu_init(etnaviv_domain);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
return &etnaviv_domain->domain;
|
||||
return &etnaviv_domain->base;
|
||||
|
||||
out_free:
|
||||
kfree(etnaviv_domain);
|
||||
|
|
|
@ -18,11 +18,14 @@
|
|||
#define __ETNAVIV_IOMMU_H__
|
||||
|
||||
struct etnaviv_gpu;
|
||||
struct etnaviv_iommu_domain;
|
||||
|
||||
struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
|
||||
struct etnaviv_iommu_domain *
|
||||
etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
|
||||
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu);
|
||||
|
||||
struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
|
||||
struct etnaviv_iommu_domain *
|
||||
etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
|
||||
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu);
|
||||
|
||||
#endif /* __ETNAVIV_IOMMU_H__ */
|
||||
|
|
|
@ -14,7 +14,6 @@
|
|||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/iommu.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/sizes.h>
|
||||
#include <linux/slab.h>
|
||||
|
@ -40,10 +39,7 @@
|
|||
#define MMUv2_MAX_STLB_ENTRIES 1024
|
||||
|
||||
struct etnaviv_iommuv2_domain {
|
||||
struct iommu_domain domain;
|
||||
struct device *dev;
|
||||
void *bad_page_cpu;
|
||||
dma_addr_t bad_page_dma;
|
||||
struct etnaviv_iommu_domain base;
|
||||
/* M(aster) TLB aka first level pagetable */
|
||||
u32 *mtlb_cpu;
|
||||
dma_addr_t mtlb_dma;
|
||||
|
@ -52,13 +48,15 @@ struct etnaviv_iommuv2_domain {
|
|||
dma_addr_t stlb_dma[1024];
|
||||
};
|
||||
|
||||
static struct etnaviv_iommuv2_domain *to_etnaviv_domain(struct iommu_domain *domain)
|
||||
static struct etnaviv_iommuv2_domain *
|
||||
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
|
||||
{
|
||||
return container_of(domain, struct etnaviv_iommuv2_domain, domain);
|
||||
return container_of(domain, struct etnaviv_iommuv2_domain, base);
|
||||
}
|
||||
|
||||
static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
|
||||
phys_addr_t paddr, size_t size, int prot)
|
||||
static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
|
||||
unsigned long iova, phys_addr_t paddr,
|
||||
size_t size, int prot)
|
||||
{
|
||||
struct etnaviv_iommuv2_domain *etnaviv_domain =
|
||||
to_etnaviv_domain(domain);
|
||||
|
@ -68,7 +66,7 @@ static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
|
|||
if (size != SZ_4K)
|
||||
return -EINVAL;
|
||||
|
||||
if (prot & IOMMU_WRITE)
|
||||
if (prot & ETNAVIV_PROT_WRITE)
|
||||
entry |= MMUv2_PTE_WRITEABLE;
|
||||
|
||||
mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
|
||||
|
@ -79,8 +77,8 @@ static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
|
||||
unsigned long iova, size_t size)
|
||||
static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
|
||||
unsigned long iova, size_t size)
|
||||
{
|
||||
struct etnaviv_iommuv2_domain *etnaviv_domain =
|
||||
to_etnaviv_domain(domain);
|
||||
|
@ -97,38 +95,26 @@ static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
|
|||
return SZ_4K;
|
||||
}
|
||||
|
||||
static phys_addr_t etnaviv_iommuv2_iova_to_phys(struct iommu_domain *domain,
|
||||
dma_addr_t iova)
|
||||
{
|
||||
struct etnaviv_iommuv2_domain *etnaviv_domain =
|
||||
to_etnaviv_domain(domain);
|
||||
int mtlb_entry, stlb_entry;
|
||||
|
||||
mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
|
||||
stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
|
||||
|
||||
return etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] & ~(SZ_4K - 1);
|
||||
}
|
||||
|
||||
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
|
||||
{
|
||||
u32 *p;
|
||||
int ret, i, j;
|
||||
|
||||
/* allocate scratch page */
|
||||
etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
|
||||
SZ_4K,
|
||||
&etnaviv_domain->bad_page_dma,
|
||||
GFP_KERNEL);
|
||||
if (!etnaviv_domain->bad_page_cpu) {
|
||||
etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
|
||||
etnaviv_domain->base.dev,
|
||||
SZ_4K,
|
||||
&etnaviv_domain->base.bad_page_dma,
|
||||
GFP_KERNEL);
|
||||
if (!etnaviv_domain->base.bad_page_cpu) {
|
||||
ret = -ENOMEM;
|
||||
goto fail_mem;
|
||||
}
|
||||
p = etnaviv_domain->bad_page_cpu;
|
||||
p = etnaviv_domain->base.bad_page_cpu;
|
||||
for (i = 0; i < SZ_4K / 4; i++)
|
||||
*p++ = 0xdead55aa;
|
||||
|
||||
etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev,
|
||||
etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
|
||||
SZ_4K,
|
||||
&etnaviv_domain->mtlb_dma,
|
||||
GFP_KERNEL);
|
||||
|
@ -140,7 +126,7 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
|
|||
/* pre-populate STLB pages (may want to switch to on-demand later) */
|
||||
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
|
||||
etnaviv_domain->stlb_cpu[i] =
|
||||
dma_alloc_coherent(etnaviv_domain->dev,
|
||||
dma_alloc_coherent(etnaviv_domain->base.dev,
|
||||
SZ_4K,
|
||||
&etnaviv_domain->stlb_dma[i],
|
||||
GFP_KERNEL);
|
||||
|
@ -159,19 +145,19 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
|
|||
return 0;
|
||||
|
||||
fail_mem:
|
||||
if (etnaviv_domain->bad_page_cpu)
|
||||
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
|
||||
etnaviv_domain->bad_page_cpu,
|
||||
etnaviv_domain->bad_page_dma);
|
||||
if (etnaviv_domain->base.bad_page_cpu)
|
||||
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
|
||||
etnaviv_domain->base.bad_page_cpu,
|
||||
etnaviv_domain->base.bad_page_dma);
|
||||
|
||||
if (etnaviv_domain->mtlb_cpu)
|
||||
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
|
||||
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
|
||||
etnaviv_domain->mtlb_cpu,
|
||||
etnaviv_domain->mtlb_dma);
|
||||
|
||||
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
|
||||
if (etnaviv_domain->stlb_cpu[i])
|
||||
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
|
||||
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
|
||||
etnaviv_domain->stlb_cpu[i],
|
||||
etnaviv_domain->stlb_dma[i]);
|
||||
}
|
||||
|
@ -179,23 +165,23 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
|
||||
static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
|
||||
{
|
||||
struct etnaviv_iommuv2_domain *etnaviv_domain =
|
||||
to_etnaviv_domain(domain);
|
||||
int i;
|
||||
|
||||
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
|
||||
etnaviv_domain->bad_page_cpu,
|
||||
etnaviv_domain->bad_page_dma);
|
||||
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
|
||||
etnaviv_domain->base.bad_page_cpu,
|
||||
etnaviv_domain->base.bad_page_dma);
|
||||
|
||||
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
|
||||
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
|
||||
etnaviv_domain->mtlb_cpu,
|
||||
etnaviv_domain->mtlb_dma);
|
||||
|
||||
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
|
||||
if (etnaviv_domain->stlb_cpu[i])
|
||||
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
|
||||
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
|
||||
etnaviv_domain->stlb_cpu[i],
|
||||
etnaviv_domain->stlb_dma[i]);
|
||||
}
|
||||
|
@ -203,7 +189,7 @@ static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
|
|||
vfree(etnaviv_domain);
|
||||
}
|
||||
|
||||
static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
|
||||
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
|
||||
{
|
||||
struct etnaviv_iommuv2_domain *etnaviv_domain =
|
||||
to_etnaviv_domain(domain);
|
||||
|
@ -217,7 +203,7 @@ static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
|
|||
return dump_size;
|
||||
}
|
||||
|
||||
static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
|
||||
static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
|
||||
{
|
||||
struct etnaviv_iommuv2_domain *etnaviv_domain =
|
||||
to_etnaviv_domain(domain);
|
||||
|
@ -230,18 +216,6 @@ static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
|
|||
memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
|
||||
}
|
||||
|
||||
static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
|
||||
.ops = {
|
||||
.domain_free = etnaviv_iommuv2_domain_free,
|
||||
.map = etnaviv_iommuv2_map,
|
||||
.unmap = etnaviv_iommuv2_unmap,
|
||||
.iova_to_phys = etnaviv_iommuv2_iova_to_phys,
|
||||
.pgsize_bitmap = SZ_4K,
|
||||
},
|
||||
.dump_size = etnaviv_iommuv2_dump_size,
|
||||
.dump = etnaviv_iommuv2_dump,
|
||||
};
|
||||
|
||||
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
|
||||
{
|
||||
struct etnaviv_iommuv2_domain *etnaviv_domain =
|
||||
|
@ -254,35 +228,45 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
|
|||
|
||||
prefetch = etnaviv_buffer_config_mmuv2(gpu,
|
||||
(u32)etnaviv_domain->mtlb_dma,
|
||||
(u32)etnaviv_domain->bad_page_dma);
|
||||
(u32)etnaviv_domain->base.bad_page_dma);
|
||||
etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
|
||||
prefetch);
|
||||
etnaviv_gpu_wait_idle(gpu, 100);
|
||||
|
||||
gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
|
||||
}
|
||||
struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
|
||||
|
||||
const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
|
||||
.free = etnaviv_iommuv2_domain_free,
|
||||
.map = etnaviv_iommuv2_map,
|
||||
.unmap = etnaviv_iommuv2_unmap,
|
||||
.dump_size = etnaviv_iommuv2_dump_size,
|
||||
.dump = etnaviv_iommuv2_dump,
|
||||
};
|
||||
|
||||
struct etnaviv_iommu_domain *
|
||||
etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
|
||||
{
|
||||
struct etnaviv_iommuv2_domain *etnaviv_domain;
|
||||
struct etnaviv_iommu_domain *domain;
|
||||
int ret;
|
||||
|
||||
etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
|
||||
if (!etnaviv_domain)
|
||||
return NULL;
|
||||
|
||||
etnaviv_domain->dev = gpu->dev;
|
||||
domain = &etnaviv_domain->base;
|
||||
|
||||
etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
|
||||
etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
|
||||
etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
|
||||
etnaviv_domain->domain.geometry.aperture_start = 0;
|
||||
etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);
|
||||
domain->dev = gpu->dev;
|
||||
domain->base = 0;
|
||||
domain->size = (u64)SZ_1G * 4;
|
||||
domain->ops = &etnaviv_iommuv2_ops;
|
||||
|
||||
ret = etnaviv_iommuv2_init(etnaviv_domain);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
return &etnaviv_domain->domain;
|
||||
return &etnaviv_domain->base;
|
||||
|
||||
out_free:
|
||||
vfree(etnaviv_domain);
|
||||
|
|
|
@ -22,17 +22,64 @@
|
|||
#include "etnaviv_iommu.h"
|
||||
#include "etnaviv_mmu.h"
|
||||
|
||||
static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
|
||||
unsigned long iova, int flags, void *arg)
|
||||
static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
|
||||
unsigned long iova, size_t size)
|
||||
{
|
||||
DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
|
||||
return 0;
|
||||
size_t unmapped_page, unmapped = 0;
|
||||
size_t pgsize = SZ_4K;
|
||||
|
||||
if (!IS_ALIGNED(iova | size, pgsize)) {
|
||||
pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
|
||||
iova, size, pgsize);
|
||||
return;
|
||||
}
|
||||
|
||||
while (unmapped < size) {
|
||||
unmapped_page = domain->ops->unmap(domain, iova, pgsize);
|
||||
if (!unmapped_page)
|
||||
break;
|
||||
|
||||
iova += unmapped_page;
|
||||
unmapped += unmapped_page;
|
||||
}
|
||||
}
|
||||
|
||||
int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
|
||||
struct sg_table *sgt, unsigned len, int prot)
|
||||
static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
|
||||
unsigned long iova, phys_addr_t paddr,
|
||||
size_t size, int prot)
|
||||
{
|
||||
struct iommu_domain *domain = iommu->domain;
|
||||
unsigned long orig_iova = iova;
|
||||
size_t pgsize = SZ_4K;
|
||||
size_t orig_size = size;
|
||||
int ret = 0;
|
||||
|
||||
if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
|
||||
pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
|
||||
iova, &paddr, size, pgsize);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
while (size) {
|
||||
ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
iova += pgsize;
|
||||
paddr += pgsize;
|
||||
size -= pgsize;
|
||||
}
|
||||
|
||||
/* unroll mapping in case something went wrong */
|
||||
if (ret)
|
||||
etnaviv_domain_unmap(domain, orig_iova, orig_size - size);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
|
||||
struct sg_table *sgt, unsigned len, int prot)
|
||||
{
|
||||
struct etnaviv_iommu_domain *domain = iommu->domain;
|
||||
struct scatterlist *sg;
|
||||
unsigned int da = iova;
|
||||
unsigned int i, j;
|
||||
|
@ -47,7 +94,7 @@ int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
|
|||
|
||||
VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
|
||||
|
||||
ret = iommu_map(domain, da, pa, bytes, prot);
|
||||
ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
|
@ -62,27 +109,24 @@ int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
|
|||
for_each_sg(sgt->sgl, sg, i, j) {
|
||||
size_t bytes = sg_dma_len(sg) + sg->offset;
|
||||
|
||||
iommu_unmap(domain, da, bytes);
|
||||
etnaviv_domain_unmap(domain, da, bytes);
|
||||
da += bytes;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
|
||||
struct sg_table *sgt, unsigned len)
|
||||
static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
|
||||
struct sg_table *sgt, unsigned len)
|
||||
{
|
||||
struct iommu_domain *domain = iommu->domain;
|
||||
struct etnaviv_iommu_domain *domain = iommu->domain;
|
||||
struct scatterlist *sg;
|
||||
unsigned int da = iova;
|
||||
int i;
|
||||
|
||||
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
|
||||
size_t bytes = sg_dma_len(sg) + sg->offset;
|
||||
size_t unmapped;
|
||||
|
||||
unmapped = iommu_unmap(domain, da, bytes);
|
||||
if (unmapped < bytes)
|
||||
return unmapped;
|
||||
etnaviv_domain_unmap(domain, da, bytes);
|
||||
|
||||
VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
|
||||
|
||||
|
@ -90,8 +134,6 @@ int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
|
|||
|
||||
da += bytes;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
|
||||
|
@ -237,7 +279,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
|
|||
mmu->last_iova = node->start + etnaviv_obj->base.size;
|
||||
mapping->iova = node->start;
|
||||
ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
|
||||
IOMMU_READ | IOMMU_WRITE);
|
||||
ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
|
||||
|
||||
if (ret < 0) {
|
||||
drm_mm_remove_node(node);
|
||||
|
@ -271,7 +313,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
|
|||
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
|
||||
{
|
||||
drm_mm_takedown(&mmu->mm);
|
||||
iommu_domain_free(mmu->domain);
|
||||
mmu->domain->ops->free(mmu->domain);
|
||||
kfree(mmu);
|
||||
}
|
||||
|
||||
|
@ -303,11 +345,7 @@ struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
|
|||
mutex_init(&mmu->lock);
|
||||
INIT_LIST_HEAD(&mmu->mappings);
|
||||
|
||||
drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
|
||||
mmu->domain->geometry.aperture_end -
|
||||
mmu->domain->geometry.aperture_start + 1);
|
||||
|
||||
iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);
|
||||
drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);
|
||||
|
||||
return mmu;
|
||||
}
|
||||
|
@ -338,8 +376,8 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
|
|||
mutex_unlock(&mmu->lock);
|
||||
return ret;
|
||||
}
|
||||
ret = iommu_map(mmu->domain, vram_node->start, paddr, size,
|
||||
IOMMU_READ);
|
||||
ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
|
||||
size, ETNAVIV_PROT_READ);
|
||||
if (ret < 0) {
|
||||
drm_mm_remove_node(vram_node);
|
||||
mutex_unlock(&mmu->lock);
|
||||
|
@ -362,25 +400,17 @@ void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
|
|||
|
||||
if (mmu->version == ETNAVIV_IOMMU_V2) {
|
||||
mutex_lock(&mmu->lock);
|
||||
iommu_unmap(mmu->domain,iova, size);
|
||||
etnaviv_domain_unmap(mmu->domain, iova, size);
|
||||
drm_mm_remove_node(vram_node);
|
||||
mutex_unlock(&mmu->lock);
|
||||
}
|
||||
}
|
||||
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
|
||||
{
|
||||
struct etnaviv_iommu_ops *ops;
|
||||
|
||||
ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
|
||||
|
||||
return ops->dump_size(iommu->domain);
|
||||
return iommu->domain->ops->dump_size(iommu->domain);
|
||||
}
|
||||
|
||||
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
|
||||
{
|
||||
struct etnaviv_iommu_ops *ops;
|
||||
|
||||
ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
|
||||
|
||||
ops->dump(iommu->domain, buf);
|
||||
iommu->domain->ops->dump(iommu->domain, buf);
|
||||
}
|
||||
|
|
|
@@ -17,7 +17,8 @@
 #ifndef __ETNAVIV_MMU_H__
 #define __ETNAVIV_MMU_H__

-#include <linux/iommu.h>
+#define ETNAVIV_PROT_READ  (1 << 0)
+#define ETNAVIV_PROT_WRITE (1 << 1)

 enum etnaviv_iommu_version {
 	ETNAVIV_IOMMU_V1 = 0,
@@ -26,16 +27,31 @@ enum etnaviv_iommu_version {

 struct etnaviv_gpu;
 struct etnaviv_vram_mapping;
+struct etnaviv_iommu_domain;

-struct etnaviv_iommu_ops {
-	struct iommu_ops ops;
-	size_t (*dump_size)(struct iommu_domain *);
-	void (*dump)(struct iommu_domain *, void *);
+struct etnaviv_iommu_domain_ops {
+	void (*free)(struct etnaviv_iommu_domain *);
+	int (*map)(struct etnaviv_iommu_domain *domain, unsigned long iova,
+		   phys_addr_t paddr, size_t size, int prot);
+	size_t (*unmap)(struct etnaviv_iommu_domain *domain, unsigned long iova,
+			size_t size);
+	size_t (*dump_size)(struct etnaviv_iommu_domain *);
+	void (*dump)(struct etnaviv_iommu_domain *, void *);
 };

+struct etnaviv_iommu_domain {
+	struct device *dev;
+	void *bad_page_cpu;
+	dma_addr_t bad_page_dma;
+	u64 base;
+	u64 size;
+
+	const struct etnaviv_iommu_domain_ops *ops;
+};
+
 struct etnaviv_iommu {
 	struct etnaviv_gpu *gpu;
-	struct iommu_domain *domain;
+	struct etnaviv_iommu_domain *domain;

 	enum etnaviv_iommu_version version;

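Each MMU backend (v1/v2) now implements this driver-private interface instead of exposing a struct iommu_ops. A hypothetical backend would embed the generic domain and fill in the table roughly as below; the names are illustrative, not the actual etnaviv_iommu.c code:

	/* Illustrative only: how a backend might implement this interface. */
	struct example_etnaviv_domain {
		struct etnaviv_iommu_domain base;
		u32 *pgtable_cpu;	/* one u32 entry per 4K page, as in MMUv1 */
		dma_addr_t pgtable_dma;
	};

	static int example_domain_map(struct etnaviv_iommu_domain *domain,
				      unsigned long iova, phys_addr_t paddr,
				      size_t size, int prot)
	{
		struct example_etnaviv_domain *edom =
			container_of(domain, struct example_etnaviv_domain, base);

		/* index relative to domain->base; prot handling elided */
		edom->pgtable_cpu[(iova - domain->base) / SZ_4K] = (u32)paddr;

		return 0;
	}

	static const struct etnaviv_iommu_domain_ops example_domain_ops = {
		.map = example_domain_map,
		/* .free, .unmap, .dump_size and .dump are wired up the same way */
	};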
@@ -49,18 +65,11 @@ struct etnaviv_iommu {

 struct etnaviv_gem_object;

-int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names,
-	int cnt);
-int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
-	struct sg_table *sgt, unsigned len, int prot);
-int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
-	struct sg_table *sgt, unsigned len);
 int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
 	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
 	struct etnaviv_vram_mapping *mapping);
 void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
 	struct etnaviv_vram_mapping *mapping);
-void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);

 int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
 		struct drm_mm_node *vram_node, size_t size,
@@ -73,6 +82,7 @@ size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
 void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);

 struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu);
+void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
 void etnaviv_iommu_restore(struct etnaviv_gpu *gpu);

 #endif /* __ETNAVIV_MMU_H__ */
@@ -0,0 +1,495 @@
/*
 * Copyright (C) 2017 Etnaviv Project
 * Copyright (C) 2017 Zodiac Inflight Innovations
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "etnaviv_gpu.h"
#include "etnaviv_perfmon.h"
#include "state_hi.xml.h"

struct etnaviv_pm_domain;

struct etnaviv_pm_signal {
	char name[64];
	u32 data;

	u32 (*sample)(struct etnaviv_gpu *gpu,
		      const struct etnaviv_pm_domain *domain,
		      const struct etnaviv_pm_signal *signal);
};

struct etnaviv_pm_domain {
	char name[64];

	/* profile register */
	u32 profile_read;
	u32 profile_config;

	u8 nr_signals;
	const struct etnaviv_pm_signal *signal;
};

struct etnaviv_pm_domain_meta {
	const struct etnaviv_pm_domain *domains;
	u32 nr_domains;
};

static u32 simple_reg_read(struct etnaviv_gpu *gpu,
	const struct etnaviv_pm_domain *domain,
	const struct etnaviv_pm_signal *signal)
{
	return gpu_read(gpu, signal->data);
}

static u32 perf_reg_read(struct etnaviv_gpu *gpu,
	const struct etnaviv_pm_domain *domain,
	const struct etnaviv_pm_signal *signal)
{
	gpu_write(gpu, domain->profile_config, signal->data);

	return gpu_read(gpu, domain->profile_read);
}

static u32 pipe_reg_read(struct etnaviv_gpu *gpu,
	const struct etnaviv_pm_domain *domain,
	const struct etnaviv_pm_signal *signal)
{
	u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
	u32 value = 0;
	unsigned i;

	for (i = 0; i < gpu->identity.pixel_pipes; i++) {
		clock &= ~(VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK);
		clock |= VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(i);
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
		gpu_write(gpu, domain->profile_config, signal->data);
		value += gpu_read(gpu, domain->profile_read);
	}

	/* switch back to pixel pipe 0 to prevent GPU hang */
	clock &= ~(VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK);
	clock |= VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(0);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);

	return value;
}

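Three sampler flavours cover every signal below: simple_reg_read() reads a counter register directly, perf_reg_read() first selects the counter through the domain's profile_config register, and pipe_reg_read() repeats that once per pixel pipe and sums the results. Adding a signal is just one more array entry wired to the right sampler; a purely hypothetical example (not part of this commit):

	{
		"EXAMPLE_COUNTER",	/* name reported through PM_QUERY_SIG */
		0x00000000,		/* value written to the domain's profile_config */
		&perf_reg_read		/* sampler invoked by etnaviv_perfmon_process() */
	},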
static const struct etnaviv_pm_domain doms_3d[] = {
	{
		.name = "HI",
		.profile_read = VIVS_MC_PROFILE_HI_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG2,
		.nr_signals = 5,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"TOTAL_CYCLES",
				VIVS_HI_PROFILE_TOTAL_CYCLES,
				&simple_reg_read
			},
			{
				"IDLE_CYCLES",
				VIVS_HI_PROFILE_IDLE_CYCLES,
				&simple_reg_read
			},
			{
				"AXI_CYCLES_READ_REQUEST_STALLED",
				VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED,
				&perf_reg_read
			},
			{
				"AXI_CYCLES_WRITE_REQUEST_STALLED",
				VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED,
				&perf_reg_read
			},
			{
				"AXI_CYCLES_WRITE_DATA_STALLED",
				VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED,
				&perf_reg_read
			}
		}
	},
	{
		.name = "PE",
		.profile_read = VIVS_MC_PROFILE_PE_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG0,
		.nr_signals = 4,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"PIXEL_COUNT_KILLED_BY_COLOR_PIPE",
				VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE,
				&pipe_reg_read
			},
			{
				"PIXEL_COUNT_KILLED_BY_DEPTH_PIPE",
				VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE,
				&pipe_reg_read
			},
			{
				"PIXEL_COUNT_DRAWN_BY_COLOR_PIPE",
				VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_COLOR_PIPE,
				&pipe_reg_read
			},
			{
				"PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE",
				VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE,
				&pipe_reg_read
			}
		}
	},
	{
		.name = "SH",
		.profile_read = VIVS_MC_PROFILE_SH_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG0,
		.nr_signals = 9,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"SHADER_CYCLES",
				VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES,
				&perf_reg_read
			},
			{
				"PS_INST_COUNTER",
				VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER,
				&perf_reg_read
			},
			{
				"RENDERED_PIXEL_COUNTER",
				VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_PIXEL_COUNTER,
				&perf_reg_read
			},
			{
				"VS_INST_COUNTER",
				VIVS_MC_PROFILE_CONFIG0_SH_VS_INST_COUNTER,
				&pipe_reg_read
			},
			{
				"RENDERED_VERTICE_COUNTER",
				VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_VERTICE_COUNTER,
				&pipe_reg_read
			},
			{
				"VTX_BRANCH_INST_COUNTER",
				VIVS_MC_PROFILE_CONFIG0_SH_VTX_BRANCH_INST_COUNTER,
				&pipe_reg_read
			},
			{
				"VTX_TEXLD_INST_COUNTER",
				VIVS_MC_PROFILE_CONFIG0_SH_VTX_TEXLD_INST_COUNTER,
				&pipe_reg_read
			},
			{
				"PXL_BRANCH_INST_COUNTER",
				VIVS_MC_PROFILE_CONFIG0_SH_PXL_BRANCH_INST_COUNTER,
				&pipe_reg_read
			},
			{
				"PXL_TEXLD_INST_COUNTER",
				VIVS_MC_PROFILE_CONFIG0_SH_PXL_TEXLD_INST_COUNTER,
				&pipe_reg_read
			}
		}
	},
	{
		.name = "PA",
		.profile_read = VIVS_MC_PROFILE_PA_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG1,
		.nr_signals = 6,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"INPUT_VTX_COUNTER",
				VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER,
				&perf_reg_read
			},
			{
				"INPUT_PRIM_COUNTER",
				VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER,
				&perf_reg_read
			},
			{
				"OUTPUT_PRIM_COUNTER",
				VIVS_MC_PROFILE_CONFIG1_PA_OUTPUT_PRIM_COUNTER,
				&perf_reg_read
			},
			{
				"DEPTH_CLIPPED_COUNTER",
				VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER,
				&pipe_reg_read
			},
			{
				"TRIVIAL_REJECTED_COUNTER",
				VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER,
				&pipe_reg_read
			},
			{
				"CULLED_COUNTER",
				VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER,
				&pipe_reg_read
			}
		}
	},
	{
		.name = "SE",
		.profile_read = VIVS_MC_PROFILE_SE_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG1,
		.nr_signals = 2,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"CULLED_TRIANGLE_COUNT",
				VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT,
				&perf_reg_read
			},
			{
				"CULLED_LINES_COUNT",
				VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT,
				&perf_reg_read
			}
		}
	},
	{
		.name = "RA",
		.profile_read = VIVS_MC_PROFILE_RA_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG1,
		.nr_signals = 7,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"VALID_PIXEL_COUNT",
				VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT,
				&perf_reg_read
			},
			{
				"TOTAL_QUAD_COUNT",
				VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT,
				&perf_reg_read
			},
			{
				"VALID_QUAD_COUNT_AFTER_EARLY_Z",
				VIVS_MC_PROFILE_CONFIG1_RA_VALID_QUAD_COUNT_AFTER_EARLY_Z,
				&perf_reg_read
			},
			{
				"TOTAL_PRIMITIVE_COUNT",
				VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_PRIMITIVE_COUNT,
				&perf_reg_read
			},
			{
				"PIPE_CACHE_MISS_COUNTER",
				VIVS_MC_PROFILE_CONFIG1_RA_PIPE_CACHE_MISS_COUNTER,
				&perf_reg_read
			},
			{
				"PREFETCH_CACHE_MISS_COUNTER",
				VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER,
				&perf_reg_read
			},
			{
				"CULLED_QUAD_COUNT",
				VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT,
				&perf_reg_read
			}
		}
	},
	{
		.name = "TX",
		.profile_read = VIVS_MC_PROFILE_TX_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG1,
		.nr_signals = 9,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"TOTAL_BILINEAR_REQUESTS",
				VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS,
				&perf_reg_read
			},
			{
				"TOTAL_TRILINEAR_REQUESTS",
				VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS,
				&perf_reg_read
			},
			{
				"TOTAL_DISCARDED_TEXTURE_REQUESTS",
				VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_DISCARDED_TEXTURE_REQUESTS,
				&perf_reg_read
			},
			{
				"TOTAL_TEXTURE_REQUESTS",
				VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TEXTURE_REQUESTS,
				&perf_reg_read
			},
			{
				"MEM_READ_COUNT",
				VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_COUNT,
				&perf_reg_read
			},
			{
				"MEM_READ_IN_8B_COUNT",
				VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_IN_8B_COUNT,
				&perf_reg_read
			},
			{
				"CACHE_MISS_COUNT",
				VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_COUNT,
				&perf_reg_read
			},
			{
				"CACHE_HIT_TEXEL_COUNT",
				VIVS_MC_PROFILE_CONFIG1_TX_CACHE_HIT_TEXEL_COUNT,
				&perf_reg_read
			},
			{
				"CACHE_MISS_TEXEL_COUNT",
				VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_TEXEL_COUNT,
				&perf_reg_read
			}
		}
	},
	{
		.name = "MC",
		.profile_read = VIVS_MC_PROFILE_MC_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG2,
		.nr_signals = 3,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"TOTAL_READ_REQ_8B_FROM_PIPELINE",
				VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE,
				&perf_reg_read
			},
			{
				"TOTAL_READ_REQ_8B_FROM_IP",
				VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP,
				&perf_reg_read
			},
			{
				"TOTAL_WRITE_REQ_8B_FROM_PIPELINE",
				VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE,
				&perf_reg_read
			}
		}
	}
};

static const struct etnaviv_pm_domain doms_2d[] = {
	{
		.name = "PE",
		.profile_read = VIVS_MC_PROFILE_PE_READ,
		.profile_config = VIVS_MC_PROFILE_CONFIG0,
		.nr_signals = 1,
		.signal = (const struct etnaviv_pm_signal[]) {
			{
				"PIXELS_RENDERED_2D",
				VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D,
				&pipe_reg_read
			}
		}
	}
};

static const struct etnaviv_pm_domain doms_vg[] = {
};

static const struct etnaviv_pm_domain_meta doms_meta[] = {
	{
		.nr_domains = ARRAY_SIZE(doms_3d),
		.domains = &doms_3d[0]
	},
	{
		.nr_domains = ARRAY_SIZE(doms_2d),
		.domains = &doms_2d[0]
	},
	{
		.nr_domains = ARRAY_SIZE(doms_vg),
		.domains = &doms_vg[0]
	}
};

int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
	struct drm_etnaviv_pm_domain *domain)
{
	const struct etnaviv_pm_domain_meta *meta = &doms_meta[domain->pipe];
	const struct etnaviv_pm_domain *dom;

	if (domain->iter >= meta->nr_domains)
		return -EINVAL;

	dom = meta->domains + domain->iter;

	domain->id = domain->iter;
	domain->nr_signals = dom->nr_signals;
	strncpy(domain->name, dom->name, sizeof(domain->name));

	domain->iter++;
	if (domain->iter == meta->nr_domains)
		domain->iter = 0xff;

	return 0;
}

int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
	struct drm_etnaviv_pm_signal *signal)
{
	const struct etnaviv_pm_domain_meta *meta = &doms_meta[signal->pipe];
	const struct etnaviv_pm_domain *dom;
	const struct etnaviv_pm_signal *sig;

	if (signal->domain >= meta->nr_domains)
		return -EINVAL;

	dom = meta->domains + signal->domain;

	if (signal->iter >= dom->nr_signals)
		return -EINVAL;

	sig = &dom->signal[signal->iter];

	signal->id = signal->iter;
	strncpy(signal->name, sig->name, sizeof(signal->name));

	signal->iter++;
	if (signal->iter == dom->nr_signals)
		signal->iter = 0xffff;

	return 0;
}

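Taken together, the two query functions implement a simple cursor protocol: userspace starts with iter = 0, every call returns one entry and advances the cursor, and the kernel writes the 0xff/0xffff sentinel after the last entry. A userspace sketch of the enumeration, assuming raw libdrm command ioctls (the final libdrm wrappers mentioned in the pull message may look different):

	/* Userspace sketch: list all perfmon domains and signals of one pipe.
	 * Error handling trimmed; 'fd' is an open etnaviv render node. */
	#include <stdio.h>
	#include <xf86drm.h>
	#include <drm/etnaviv_drm.h>

	static void list_perfmon(int fd, __u32 pipe)
	{
		struct drm_etnaviv_pm_domain dom = { .pipe = pipe };

		while (drmCommandWriteRead(fd, DRM_ETNAVIV_PM_QUERY_DOM,
					   &dom, sizeof(dom)) == 0) {
			struct drm_etnaviv_pm_signal sig = {
				.pipe = pipe,
				.domain = dom.id,
			};

			printf("domain %d: %s (%d signals)\n",
			       dom.id, dom.name, dom.nr_signals);

			while (drmCommandWriteRead(fd, DRM_ETNAVIV_PM_QUERY_SIG,
						   &sig, sizeof(sig)) == 0) {
				printf("  signal %d: %s\n", sig.id, sig.name);
				if (sig.iter == 0xffff)	/* kernel's end marker */
					break;
			}

			if (dom.iter == 0xff)	/* kernel's end marker */
				break;
		}
	}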
|
||||
int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
|
||||
u32 exec_state)
|
||||
{
|
||||
const struct etnaviv_pm_domain_meta *meta = &doms_meta[exec_state];
|
||||
const struct etnaviv_pm_domain *dom;
|
||||
|
||||
if (r->domain >= meta->nr_domains)
|
||||
return -EINVAL;
|
||||
|
||||
dom = meta->domains + r->domain;
|
||||
|
||||
if (r->signal > dom->nr_signals)
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
	const struct etnaviv_perfmon_request *pmr)
{
	const struct etnaviv_pm_domain_meta *meta = &doms_meta[gpu->exec_state];
	const struct etnaviv_pm_domain *dom;
	const struct etnaviv_pm_signal *sig;
	u32 *bo = pmr->bo_vma;
	u32 val;

	dom = meta->domains + pmr->domain;
	sig = &dom->signal[pmr->signal];
	val = sig->sample(gpu, dom, sig);

	*(bo + pmr->offset) = val;
}
@@ -0,0 +1,49 @@
/*
 * Copyright (C) 2017 Etnaviv Project
 * Copyright (C) 2017 Zodiac Inflight Innovations
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ETNAVIV_PERFMON_H__
#define __ETNAVIV_PERFMON_H__

struct etnaviv_gpu;
struct drm_etnaviv_pm_domain;
struct drm_etnaviv_pm_signal;

struct etnaviv_perfmon_request
{
	u32 flags;
	u8 domain;
	u8 signal;
	u32 sequence;

	/* bo to store a value */
	u32 *bo_vma;
	u32 offset;
};

int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
	struct drm_etnaviv_pm_domain *domain);

int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
	struct drm_etnaviv_pm_signal *signal);

int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
	u32 exec_state);

void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
	const struct etnaviv_perfmon_request *pmr);

#endif /* __ETNAVIV_PERFMON_H__ */
@@ -150,6 +150,19 @@ struct drm_etnaviv_gem_submit_bo {
 	__u64 presumed;       /* in/out, presumed buffer address */
 };

+/* performance monitor request (pmr) */
+#define ETNA_PM_PROCESS_PRE   0x0001
+#define ETNA_PM_PROCESS_POST  0x0002
+struct drm_etnaviv_gem_submit_pmr {
+	__u32 flags;          /* in, when to process request (ETNA_PM_PROCESS_x) */
+	__u8  domain;         /* in, pm domain */
+	__u8  pad;
+	__u16 signal;         /* in, pm signal */
+	__u32 sequence;       /* in, sequence number */
+	__u32 read_offset;    /* in, offset from read_bo */
+	__u32 read_idx;       /* in, index of read_bo buffer */
+};
+
 /* Each cmdstream submit consists of a table of buffers involved, and
  * one or more cmdstream buffers.  This allows for conditional execution
  * (context-restore), and IB buffers needed for per tile/bin draw cmds.
@@ -175,6 +188,9 @@ struct drm_etnaviv_gem_submit {
 	__u64 stream;         /* in, ptr to cmdstream */
 	__u32 flags;          /* in, mask of ETNA_SUBMIT_x */
 	__s32 fence_fd;       /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */
+	__u64 pmrs;           /* in, ptr to array of submit_pmr's */
+	__u32 nr_pmrs;        /* in, number of submit_pmr's */
+	__u32 pad;
 };

 /* The normal way to synchronize with the GPU is just to CPU_PREP on
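To actually sample a counter, userspace attaches one or more of these requests to an ordinary submit; the kernel samples at the sync point before or after the command stream, depending on flags, and stores a u32 result at read_offset inside the BO named by read_idx. A hedged sketch using the UAPI structs above (submit plumbing elided):

	/* Sketch: one POST-sample of domain 0 / signal 0 into bos[0]. */
	struct drm_etnaviv_gem_submit_pmr pmr = {
		.flags = ETNA_PM_PROCESS_POST, /* sample after the cmdstream ran */
		.domain = 0,                   /* e.g. the "HI" domain on the 3D pipe */
		.signal = 0,                   /* e.g. "TOTAL_CYCLES" */
		.sequence = 0,
		.read_offset = 0,              /* byte offset inside the read bo */
		.read_idx = 0,                 /* index into the submit's bo table */
	};

	struct drm_etnaviv_gem_submit req = {
		.pipe = ETNA_PIPE_3D,
		/* ... bos, relocs, stream set up as for any submit ... */
		.pmrs = (__u64)(uintptr_t)&pmr,
		.nr_pmrs = 1,
	};

Once the submit's fence has signalled, the sample can be read back as a u32 at read_offset in the CPU mapping of that BO.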
@@ -210,6 +226,27 @@ struct drm_etnaviv_gem_wait {
 	struct drm_etnaviv_timespec timeout;   /* in */
 };

+/*
+ * Performance Monitor (PM):
+ */
+
+struct drm_etnaviv_pm_domain {
+	__u32 pipe;       /* in */
+	__u8  iter;       /* in/out, select pm domain at index iter */
+	__u8  id;         /* out, id of domain */
+	__u16 nr_signals; /* out, how many signals does this domain provide */
+	char  name[64];   /* out, name of domain */
+};
+
+struct drm_etnaviv_pm_signal {
+	__u32 pipe;       /* in */
+	__u8  domain;     /* in, pm domain index */
+	__u8  pad;
+	__u16 iter;       /* in/out, select pm source at index iter */
+	__u16 id;         /* out, id of signal */
+	char  name[64];   /* out, name of signal */
+};
+
 #define DRM_ETNAVIV_GET_PARAM          0x00
 /* placeholder:
 #define DRM_ETNAVIV_SET_PARAM          0x01
@@ -222,7 +259,9 @@ struct drm_etnaviv_gem_wait {
 #define DRM_ETNAVIV_WAIT_FENCE         0x07
 #define DRM_ETNAVIV_GEM_USERPTR        0x08
 #define DRM_ETNAVIV_GEM_WAIT           0x09
-#define DRM_ETNAVIV_NUM_IOCTLS         0x0a
+#define DRM_ETNAVIV_PM_QUERY_DOM       0x0a
+#define DRM_ETNAVIV_PM_QUERY_SIG       0x0b
+#define DRM_ETNAVIV_NUM_IOCTLS         0x0c

 #define DRM_IOCTL_ETNAVIV_GET_PARAM    DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param)
 #define DRM_IOCTL_ETNAVIV_GEM_NEW      DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new)
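The driver side (etnaviv_drv.c, not shown in this excerpt) presumably registers matching handlers in its ioctl table; with etnaviv's existing ETNA_IOCTL() helper that would look roughly like:

	/* Sketch: entries in etnaviv_ioctls[], assuming handlers named
	 * etnaviv_ioctl_pm_query_dom()/etnaviv_ioctl_pm_query_sig() exist. */
	ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_AUTH|DRM_RENDER_ALLOW),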
@@ -233,6 +272,8 @@ struct drm_etnaviv_gem_wait {
 #define DRM_IOCTL_ETNAVIV_WAIT_FENCE   DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence)
 #define DRM_IOCTL_ETNAVIV_GEM_USERPTR  DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
 #define DRM_IOCTL_ETNAVIV_GEM_WAIT     DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)
+#define DRM_IOCTL_ETNAVIV_PM_QUERY_DOM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_DOM, struct drm_etnaviv_pm_domain)
+#define DRM_IOCTL_ETNAVIV_PM_QUERY_SIG DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_SIG, struct drm_etnaviv_pm_signal)

 #if defined(__cplusplus)
 }