/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

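/*
 * Tracks the objects referenced by a single execbuffer call and provides a
 * handle -> drm_i915_gem_object lookup, either as a flat index table (lut)
 * or as a small hash table (buckets), depending on how it was created.
 */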
struct eb_objects {
	struct list_head objects;
	int and;
	union {
		struct drm_i915_gem_object *lut[0];
		struct hlist_head buckets[0];
	};
};

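/*
 * Allocate the lookup structure for args->buffer_count objects.  With
 * I915_EXEC_HANDLE_LUT the handles are simply indices into the exec list,
 * so a flat array suffices; otherwise fall back to a hash table sized to
 * at most half a page of hlist heads.
 */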
static struct eb_objects *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_objects *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		int size = args->buffer_count;
		size *= sizeof(struct drm_i915_gem_object *);
		size += sizeof(struct eb_objects);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		int size = args->buffer_count;
		int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_objects),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->objects);
	return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

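/*
 * Resolve every handle in the exec list to its GEM object under the file's
 * table_lock, take a reference on each, and queue them on eb->objects in
 * execbuffer order.  Duplicate handles are rejected with -EINVAL.
 */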
static int
eb_lookup_objects(struct eb_objects *eb,
		  struct drm_i915_gem_exec_object2 *exec,
		  const struct drm_i915_gem_execbuffer2 *args,
		  struct drm_file *file)
{
	int i;

	spin_lock(&file->table_lock);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			return -ENOENT;
		}

		if (!list_empty(&obj->exec_list)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			return -EINVAL;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->exec_list, &eb->objects);

		obj->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = obj;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			obj->exec_handle = handle;
			hlist_add_head(&obj->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
	}
	spin_unlock(&file->table_lock);

	return 0;
}

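/* Look up a previously registered object by its execbuffer handle. */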
static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct drm_i915_gem_object *obj;

			obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
			if (obj->exec_handle == handle)
				return obj;
		}
		return NULL;
	}
}

static void
eb_destroy(struct eb_objects *eb)
{
	while (!list_empty(&eb->objects)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&eb->objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}
	kfree(eb);
}

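/*
 * Relocations are written through the CPU whenever a GTT write would be
 * slower or impossible: pending CPU writes, unmappable objects, or
 * cacheable (snooped/LLC) buffers.
 */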
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

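/*
 * Apply a single relocation: validate the request, then write the target's
 * GTT offset (plus delta) into the object at reloc->offset, either through
 * a kmap of the backing page or through an atomic WC mapping of the GTT
 * aperture, and record the new presumed offset.
 */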
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_i915_obj = to_intel_bo(target_obj);
	target_offset = target_i915_obj->gtt_offset;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		i915_gem_gtt_bind_object(target_i915_obj,
					 target_i915_obj->cache_level);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj)) {
		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
		char *vaddr;

		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			return ret;

		vaddr = kmap_atomic(i915_gem_object_get_page(obj,
							     reloc->offset >> PAGE_SHIFT));
		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
		kunmap_atomic(vaddr);
	} else {
		struct drm_i915_private *dev_priv = dev->dev_private;
		uint32_t __iomem *reloc_entry;
		void __iomem *reloc_page;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ret;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			return ret;

		/* Map the page containing the relocation we're going to perform.  */
		reloc->offset += obj->gtt_offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      reloc->offset & PAGE_MASK);
		reloc_entry = (uint32_t __iomem *)
			(reloc_page + (reloc->offset & ~PAGE_MASK));
		iowrite32(reloc->delta, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);
	}

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

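/*
 * Fast relocation path: copy the user's relocation entries in small
 * stack-sized chunks (pagefaults are disabled by the caller, hence the
 * inatomic copies) and write back any presumed offsets that changed.
 */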
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
				    struct eb_objects *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int remain, ret;

	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
					 struct eb_objects *eb,
					 struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
			     struct eb_objects *eb)
{
	struct drm_i915_gem_object *obj;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. In such a case the page fault
	 * handler would call i915_gem_fault() and we would try to acquire
	 * the struct mutex again. Obviously this is bad and so lockdep
	 * complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(obj, &eb->objects, exec_list) {
		ret = i915_gem_execbuffer_relocate_object(obj, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)

static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(obj);
}

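/*
 * Pin a single object into the GTT for execution, grabbing a fence and
 * aliasing-ppgtt/global-gtt mappings as required, and note whether its
 * offset moved so that relocations must be (re)applied.
 */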
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
				   struct intel_ring_buffer *ring,
				   bool *need_reloc)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(obj);

	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	/* Ensure ppgtt mapping exists if needed */
	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
				       obj, obj->cache_level);

		obj->has_aliasing_ppgtt_mapping = 1;
	}

	if (entry->offset != obj->gtt_offset) {
		entry->offset = obj->gtt_offset;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
	    !obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	return 0;
}

static void
i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry;

	if (!obj->gtt_space)
		return;

	entry = obj->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		i915_gem_object_unpin(obj);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

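/*
 * Reserve GTT space for every object in the execbuffer.  Objects that need
 * a mappable offset are processed first; on -ENOSPC everything is unpinned
 * and, as a last resort, the whole GTT is evicted before retrying.
 */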
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct drm_file *file,
			    struct list_head *objects,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct list_head ordered_objects;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	INIT_LIST_HEAD(&ordered_objects);
	while (!list_empty(objects)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		entry = obj->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(obj);

		if (need_mappable)
			list_move(&obj->exec_list, &ordered_objects);
		else
			list_move_tail(&obj->exec_list, &ordered_objects);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_objects, objects);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence, need_mappable;

			if (!obj->gtt_space)
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(obj);

			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_gem_object_unbind(obj);
			else
				ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(obj, objects, exec_list) {
			if (obj->gtt_space)
				continue;

			ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
			if (ret)
				goto err;
		}

err:		/* Decrement pin count for bound objects */
		list_for_each_entry(obj, objects, exec_list)
			i915_gem_execbuffer_unreserve_object(obj);

		if (ret != -ENOSPC || retry++)
			return ret;

		ret = i915_gem_evict_everything(ring->dev);
		if (ret)
			return ret;
	} while (1);
}

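/*
 * Slow relocation path: drop struct_mutex, copy all relocation entries with
 * pagefaults enabled, then reacquire the lock, re-lookup and re-reserve the
 * objects and apply the relocations from the kernel copy.
 */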
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct eb_objects *eb,
				  struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_object *obj;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	int count = args->buffer_count;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->objects)) {
		obj = list_first_entry(&eb->objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;

		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_objects(eb, exec, args, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(obj, &eb->objects, exec_list) {
		int offset = obj->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
							       reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

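/*
 * Flush outstanding CPU/GTT writes for every object and invalidate the
 * ring's caches so the batch sees coherent data.
 */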
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	uint32_t flush_domains = 0;
	int ret;

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			i915_gem_clflush_object(obj);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

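/*
 * Sanity-check the userspace exec list: reject unknown per-object flags and
 * relocation counts that would overflow, and make sure the relocation arrays
 * are readable (and writable for presumed-offset updates).
 */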
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;

	for (i = 0; i < count; i++) {
		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow */
		if (exec[i].relocation_count >
		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
			return -EINVAL;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/* we may also need to update the presumed offsets */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (fault_in_multipages_readable(ptr, length))
			return -EFAULT;
	}

	return 0;
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, exec_list) {
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_gem_object_move_to_active(obj, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			if (obj->pin_count) /* check for potential scanout */
				intel_mark_fb_busy(obj);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)i915_add_request(ring, file, NULL);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

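/*
 * Core of the execbuffer ioctls: validate the request, pin and relocate all
 * objects, switch to the requested context and constants mode, and finally
 * dispatch the batch on the selected ring.
 */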
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 mask, flags;
	int ret, mode, i;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		ring = &dev_priv->ring[VCS];
		if (ctx_id != 0) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_BLT:
		ring = &dev_priv->ring[BCS];
		if (ctx_id != 0) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   (struct drm_clip_rect __user *)(uintptr_t)
				   args->cliprects_ptr,
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_objects(eb, exec, args, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(eb->objects.prev,
			       struct drm_i915_gem_object,
			       exec_list);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(dev, eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but let's be paranoid and do it
	 * unconditionally for now. */
	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, file, ctx_id);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&eb->objects, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring);

err:
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     (void __user *)(uintptr_t)args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

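/*
 * execbuffer2 ioctl: copy in the exec object list, hand it to
 * i915_gem_do_execbuffer() and copy the updated offsets back to userspace.
 */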
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}