mirror of https://gitee.com/openkylin/linux.git
drm/i915: Move obj->active:5 to obj->flags
We are motivated to avoid using a bitfield for obj->active for a couple of reasons. Firstly, we wish to document our lockless read of obj->active using READ_ONCE inside i915_gem_busy_ioctl() and that requires an integral type (i.e. not a bitfield). Secondly, gcc produces abysmal code when presented with a bitfield and that shows up high on the profiles of request tracking (mainly due to excess memory traffic as it converts the bitfield to a register and back and generates frequent AGI in the process). v2: BIT, break up a long line in compute the other engines, new paint for i915_gem_object_is_active (now i915_gem_object_get_active). Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Link: http://patchwork.freedesktop.org/patch/msgid/1470324762-2545-23-git-send-email-chris@chris-wilson.co.uk
This commit is contained in:
parent
5748b6a1f4
commit
573adb3962
|
@ -91,7 +91,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
|
||||||
|
|
||||||
static char get_active_flag(struct drm_i915_gem_object *obj)
|
static char get_active_flag(struct drm_i915_gem_object *obj)
|
||||||
{
|
{
|
||||||
return obj->active ? '*' : ' ';
|
return i915_gem_object_is_active(obj) ? '*' : ' ';
|
||||||
}
|
}
|
||||||
|
|
||||||
static char get_pin_flag(struct drm_i915_gem_object *obj)
|
static char get_pin_flag(struct drm_i915_gem_object *obj)
|
||||||
|
|
|
@ -2155,12 +2155,16 @@ struct drm_i915_gem_object {
|
||||||
|
|
||||||
struct list_head batch_pool_link;
|
struct list_head batch_pool_link;
|
||||||
|
|
||||||
|
unsigned long flags;
|
||||||
/**
|
/**
|
||||||
* This is set if the object is on the active lists (has pending
|
* This is set if the object is on the active lists (has pending
|
||||||
* rendering and so a non-zero seqno), and is not set if it is on
|
* rendering and so a non-zero seqno), and is not set if it is on
|
||||||
* inactive (ready to be unbound) list.
|
* inactive (ready to be unbound) list.
|
||||||
*/
|
*/
|
||||||
unsigned int active:I915_NUM_ENGINES;
|
#define I915_BO_ACTIVE_SHIFT 0
|
||||||
|
#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
|
||||||
|
#define __I915_BO_ACTIVE(bo) \
|
||||||
|
((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This is set if the object has been written to since last bound
|
* This is set if the object has been written to since last bound
|
||||||
|
@ -2325,6 +2329,37 @@ i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
|
||||||
return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
|
return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline unsigned long
|
||||||
|
i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
|
||||||
|
{
|
||||||
|
return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline bool
|
||||||
|
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
|
||||||
|
{
|
||||||
|
return i915_gem_object_get_active(obj);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void
|
||||||
|
i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
|
||||||
|
{
|
||||||
|
obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void
|
||||||
|
i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
|
||||||
|
{
|
||||||
|
obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline bool
|
||||||
|
i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
|
||||||
|
int engine)
|
||||||
|
{
|
||||||
|
return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Optimised SGL iterator for GEM objects
|
* Optimised SGL iterator for GEM objects
|
||||||
*/
|
*/
|
||||||
|
|
|
@ -1358,7 +1358,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
|
||||||
|
|
||||||
if (!readonly) {
|
if (!readonly) {
|
||||||
active = obj->last_read;
|
active = obj->last_read;
|
||||||
active_mask = obj->active;
|
active_mask = i915_gem_object_get_active(obj);
|
||||||
} else {
|
} else {
|
||||||
active_mask = 1;
|
active_mask = 1;
|
||||||
active = &obj->last_write;
|
active = &obj->last_write;
|
||||||
|
@ -1402,7 +1402,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
|
||||||
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
|
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
|
||||||
BUG_ON(!dev_priv->mm.interruptible);
|
BUG_ON(!dev_priv->mm.interruptible);
|
||||||
|
|
||||||
active_mask = obj->active;
|
active_mask = i915_gem_object_get_active(obj);
|
||||||
if (!active_mask)
|
if (!active_mask)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
@ -2365,10 +2365,10 @@ i915_gem_object_retire__read(struct i915_gem_active *active,
|
||||||
struct drm_i915_gem_object *obj =
|
struct drm_i915_gem_object *obj =
|
||||||
container_of(active, struct drm_i915_gem_object, last_read[idx]);
|
container_of(active, struct drm_i915_gem_object, last_read[idx]);
|
||||||
|
|
||||||
GEM_BUG_ON((obj->active & (1 << idx)) == 0);
|
GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
|
||||||
|
|
||||||
obj->active &= ~(1 << idx);
|
i915_gem_object_clear_active(obj, idx);
|
||||||
if (obj->active)
|
if (i915_gem_object_is_active(obj))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/* Bump our place on the bound list to keep it roughly in LRU order
|
/* Bump our place on the bound list to keep it roughly in LRU order
|
||||||
|
@ -2672,7 +2672,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!obj->active)
|
if (!i915_gem_object_is_active(obj))
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
for (i = 0; i < I915_NUM_ENGINES; i++) {
|
for (i = 0; i < I915_NUM_ENGINES; i++) {
|
||||||
|
@ -2760,7 +2760,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
|
||||||
|
|
||||||
lockdep_assert_held(&obj->base.dev->struct_mutex);
|
lockdep_assert_held(&obj->base.dev->struct_mutex);
|
||||||
|
|
||||||
active_mask = obj->active;
|
active_mask = i915_gem_object_get_active(obj);
|
||||||
if (!active_mask)
|
if (!active_mask)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
@ -3811,7 +3811,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
|
||||||
* become non-busy without any further actions.
|
* become non-busy without any further actions.
|
||||||
*/
|
*/
|
||||||
args->busy = 0;
|
args->busy = 0;
|
||||||
if (obj->active) {
|
if (i915_gem_object_is_active(obj)) {
|
||||||
struct drm_i915_gem_request *req;
|
struct drm_i915_gem_request *req;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
|
|
|
@ -434,7 +434,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
|
||||||
|
|
||||||
static bool object_is_idle(struct drm_i915_gem_object *obj)
|
static bool object_is_idle(struct drm_i915_gem_object *obj)
|
||||||
{
|
{
|
||||||
unsigned long active = obj->active;
|
unsigned long active = i915_gem_object_get_active(obj);
|
||||||
int idx;
|
int idx;
|
||||||
|
|
||||||
for_each_active(active, idx) {
|
for_each_active(active, idx) {
|
||||||
|
@ -990,11 +990,21 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
|
||||||
|
{
|
||||||
|
unsigned int mask;
|
||||||
|
|
||||||
|
mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
|
||||||
|
mask <<= I915_BO_ACTIVE_SHIFT;
|
||||||
|
|
||||||
|
return mask;
|
||||||
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
|
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
|
||||||
struct list_head *vmas)
|
struct list_head *vmas)
|
||||||
{
|
{
|
||||||
const unsigned other_rings = ~intel_engine_flag(req->engine);
|
const unsigned int other_rings = eb_other_engines(req);
|
||||||
struct i915_vma *vma;
|
struct i915_vma *vma;
|
||||||
uint32_t flush_domains = 0;
|
uint32_t flush_domains = 0;
|
||||||
bool flush_chipset = false;
|
bool flush_chipset = false;
|
||||||
|
@ -1003,7 +1013,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
|
||||||
list_for_each_entry(vma, vmas, exec_list) {
|
list_for_each_entry(vma, vmas, exec_list) {
|
||||||
struct drm_i915_gem_object *obj = vma->obj;
|
struct drm_i915_gem_object *obj = vma->obj;
|
||||||
|
|
||||||
if (obj->active & other_rings) {
|
if (obj->flags & other_rings) {
|
||||||
ret = i915_gem_object_sync(obj, req);
|
ret = i915_gem_object_sync(obj, req);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -1166,9 +1176,9 @@ void i915_vma_move_to_active(struct i915_vma *vma,
|
||||||
* add the active reference first and queue for it to be dropped
|
* add the active reference first and queue for it to be dropped
|
||||||
* *last*.
|
* *last*.
|
||||||
*/
|
*/
|
||||||
if (obj->active == 0)
|
if (!i915_gem_object_is_active(obj))
|
||||||
i915_gem_object_get(obj);
|
i915_gem_object_get(obj);
|
||||||
obj->active |= 1 << idx;
|
i915_gem_object_set_active(obj, idx);
|
||||||
i915_gem_active_set(&obj->last_read[idx], req);
|
i915_gem_active_set(&obj->last_read[idx], req);
|
||||||
|
|
||||||
if (flags & EXEC_OBJECT_WRITE) {
|
if (flags & EXEC_OBJECT_WRITE) {
|
||||||
|
|
|
@ -182,7 +182,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
|
||||||
!is_vmalloc_addr(obj->mapping))
|
!is_vmalloc_addr(obj->mapping))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
|
if ((flags & I915_SHRINK_ACTIVE) == 0 &&
|
||||||
|
i915_gem_object_is_active(obj))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (!can_release_pages(obj))
|
if (!can_release_pages(obj))
|
||||||
|
@ -267,7 +268,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
|
||||||
count += obj->base.size >> PAGE_SHIFT;
|
count += obj->base.size >> PAGE_SHIFT;
|
||||||
|
|
||||||
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
|
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
|
||||||
if (!obj->active && can_release_pages(obj))
|
if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
|
||||||
count += obj->base.size >> PAGE_SHIFT;
|
count += obj->base.size >> PAGE_SHIFT;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -67,7 +67,7 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
|
||||||
struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
|
struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
|
||||||
int i, n;
|
int i, n;
|
||||||
|
|
||||||
if (!obj->active)
|
if (!i915_gem_object_is_active(obj))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
n = 0;
|
n = 0;
|
||||||
|
|
Loading…
Reference in New Issue