drm/i915: Mark a few functions as __must_check

... to benefit from the compiler checking that we remember to handle
and propagate errors.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2010-11-23 15:26:33 +00:00
parent ab5793ad3a
commit 2021746e1d

2 changed files with 58 additions and 56 deletions
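
For reference, __must_check is the kernel's name for gcc's
__attribute__((warn_unused_result)): any caller that silently drops the
return value of a function so marked gets a compile-time warning. A
minimal user-space sketch of that behaviour (an illustration, not part
of this patch):

	/* Stand-alone sketch: in the kernel the macro comes from the
	 * compiler headers; here we define it directly. */
	#define __must_check __attribute__((warn_unused_result))

	static int __must_check do_unbind(void)
	{
		return -16;		/* say, -EBUSY */
	}

	int main(void)
	{
		do_unbind();		/* warning: ignoring return value */

		if (do_unbind() != 0)	/* fine: the error is handled */
			return 1;
		return 0;
	}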

--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h

@@ -1093,11 +1093,11 @@ int i915_gem_init_object(struct drm_gem_object *obj);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_i915_gem_object *obj,
-			uint32_t alignment,
-			bool map_and_fenceable);
+int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+				     uint32_t alignment,
+				     bool map_and_fenceable);
 void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
@@ -1110,37 +1110,42 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 	return (int32_t)(seq1 - seq2) >= 0;
 }
-int i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj,
-				  bool interruptible);
-int i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj,
-				  bool interruptible);
+int __must_check i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj,
+					       bool interruptible);
+int __must_check i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj,
+					       bool interruptible);
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_reset(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
-int i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
-			       uint32_t read_domains,
-			       uint32_t write_domain);
-int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
-			      bool interruptible);
-int i915_gem_init_ringbuffer(struct drm_device *dev);
+int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
+					    uint32_t read_domains,
+					    uint32_t write_domain);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+					   bool interruptible);
+int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
-		     unsigned long mappable_end, unsigned long end);
-int i915_gpu_idle(struct drm_device *dev);
-int i915_gem_idle(struct drm_device *dev);
-int i915_add_request(struct drm_device *dev,
-		     struct drm_file *file_priv,
-		     struct drm_i915_gem_request *request,
-		     struct intel_ring_buffer *ring);
-int i915_do_wait_request(struct drm_device *dev,
-			 uint32_t seqno,
-			 bool interruptible,
-			 struct intel_ring_buffer *ring);
+void i915_gem_do_init(struct drm_device *dev,
+		      unsigned long start,
+		      unsigned long mappable_end,
+		      unsigned long end);
+int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_idle(struct drm_device *dev);
+int __must_check i915_add_request(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  struct drm_i915_gem_request *request,
+				  struct intel_ring_buffer *ring);
+int __must_check i915_do_wait_request(struct drm_device *dev,
+				      uint32_t seqno,
+				      bool interruptible,
+				      struct intel_ring_buffer *ring);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
-				      int write);
-int i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
-					 struct intel_ring_buffer *pipelined);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+				  bool write);
+int __must_check
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+				     struct intel_ring_buffer *pipelined);
 int i915_gem_attach_phys_object(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				int id,
@@ -1152,14 +1157,16 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 /* i915_gem_evict.c */
-int i915_gem_evict_something(struct drm_device *dev, int min_size,
-			     unsigned alignment, bool mappable);
-int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
-int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only);
+int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+					  unsigned alignment, bool mappable);
+int __must_check i915_gem_evict_everything(struct drm_device *dev,
+					   bool purgeable_only);
+int __must_check i915_gem_evict_inactive(struct drm_device *dev,
+					 bool purgeable_only);
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);

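With the prototypes above marked __must_check, every call site has to
either handle the error or pass it up the stack. A hedged sketch of the
usual unwind-and-propagate idiom against these declarations (the caller
and its argument values are hypothetical, not taken from this patch):

	static int example_pin_and_idle(struct drm_device *dev,
					struct drm_i915_gem_object *obj)
	{
		int ret;

		ret = i915_gem_object_pin(obj, 4096, true);
		if (ret)
			return ret;	/* propagate, or gcc warns */

		ret = i915_gpu_idle(dev);
		if (ret) {
			/* unwind; unpin returns void, nothing to check */
			i915_gem_object_unpin(obj);
			return ret;
		}

		return 0;
	}
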
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c

@@ -215,27 +215,19 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 	return obj->gtt_space && !obj->active && obj->pin_count == 0;
 }
-int i915_gem_do_init(struct drm_device *dev,
-		     unsigned long start,
-		     unsigned long mappable_end,
-		     unsigned long end)
+void i915_gem_do_init(struct drm_device *dev,
+		      unsigned long start,
+		      unsigned long mappable_end,
+		      unsigned long end)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	if (start >= end ||
-	    (start & (PAGE_SIZE - 1)) != 0 ||
-	    (end & (PAGE_SIZE - 1)) != 0) {
-		return -EINVAL;
-	}
 	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);
 	dev_priv->mm.gtt_total = end - start;
 	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
 	dev_priv->mm.gtt_mappable_end = mappable_end;
-	return 0;
 }
 int
@@ -243,13 +235,16 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
 {
 	struct drm_i915_gem_init *args = data;
-	int ret;
+	if (args->gtt_start >= args->gtt_end ||
+	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
+		return -EINVAL;
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
+	i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
 	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	return 0;
 }
 int
@@ -2949,7 +2944,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
  * flushes to occur.
  */
 int
-i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write)
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
@@ -5177,8 +5172,8 @@ i915_gem_inactive_shrink(struct shrinker *shrinker,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
 		if (i915_gem_object_is_purgeable(obj)) {
-			i915_gem_object_unbind(obj);
-			if (--nr_to_scan == 0)
+			if (i915_gem_object_unbind(obj) == 0 &&
+			    --nr_to_scan == 0)
				break;
		}
	}
@@ -5188,10 +5183,10 @@ i915_gem_inactive_shrink(struct shrinker *shrinker,
 	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
-		if (nr_to_scan) {
-			i915_gem_object_unbind(obj);
+		if (nr_to_scan &&
+		    i915_gem_object_unbind(obj) == 0)
			nr_to_scan--;
-		} else
+		else
			cnt++;
	}