diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 444a8ea0c5c4..831da9f43324 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2278,6 +2278,9 @@ struct drm_i915_gem_request {
 	/** Position in the ringbuffer of the end of the whole request */
 	u32 tail;
 
+	/** Preallocate space in the ringbuffer for emitting the request */
+	u32 reserved_space;
+
 	/**
 	 * Context and ring buffer related to this request
 	 * Contexts are refcounted, so when this request is associated with a
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2dfdfe55d3bc..53c4a068245c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2576,6 +2576,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	struct drm_i915_private *dev_priv;
 	struct intel_ringbuffer *ringbuf;
 	u32 request_start;
+	u32 reserved_tail;
 	int ret;
 
 	if (WARN_ON(request == NULL))
@@ -2590,9 +2591,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 * should already have been reserved in the ring buffer. Let the ring
 	 * know that it is time to use that space up.
 	 */
-	intel_ring_reserved_space_use(ringbuf);
-
 	request_start = intel_ring_get_tail(ringbuf);
+	reserved_tail = request->reserved_space;
+	request->reserved_space = 0;
+
 	/*
 	 * Emit any outstanding flushes - execbuf can fail to emit the flush
 	 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2656,7 +2658,13 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	intel_mark_busy(dev_priv->dev);
 
 	/* Sanity check that the reserved size was large enough. */
-	intel_ring_reserved_space_end(ringbuf);
+	ret = intel_ring_get_tail(ringbuf) - request_start;
+	if (ret < 0)
+		ret += ringbuf->size;
+	WARN_ONCE(ret > reserved_tail,
+		  "Not enough space reserved (%d bytes) "
+		  "for adding the request (%d bytes)\n",
+		  reserved_tail, ret);
 }
 
 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
@@ -2777,17 +2785,14 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 	 * to be redone if the request is not actually submitted straight
 	 * away, e.g. because a GPU scheduler has deferred it.
 	 */
-	if (i915.enable_execlists)
-		ret = intel_logical_ring_reserve_space(req);
-	else
-		ret = intel_ring_reserve_space(req);
+	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+	ret = intel_ring_begin(req, 0);
 	if (ret) {
 		/*
 		 * At this point, the request is fully allocated even if not
 		 * fully prepared. Thus it can be cleaned up using the proper
-		 * free code.
+		 * free code, along with any reserved space.
 		 */
-		intel_ring_reserved_space_cancel(req->ringbuf);
 		i915_gem_request_unreference(req);
 		return ret;
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index b07daf227432..c60f4fe36537 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -772,21 +772,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	return 0;
 }
 
-int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
-{
-	/*
-	 * The first call merely notes the reserve request and is common for
-	 * all back ends. The subsequent localised _begin() call actually
-	 * ensures that the reservation is available. Without the begin, if
-	 * the request creator immediately submitted the request without
-	 * adding any commands to it then there might not actually be
-	 * sufficient room for the submission commands.
-	 */
-	intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
-
-	return intel_ring_begin(request, 0);
-}
-
 /**
  * execlists_submission() - submit a batchbuffer for execution, Execlists style
  * @dev: DRM device.
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 87822b6fc0f1..0df660ddda0d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2343,44 +2343,6 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 	return 0;
 }
 
-int intel_ring_reserve_space(struct drm_i915_gem_request *request)
-{
-	/*
-	 * The first call merely notes the reserve request and is common for
-	 * all back ends. The subsequent localised _begin() call actually
-	 * ensures that the reservation is available. Without the begin, if
-	 * the request creator immediately submitted the request without
-	 * adding any commands to it then there might not actually be
-	 * sufficient room for the submission commands.
-	 */
-	intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
-
-	return intel_ring_begin(request, 0);
-}
-
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
-{
-	GEM_BUG_ON(ringbuf->reserved_size);
-	ringbuf->reserved_size = size;
-}
-
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
-{
-	GEM_BUG_ON(!ringbuf->reserved_size);
-	ringbuf->reserved_size = 0;
-}
-
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
-{
-	GEM_BUG_ON(!ringbuf->reserved_size);
-	ringbuf->reserved_size = 0;
-}
-
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
-{
-	GEM_BUG_ON(ringbuf->reserved_size);
-}
-
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
@@ -2400,7 +2362,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	 *
 	 * See also i915_gem_request_alloc() and i915_add_request().
 	 */
-	GEM_BUG_ON(!ringbuf->reserved_size);
+	GEM_BUG_ON(!req->reserved_space);
 
 	list_for_each_entry(target, &engine->request_list, list) {
 		unsigned space;
@@ -2435,7 +2397,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	int total_bytes, wait_bytes;
 	bool need_wrap = false;
 
-	total_bytes = bytes + ringbuf->reserved_size;
+	total_bytes = bytes + req->reserved_space;
 
 	if (unlikely(bytes > remain_usable)) {
 		/*
@@ -2451,7 +2413,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 		 * and only need to effectively wait for the reserved
 		 * size space from the start of ringbuffer.
 		 */
-		wait_bytes = remain_actual + ringbuf->reserved_size;
+		wait_bytes = remain_actual + req->reserved_space;
 	} else {
 		/* No wrapping required, just waiting. */
 		wait_bytes = total_bytes;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 201e7752d765..038914ccc6fd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -107,7 +107,6 @@ struct intel_ringbuffer {
 	int space;
 	int size;
 	int effective_size;
-	int reserved_size;
 
 	/** We track the position of the requests in the ring buffer, and
 	 * when each is retired we increment last_retired_head as the GPU
@@ -491,20 +490,4 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
  */
 #define MIN_SPACE_FOR_ADD_REQUEST 160
 
-/*
- * Reserve space in the ring to guarantee that the i915_add_request() call
- * will always have sufficient room to do its stuff. The request creation
- * code calls this automatically.
- */
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
-/* Cancel the reservation, e.g. because the request is being discarded. */
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
-/* Use the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
-/* Finish with the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
-
-/* Legacy ringbuffer specific portion of reservation code: */
-int intel_ring_reserve_space(struct drm_i915_gem_request *request);
-
 #endif /* _INTEL_RINGBUFFER_H_ */
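
As a quick illustration of the arithmetic behind the new WARN_ONCE sanity check in
__i915_add_request(): the bytes actually emitted for a request are the tail delta since
request_start, corrected by the ring size when the tail has wrapped. The sketch below is a
standalone user-space model, not kernel code; RING_SIZE and request_bytes_used() are
hypothetical names chosen only for this example.

/*
 * Standalone model (user-space C, not kernel code) of the wrap-aware
 * byte accounting used by the sanity check in __i915_add_request().
 */
#include <stdio.h>

#define RING_SIZE 4096			/* hypothetical ring size for the example */
#define MIN_SPACE_FOR_ADD_REQUEST 160	/* mirrors the reservation used in the patch */

/* Bytes consumed between request_start and the current tail, allowing for wrap. */
static int request_bytes_used(int request_start, int tail)
{
	int used = tail - request_start;

	if (used < 0)	/* tail wrapped past the end of the ring */
		used += RING_SIZE;
	return used;
}

int main(void)
{
	printf("no wrap:   %d bytes\n", request_bytes_used(1000, 1100));	/* 100 */
	printf("with wrap: %d bytes\n", request_bytes_used(4000, 64));		/* 160 */
	/* The WARN_ONCE fires when the bytes used exceed the reservation. */
	printf("fits reservation: %s\n",
	       request_bytes_used(4000, 64) <= MIN_SPACE_FOR_ADD_REQUEST ? "yes" : "no");
	return 0;
}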