1922,6 → 1922,17 |
return 0; |
} |
|
static void cleanup_phys_status_page(struct intel_engine_cs *ring) |
{ |
struct drm_i915_private *dev_priv = to_i915(ring->dev); |
|
if (!dev_priv->status_page_dmah) |
return; |
|
drm_pci_free(ring->dev, dev_priv->status_page_dmah); |
ring->status_page.page_addr = NULL; |
} |
|
static void cleanup_status_page(struct intel_engine_cs *ring) |
{ |
struct drm_i915_gem_object *obj; |
1938,9 → 1949,9 |
|
static int init_status_page(struct intel_engine_cs *ring) |
{ |
struct drm_i915_gem_object *obj; |
struct drm_i915_gem_object *obj = ring->status_page.obj; |
|
if ((obj = ring->status_page.obj) == NULL) { |
if (obj == NULL) { |
unsigned flags; |
int ret; |
|
2134,7 → 2145,7 |
if (ret) |
goto error; |
} else { |
BUG_ON(ring->id != RCS); |
WARN_ON(ring->id != RCS); |
ret = init_phys_status_page(ring); |
if (ret) |
goto error; |
2179,7 → 2190,12 |
if (ring->cleanup) |
ring->cleanup(ring); |
|
if (I915_NEED_GFX_HWS(ring->dev)) { |
cleanup_status_page(ring); |
} else { |
WARN_ON(ring->id != RCS); |
cleanup_phys_status_page(ring); |
} |
|
i915_cmd_parser_fini_ring(ring); |
i915_gem_batch_pool_fini(&ring->batch_pool); |
2341,11 → 2357,11 |
if (unlikely(total_bytes > remain_usable)) { |
/* |
* The base request will fit but the reserved space |
falls off the end. So only need to wait for the |
* reserved size after flushing out the remainder. |
* falls off the end. So don't need an immediate wrap |
* and only need to effectively wait for the reserved |
* size space from the start of ringbuffer. |
*/ |
wait_bytes = remain_actual + ringbuf->reserved_size; |
need_wrap = true; |
} else if (total_bytes > ringbuf->space) { |
/* No wrapping required, just waiting. */ |
wait_bytes = total_bytes; |