Subversion Repositories Kolibri OS

Compare Revisions

Regard whitespace Rev 7143 → Rev 7144

/drivers/video/drm/i915/i915_gem_context.c
142,7 → 142,7
return;
 
list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
mm_list) {
vm_link) {
if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
break;
}
321,6 → 321,18
return ERR_PTR(ret);
}
 
/*
 * Drop the per-engine pin a context holds, releasing whatever resources the
 * corresponding pin path acquired.
 *
 * In execlists (logical ring context) mode, unpinning is delegated entirely
 * to the LRC layer.  In legacy ringbuffer mode, only the render engine (RCS)
 * keeps a GGTT-pinned hardware context object, so that pin is released first,
 * followed by the context reference taken at pin time.
 */
static void i915_gem_context_unpin(struct intel_context *ctx,
				   struct intel_engine_cs *engine)
{
	if (i915.enable_execlists) {
		intel_lr_context_unpin(ctx, engine);
		return;
	}

	/* Legacy mode: only RCS carries a pinned rcs_state object. */
	if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);

	i915_gem_context_unreference(ctx);
}
 
void i915_gem_context_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
329,40 → 341,31
if (i915.enable_execlists) {
struct intel_context *ctx;
 
list_for_each_entry(ctx, &dev_priv->context_list, link) {
list_for_each_entry(ctx, &dev_priv->context_list, link)
intel_lr_context_reset(dev, ctx);
}
 
return;
}
 
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_context *lctx = ring->last_context;
 
if (lctx) {
if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
 
i915_gem_context_unreference(lctx);
if (ring->last_context) {
i915_gem_context_unpin(ring->last_context, ring);
ring->last_context = NULL;
}
}
 
/* Force the GPU state to be reinitialised on enabling */
if (ring->default_context)
ring->default_context->legacy_hw_ctx.initialized = false;
dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
}
}
 
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *ctx;
int i;
 
/* Init should only be called once per module load. Eventually the
* restriction on the context_disabled check can be loosened. */
if (WARN_ON(dev_priv->ring[RCS].default_context))
if (WARN_ON(dev_priv->kernel_context))
return 0;
 
if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
392,13 → 395,8
return PTR_ERR(ctx);
}
 
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
dev_priv->kernel_context = ctx;
 
/* NB: RCS will hold a ref for all rings */
ring->default_context = ctx;
}
 
DRM_DEBUG_DRIVER("%s context support initialized\n",
i915.enable_execlists ? "LR" :
dev_priv->hw_context_size ? "HW" : "fake");
408,7 → 406,7
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *dctx = dev_priv->ring[RCS].default_context;
struct intel_context *dctx = dev_priv->kernel_context;
int i;
 
if (dctx->legacy_hw_ctx.rcs_state) {
424,28 → 422,21
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
WARN_ON(!dev_priv->ring[RCS].last_context);
if (dev_priv->ring[RCS].last_context == dctx) {
/* Fake switch to NULL context */
WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(dctx);
dev_priv->ring[RCS].last_context = NULL;
}
 
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}
 
for (i = 0; i < I915_NUM_RINGS; i++) {
for (i = I915_NUM_RINGS; --i >= 0;) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
 
if (ring->last_context)
i915_gem_context_unreference(ring->last_context);
 
ring->default_context = NULL;
if (ring->last_context) {
i915_gem_context_unpin(ring->last_context, ring);
ring->last_context = NULL;
}
}
 
i915_gem_context_unreference(dctx);
dev_priv->kernel_context = NULL;
}
 
int i915_gem_context_enable(struct drm_i915_gem_request *req)
864,6 → 855,9
if (!contexts_enabled(dev))
return -ENODEV;
 
if (args->pad != 0)
return -EINVAL;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
887,6 → 881,9
struct intel_context *ctx;
int ret;
 
if (args->pad != 0)
return -EINVAL;
 
if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
return -ENOENT;