Subversion Repositories Kolibri OS

Compare Revisions

Rev 4103 → Rev 4104

/drivers/video/drm/i915/i915_gem_context.c
113,7 → 113,7
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
if (IS_HASWELL(dev))
-ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
+ret = HSW_CXT_TOTAL_SIZE;
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
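
Note: the Haswell branch stops decoding the context size from the GEN7_CXT_SIZE register, and the "* 64" scaling goes with it. The constant is assumed to be defined as in the upstream kernel of this era (i915_reg.h, not part of this diff), roughly:

    /* Assumed upstream definition: the Haswell hardware context has a
     * fixed footprint, so no register decode or scaling is needed. */
    #define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)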
124,10 → 124,10
return ret;
}
 
-static void do_destroy(struct i915_hw_context *ctx)
+void i915_gem_context_free(struct kref *ctx_ref)
{
-if (ctx->file_priv)
-idr_remove(&ctx->file_priv->context_idr, ctx->id);
+struct i915_hw_context *ctx = container_of(ctx_ref,
+typeof(*ctx), ref);
 
drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
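
Note: i915_gem_context_free() is now the kref release callback, invoked when the last reference to a context drops. The matching reference helpers are not in this file; presumably, as in the upstream kernel of this era, they are thin inline wrappers in i915_drv.h:

    static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
    {
            kref_get(&ctx->ref);
    }

    static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
    {
            /* calls i915_gem_context_free() on the final put */
            kref_put(&ctx->ref, i915_gem_context_free);
    }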
145,6 → 145,7
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
 
+kref_init(&ctx->ref);
ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
if (ctx->obj == NULL) {
kfree(ctx);
154,8 → 155,9
 
if (INTEL_INFO(dev)->gen >= 7) {
ret = i915_gem_object_set_cache_level(ctx->obj,
-I915_CACHE_LLC_MLC);
-if (ret)
+I915_CACHE_L3_LLC);
+/* Failure shouldn't ever happen this early */
+if (WARN_ON(ret))
goto err_out;
}
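
Note: WARN_ON() can sit directly in the condition because the macro evaluates to its argument; it prints a stack trace when ret is non-zero and the error path still runs. A minimal sketch of the pattern, with a hypothetical setup_step() standing in for the cache-level call:

    ret = setup_step();     /* hypothetical helper, for illustration */
    if (WARN_ON(ret))       /* logs a backtrace if ret != 0 ... */
            goto err_out;   /* ... and the cleanup path still executes */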
 
169,18 → 171,18
if (file_priv == NULL)
return ctx;
 
-ctx->file_priv = file_priv;
-
ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
GFP_KERNEL);
if (ret < 0)
goto err_out;
 
+ctx->file_priv = file_priv;
ctx->id = ret;
 
return ctx;
 
err_out:
-do_destroy(ctx);
+i915_gem_context_unreference(ctx);
return ERR_PTR(ret);
}
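
Note: moving the ctx->file_priv assignment below idr_alloc() matters for the new error path: if idr_alloc() fails, err_out unreferences a context whose file_priv is still NULL, so i915_gem_context_free() no longer needs to undo an idr entry. For reference, idr_alloc() in this kernel generation returns the new id on success or a negative errno; a sketch of the pattern:

    /* ids start after the reserved DEFAULT_CONTEXT_ID; an 'end' of 0
     * means no upper bound; returns the id or a negative errno. */
    id = idr_alloc(&file_priv->context_idr, ctx,
                   DEFAULT_CONTEXT_ID + 1, 0, GFP_KERNEL);
    if (id < 0)
            return id;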
 
212,13 → 214,17
* default context.
*/
dev_priv->ring[RCS].default_context = ctx;
-ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
-if (ret)
+ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
+if (ret) {
+DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
goto err_destroy;
+}
 
ret = do_switch(ctx);
-if (ret)
+if (ret) {
+DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
goto err_unpin;
+}
 
DRM_DEBUG_DRIVER("Default HW context loaded\n");
return 0;
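
Note: both the old and new pin calls use the same alignment constant, defined near the top of this file in the upstream driver (unchanged by this revision):

    /* hw contexts must be 64 KiB aligned in the (global) GTT; this is a
     * hardware constraint. */
    #define CONTEXT_ALIGN (64<<10)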
226,7 → 232,7
err_unpin:
i915_gem_object_unpin(ctx->obj);
err_destroy:
-do_destroy(ctx);
+i915_gem_context_unreference(ctx);
return ret;
}
 
236,6 → 242,7
 
if (!HAS_HW_CONTEXTS(dev)) {
dev_priv->hw_contexts_disabled = true;
DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n");
return;
}
 
248,11 → 255,13
 
if (dev_priv->hw_context_size > (1<<20)) {
dev_priv->hw_contexts_disabled = true;
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
return;
}
 
if (create_default_context(dev_priv)) {
dev_priv->hw_contexts_disabled = true;
DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
return;
}
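
Note: each disable path now logs why hardware contexts were turned off. DRM_DEBUG_DRIVER() is gated at run time; a simplified sketch of the drm core macro of this era (an assumption — the KolibriOS port may define or stub it differently):

    #define DRM_DEBUG_DRIVER(fmt, args...)                          \
            do {                                                    \
                    if (unlikely(drm_debug & DRM_UT_DRIVER))        \
                            printk(KERN_DEBUG "[drm] " fmt, ##args);\
            } while (0)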
 
262,6 → 271,7
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
 
if (dev_priv->hw_contexts_disabled)
return;
269,11 → 279,16
/* The only known way to stop the gpu from accessing the hw context is
* to reset it. Do this as the very last operation to avoid confusing
* other code, leading to spurious errors. */
-// intel_gpu_reset(dev);
+intel_gpu_reset(dev);
 
-i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj);
+i915_gem_object_unpin(dctx->obj);
 
-do_destroy(dev_priv->ring[RCS].default_context);
+/* When default context is created and switched to, base object refcount
+* will be 2 (+1 from object creation and +1 from do_switch()).
+* i915_gem_context_fini() will be called after gpu_idle() has switched
+* to default context. So we need to unreference the base object once
+* to offset the do_switch part, so that i915_gem_context_unreference()
+* can then free the base object correctly. */
+drm_gem_object_unreference(&dctx->obj->base);
+i915_gem_context_unreference(dctx);
}
 
static int context_idr_cleanup(int id, void *p, void *data)
282,8 → 297,8
 
BUG_ON(id == DEFAULT_CONTEXT_ID);
 
-do_destroy(ctx);
+i915_gem_context_unreference(ctx);

return 0;
}
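
Note: context_idr_cleanup() is the per-id callback used when a file's contexts are torn down; presumably, as in the upstream driver, it is driven from the file-close path via idr_for_each():

    /* Assumed caller, mirroring upstream i915_gem_context_close()
     * (not part of this hunk). */
    void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
    {
            struct drm_i915_file_private *file_priv = file->driver_priv;

            mutex_lock(&dev->struct_mutex);
            idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
            idr_destroy(&file_priv->context_idr);
            mutex_unlock(&dev->struct_mutex);
    }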
 
325,6 → 340,7
if (ret)
return ret;
 
+/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
if (IS_GEN7(ring->dev))
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
else
332,7 → 348,7
 
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
-intel_ring_emit(ring, new_context->obj->gtt_offset |
+intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
353,16 → 369,16
static int do_switch(struct i915_hw_context *to)
{
struct intel_ring_buffer *ring = to->ring;
-struct drm_i915_gem_object *from_obj = ring->last_context_obj;
+struct i915_hw_context *from = ring->last_context;
u32 hw_flags = 0;
int ret;
 
-BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
+BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
 
-if (from_obj == to->obj)
+if (from == to)
return 0;
 
-ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
+ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
if (ret)
return ret;
 
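Note: i915_gem_obj_ggtt_pin() replaces the bare i915_gem_object_pin() as part of the per-address-space (VMA) rework; the wrapper is assumed to look roughly like the upstream inline, pinning specifically into the global GTT:

    /* Assumed shape of the helper (upstream i915_drv.h of this era). */
    static inline int
    i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
                          bool map_and_fenceable, bool nonblocking)
    {
            return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
                                       map_and_fenceable, nonblocking);
    }
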
382,7 → 398,7
 
if (!to->is_initialized || is_default_context(to))
hw_flags |= MI_RESTORE_INHIBIT;
-else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
+else if (WARN_ON_ONCE(from == to)) /* not yet expected */
hw_flags |= MI_FORCE_RESTORE;
 
ret = mi_set_context(ring, to, hw_flags);
397,9 → 413,12
* is a bit suboptimal because the retiring can occur simply after the
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
-if (from_obj != NULL) {
-from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-i915_gem_object_move_to_active(from_obj, ring);
+if (from != NULL) {
+struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
+struct i915_address_space *ggtt = &dev_priv->gtt.base;
+from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
+i915_gem_object_move_to_active(from->obj, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
407,15 → 426,26
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
-from_obj->dirty = 1;
-BUG_ON(from_obj->ring != ring);
-i915_gem_object_unpin(from_obj);
+from->obj->dirty = 1;
+BUG_ON(from->obj->ring != ring);
 
-drm_gem_object_unreference(&from_obj->base);
+ret = i915_add_request(ring, NULL);
+if (ret) {
+/* Too late, we've already scheduled a context switch.
+* Try to undo the change so that the hw state is
+* consistent with our tracking. In case of emergency,
+* scream.
+*/
+WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
+return ret;
+}
 
+i915_gem_object_unpin(from->obj);
+i915_gem_context_unreference(from);
}

-drm_gem_object_reference(&to->obj->base);
-ring->last_context_obj = to->obj;
+i915_gem_context_reference(to);
+ring->last_context = to;
to->is_initialized = true;
 
return 0;
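
Note: the net effect of the do_switch() changes is that the ring now holds a reference on the whole context rather than on its backing object, and the old context is only unpinned and released after a request has been queued. If i915_add_request() fails after MI_SET_CONTEXT was already emitted, the code emits a compensating switch back to 'from' with MI_RESTORE_INHIBIT so the half-switched hardware state is not saved over from's image. A condensed, annotated view of the new ordering (a restatement of the code above, not a separate implementation):

    /* do_switch(to), condensed:
     * 1. i915_gem_obj_ggtt_pin(to->obj, ...)  - stable GGTT address;
     * 2. mi_set_context(ring, to, hw_flags)   - the actual hw switch;
     * 3. i915_add_request(ring, NULL)         - retire barrier for 'from';
     *    on failure: mi_set_context(ring, from, MI_RESTORE_INHIBIT);
     * 4. i915_gem_object_unpin(from->obj) and
     *    i915_gem_context_unreference(from)   - drop the ring's old ref;
     * 5. i915_gem_context_reference(to);
     *    ring->last_context = to              - the ref moves to 'to'. */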
444,6 → 474,8
if (dev_priv->hw_contexts_disabled)
return 0;
 
+WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+
if (ring != &dev_priv->ring[RCS])
return 0;
 
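Note: the new assertion documents the locking contract: context switches must run under struct_mutex. A hypothetical call-site sketch (the enclosing function here is presumably i915_switch_context(); its exact signature is an assumption):

    mutex_lock(&dev->struct_mutex);
    ret = i915_switch_context(ring, file, ctx_id);  /* assumed signature */
    mutex_unlock(&dev->struct_mutex);
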
513,7 → 545,6
return -ENOENT;
}
 
-do_destroy(ctx);
 
mutex_unlock(&dev->struct_mutex);