Subversion Repositories Kolibri OS


--- Rev 5060
+++ Rev 5354
@@ -86 +86 @@
  */
 
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_trace.h"
 
 /* This is a HW constraint. The value below is the largest known requirement
  * I've seen in a spec to date, and that was a workaround for a non-shipping
  * part. It should be safe to decrease this, but it's more future proof as is.
  */
 #define GEN6_CONTEXT_ALIGN (64<<10)
 #define GEN7_CONTEXT_ALIGN 4096
 
-static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
-{
-	struct drm_device *dev = ppgtt->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *vm = &ppgtt->base;
-
-	if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
-	    (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
-		ppgtt->base.cleanup(&ppgtt->base);
-		return;
-	}
-
-	/*
-	 * Make sure vmas are unbound before we take down the drm_mm
-	 *
-	 * FIXME: Proper refcounting should take care of this, this shouldn't be
-	 * needed at all.
-	 */
-	if (!list_empty(&vm->active_list)) {
-		struct i915_vma *vma;
-
-		list_for_each_entry(vma, &vm->active_list, mm_list)
-			if (WARN_ON(list_empty(&vma->vma_link) ||
-				    list_is_singular(&vma->vma_link)))
-				break;
-
-		i915_gem_evict_vm(&ppgtt->base, true);
-	} else {
-		i915_gem_retire_requests(dev);
-		i915_gem_evict_vm(&ppgtt->base, false);
-	}
-
-	ppgtt->base.cleanup(&ppgtt->base);
-}
-
-static void ppgtt_release(struct kref *kref)
-{
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(kref, struct i915_hw_ppgtt, ref);
-
-	do_ppgtt_cleanup(ppgtt);
-	kfree(ppgtt);
-}
-
 static size_t get_context_alignment(struct drm_device *dev)
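The do_ppgtt_cleanup()/ppgtt_release() pair removed above is the usual kref release pattern: the final kref_put() on the PPGTT runs the release callback, which tears the address space down and frees it. Rev 5354 folds that into i915_ppgtt_put() (see the i915_gem_context_free() hunk below). A minimal user-space sketch of the pattern, with purely illustrative names (struct object, object_put) rather than anything from the i915 sources:

#include <stdio.h>
#include <stdlib.h>

struct object {
	int refcount;                      /* stands in for the kernel's struct kref */
	void (*release)(struct object *);  /* callback run when the count drops to zero */
};

static void object_put(struct object *obj)
{
	if (--obj->refcount == 0)
		obj->release(obj);         /* mirrors kref_put(&ppgtt->ref, ppgtt_release) */
}

static void object_release(struct object *obj)
{
	printf("releasing %p\n", (void *)obj);
	free(obj);
}

int main(void)
{
	struct object *obj = malloc(sizeof(*obj));

	if (!obj)
		return 1;
	obj->refcount = 2;                 /* e.g. held by the context and one more user */
	obj->release = object_release;

	object_put(obj);                   /* still referenced: nothing happens */
	object_put(obj);                   /* last reference: release runs, memory freed */
	return 0;
}

The second object_put() is the point where the old code would have reached ppgtt_release().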
@@ -178 +135 @@
 
 void i915_gem_context_free(struct kref *ctx_ref)
 {
 	struct intel_context *ctx = container_of(ctx_ref,
 						   typeof(*ctx), ref);
-	struct i915_hw_ppgtt *ppgtt = NULL;
 
-	if (ctx->legacy_hw_ctx.rcs_state) {
-		/* We refcount even the aliasing PPGTT to keep the code symmetric */
-		if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
-			ppgtt = ctx_to_ppgtt(ctx);
-	}
+	trace_i915_context_free(ctx);
 
-	if (ppgtt)
-		kref_put(&ppgtt->ref, ppgtt_release);
+	if (i915.enable_execlists)
+		intel_lr_context_free(ctx);
+
+	i915_ppgtt_put(ctx->ppgtt);
+
 	if (ctx->legacy_hw_ctx.rcs_state)
 		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
 	list_del(&ctx->link);
 	kfree(ctx);
 }
 
-static struct drm_i915_gem_object *
+struct drm_i915_gem_object *
@@ -224 +179 @@
 	}
 
 	return obj;
 }
 
-static struct i915_hw_ppgtt *
-create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
-{
-	struct i915_hw_ppgtt *ppgtt;
-	int ret;
-
-	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-	if (!ppgtt)
-		return ERR_PTR(-ENOMEM);
-
-	ret = i915_gem_init_ppgtt(dev, ppgtt);
-	if (ret) {
-		kfree(ppgtt);
-		return ERR_PTR(ret);
-	}
-
-	ppgtt->ctx = ctx;
-	return ppgtt;
-}
-
 static struct intel_context *
 __create_hw_context(struct drm_device *dev,
 		  struct drm_i915_file_private *file_priv)
@@ -299 +234 @@
  * context state of the GPU for applications that don't utilize HW contexts, as
  * well as an idle case.
  */
 static struct intel_context *
 i915_gem_create_context(struct drm_device *dev,
-			struct drm_i915_file_private *file_priv,
-			bool create_vm)
+			struct drm_i915_file_private *file_priv)
 {
 	const bool is_global_default_ctx = file_priv == NULL;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_context *ctx;
 	int ret = 0;
 
@@ -329 +262 @@
 		DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
 		goto err_destroy;
 	}
 	}
 
-	if (create_vm) {
-		struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
+	if (USES_FULL_PPGTT(dev)) {
+		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
 
 		if (IS_ERR_OR_NULL(ppgtt)) {
 			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
 					 PTR_ERR(ppgtt));
 			ret = PTR_ERR(ppgtt);
 			goto err_unpin;
-		} else
-			ctx->vm = &ppgtt->base;
-
-		/* This case is reserved for the global default context and
-		 * should only happen once. */
-		if (is_global_default_ctx) {
-			if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
-				ret = -EEXIST;
-				goto err_unpin;
-			}
-
-			dev_priv->mm.aliasing_ppgtt = ppgtt;
-		}
-	} else if (USES_PPGTT(dev)) {
-		/* For platforms which only have aliasing PPGTT, we fake the
-		 * address space and refcounting. */
-		ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
-		kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
-	} else
-		ctx->vm = &dev_priv->gtt.base;
+		}
+
+		ctx->ppgtt = ppgtt;
+	}
+
+	trace_i915_context_create(ctx);
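Both versions of the creation path above return error pointers: create_vm_for_ctx()/i915_ppgtt_create() hand back either a valid pointer or an encoded negative errno, which the caller checks with IS_ERR_OR_NULL() and unpacks with PTR_ERR() before jumping to err_unpin. A stand-alone sketch of that error-pointer idiom, with simplified stand-ins for the helpers from the kernel's linux/err.h:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* error codes occupy the top MAX_ERRNO addresses, which can never be valid pointers */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static int backing;                            /* stands in for a successfully created object */

static void *create_thing(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);       /* the errno travels inside the pointer */
	return &backing;
}

int main(void)
{
	void *p = create_thing(1);

	if (IS_ERR(p))
		printf("create failed: %ld\n", PTR_ERR(p));
	else
		printf("create succeeded\n");
	return 0;
}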
@@ -373 +292 @@
 void i915_gem_context_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	/* Prevent the hardware from restoring the last context (which hung) on
-	 * the next switch */
+	/* In execlists mode we will unreference the context when the execlist
+	 * queue is cleared and the requests destroyed.
+	 */
+	if (i915.enable_execlists)
+		return;
+
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct intel_engine_cs *ring = &dev_priv->ring[i];
-		struct intel_context *dctx = ring->default_context;
 		struct intel_context *lctx = ring->last_context;
 
-		/* Do a fake switch to the default context */
-		if (lctx == dctx)
-			continue;
-
-		if (!lctx)
-			continue;
-
-		if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
-			WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
-						      get_context_alignment(dev), 0));
-			/* Fake a finish/inactive */
-			dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
-			dctx->legacy_hw_ctx.rcs_state->active = 0;
-		}
-
-		if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
-			i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
-
-		i915_gem_context_unreference(lctx);
-		i915_gem_context_reference(dctx);
-		ring->last_context = dctx;
+		if (lctx) {
+			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
+				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
+
+			i915_gem_context_unreference(lctx);
+			ring->last_context = NULL;
+		}
 	}
@@ -415 +323 @@
 	/* Init should only be called once per module load. Eventually the
 	 * restriction on the context_disabled check can be loosened. */
 	if (WARN_ON(dev_priv->ring[RCS].default_context))
 		return 0;
 
-	if (HAS_HW_CONTEXTS(dev)) {
+	if (i915.enable_execlists) {
+		/* NB: intentionally left blank. We will allocate our own
+		 * backing objects as we need them, thank you very much */
+		dev_priv->hw_context_size = 0;
+	} else if (HAS_HW_CONTEXTS(dev)) {
 	dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
 	if (dev_priv->hw_context_size > (1<<20)) {
 			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
 					 dev_priv->hw_context_size);
 			dev_priv->hw_context_size = 0;
 		}
 	}
 
-	ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
+	ctx = i915_gem_create_context(dev, NULL);
 	if (IS_ERR(ctx)) {
 		DRM_ERROR("Failed to create default global context (error %ld)\n",
 			  PTR_ERR(ctx));
 		return PTR_ERR(ctx);
 	}
 
-	/* NB: RCS will hold a ref for all rings */
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		dev_priv->ring[i].default_context = ctx;
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_engine_cs *ring = &dev_priv->ring[i];
+
+		/* NB: RCS will hold a ref for all rings */
+		ring->default_context = ctx;
+	}
 
-	DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake");
+	DRM_DEBUG_DRIVER("%s context support initialized\n",
+			i915.enable_execlists ? "LR" :
+			dev_priv->hw_context_size ? "HW" : "fake");
@@ -487 +404 @@
 int i915_gem_context_enable(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *ring;
 	int ret, i;
 
-	/* This is the only place the aliasing PPGTT gets enabled, which means
-	 * it has to happen before we bail on reset */
-	if (dev_priv->mm.aliasing_ppgtt) {
-		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-		ppgtt->enable(ppgtt);
-	}
-
-	/* FIXME: We should make this work, even in reset */
-	if (i915_reset_in_progress(&dev_priv->gpu_error))
-		return 0;
-
 	BUG_ON(!dev_priv->ring[RCS].default_context);
 
+	if (i915.enable_execlists)
+		return 0;
+
 	for_each_ring(ring, dev_priv, i) {
 		ret = i915_switch_context(ring, ring->default_context);
@@ -525 +434 @@
 	struct intel_context *ctx;
 
 	idr_init(&file_priv->context_idr);
 
 	mutex_lock(&dev->struct_mutex);
-	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+	ctx = i915_gem_create_context(dev, file_priv);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -561 +470 @@
 static inline int
 mi_set_context(struct intel_engine_cs *ring,
 	       struct intel_context *new_context,
 	       u32 hw_flags)
 {
-	int ret;
+	u32 flags = hw_flags | MI_MM_SPACE_GTT;
+	const int num_rings =
+		/* Use an extended w/a on ivb+ if signalling from other rings */
+		i915_semaphore_is_enabled(ring->dev) ?
+		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+		0;
+	int len, i, ret;
 
 	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
 	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
 	 * explicitly, so we rely on the value at ring init, stored in
@@ -574 +489 @@
 		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
 
-	ret = intel_ring_begin(ring, 6);
+	/* These flags are for resource streamer on HSW+ */
+	if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
+		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
+
+
+	len = 4;
+	if (INTEL_INFO(ring->dev)->gen >= 7)
+		len += 2 + (num_rings ? 4*num_rings + 2 : 0);
+
+	ret = intel_ring_begin(ring, len);
 	if (ret)
 		return ret;
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-	if (INTEL_INFO(ring->dev)->gen >= 7)
+	if (INTEL_INFO(ring->dev)->gen >= 7) {
 		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
-	else
-		intel_ring_emit(ring, MI_NOOP);
+		if (num_rings) {
+			struct intel_engine_cs *signaller;
+
+			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+			for_each_ring(signaller, to_i915(ring->dev), i) {
+				if (signaller == ring)
+					continue;
+
+				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+			}
+		}
+	}
 
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
 	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
-			MI_MM_SPACE_GTT |
-			MI_SAVE_EXT_STATE_EN |
-			MI_RESTORE_EXT_STATE_EN |
-			hw_flags);
+			flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
 	 */
 	intel_ring_emit(ring, MI_NOOP);
 
-	if (INTEL_INFO(ring->dev)->gen >= 7)
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
-	else
+	if (INTEL_INFO(ring->dev)->gen >= 7) {
+		if (num_rings) {
+			struct intel_engine_cs *signaller;
+
+			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+			for_each_ring(signaller, to_i915(ring->dev), i) {
+				if (signaller == ring)
+					continue;
+
+				intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+			}
+		}
+		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
612
static int do_switch(struct intel_engine_cs *ring,
555
static int do_switch(struct intel_engine_cs *ring,
613
		     struct intel_context *to)
556
		     struct intel_context *to)
614
{
557
{
615
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
558
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
616
	struct intel_context *from = ring->last_context;
559
	struct intel_context *from = ring->last_context;
617
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
-
 
618
	u32 hw_flags = 0;
560
	u32 hw_flags = 0;
619
	bool uninitialized = false;
561
	bool uninitialized = false;
-
 
562
	struct i915_vma *vma;
620
	int ret, i;
563
	int ret, i;
Line 621... Line 564...
621
 
564
 
622
	if (from != NULL && ring == &dev_priv->ring[RCS]) {
565
	if (from != NULL && ring == &dev_priv->ring[RCS]) {
623
		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
566
		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
Line 640... Line 583...
640
	 * evict_everything - as a last ditch gtt defrag effort that also
583
	 * evict_everything - as a last ditch gtt defrag effort that also
641
	 * switches to the default context. Hence we need to reload from here.
584
	 * switches to the default context. Hence we need to reload from here.
642
	 */
585
	 */
643
	from = ring->last_context;
586
	from = ring->last_context;
Line -... Line 587...
-
 
587
 
644
 
588
	if (to->ppgtt) {
645
	if (USES_FULL_PPGTT(ring->dev)) {
589
		trace_switch_mm(ring, to);
646
		ret = ppgtt->switch_mm(ppgtt, ring, false);
590
		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
647
		if (ret)
591
		if (ret)
648
			goto unpin_out;
592
			goto unpin_out;
Line 649... Line 593...
649
	}
593
	}
Line 664... Line 608...
664
	 */
608
	 */
665
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
609
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
666
	if (ret)
610
	if (ret)
667
		goto unpin_out;
611
		goto unpin_out;
Line 668... Line -...
668
 
-
 
669
	if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
612
 
670
		struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
613
	vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
671
							   &dev_priv->gtt.base);
614
	if (!(vma->bound & GLOBAL_BIND))
672
		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
615
		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
Line 673... Line 616...
673
	}
616
				GLOBAL_BIND);
674
 
617
 
Line 675... Line 618...
675
	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
618
	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
@@ -721 +664 @@
 done:
 	i915_gem_context_reference(to);
 	ring->last_context = to;
 
 	if (uninitialized) {
+		if (ring->init_context) {
+			ret = ring->init_context(ring, to);
+			if (ret)
+				DRM_ERROR("ring init context: %d\n", ret);
+		}
+
 		ret = i915_gem_render_state_init(ring);
 		if (ret)
 			DRM_ERROR("init render state: %d\n", ret);
@@ -741 +690 @@
  * @ring: ring for which we'll execute the context switch
  * @to: the context to switch to
  *
  * The context life cycle is simple. The context refcount is incremented and
  * decremented by 1 and create and destroy. If the context is in use by the GPU,
- * it will have a refoucnt > 1. This allows us to destroy the context abstract
+ * it will have a refcount > 1. This allows us to destroy the context abstract
  * object while letting the normal object tracking destroy the backing BO.
+ *
+ * This function should not be used in execlists mode.  Instead the context is
+ * switched by writing to the ELSP and requests keep a reference to their
+ * context.
  */
 int i915_switch_context(struct intel_engine_cs *ring,
 			struct intel_context *to)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
+	WARN_ON(i915.enable_execlists);
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
 	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
@@ -764 +718 @@
 	}
 
 	return do_switch(ring, to);
 }
 
-static bool hw_context_enabled(struct drm_device *dev)
+static bool contexts_enabled(struct drm_device *dev)
 {
-	return to_i915(dev)->hw_context_size;
+	return i915.enable_execlists || to_i915(dev)->hw_context_size;
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file)
 {
 	struct drm_i915_gem_context_create *args = data;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct intel_context *ctx;
 	int ret;
 
-	if (!hw_context_enabled(dev))
+	if (!contexts_enabled(dev))
 		return -ENODEV;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;