Subversion Repositories: KolibriOS

--- Rev 4560
+++ Rev 5060
@@ -91 +91 @@
 
 /* This is a HW constraint. The value below is the largest known requirement
  * I've seen in a spec to date, and that was a workaround for a non-shipping
  * part. It should be safe to decrease this, but it's more future proof as is.
  */
-#define CONTEXT_ALIGN (64<<10)
-
-static struct i915_hw_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
-static int do_switch(struct i915_hw_context *to);
+#define GEN6_CONTEXT_ALIGN (64<<10)
+#define GEN7_CONTEXT_ALIGN 4096
+
+static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
+{
+	struct drm_device *dev = ppgtt->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &ppgtt->base;
+
+	if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
+	    (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
+		ppgtt->base.cleanup(&ppgtt->base);
+		return;
+	}
+
+	/*
+	 * Make sure vmas are unbound before we take down the drm_mm
+	 *
+	 * FIXME: Proper refcounting should take care of this, this shouldn't be
+	 * needed at all.
+	 */
+	if (!list_empty(&vm->active_list)) {
+		struct i915_vma *vma;
+
+		list_for_each_entry(vma, &vm->active_list, mm_list)
+			if (WARN_ON(list_empty(&vma->vma_link) ||
+				    list_is_singular(&vma->vma_link)))
+				break;
+
+		i915_gem_evict_vm(&ppgtt->base, true);
+	} else {
+		i915_gem_retire_requests(dev);
+		i915_gem_evict_vm(&ppgtt->base, false);
+	}
+
+	ppgtt->base.cleanup(&ppgtt->base);
+}
+
+static void ppgtt_release(struct kref *kref)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(kref, struct i915_hw_ppgtt, ref);
+
+	do_ppgtt_cleanup(ppgtt);
+	kfree(ppgtt);
+}
+
+static size_t get_context_alignment(struct drm_device *dev)
+{
+	if (IS_GEN6(dev))
+		return GEN6_CONTEXT_ALIGN;
+
+	return GEN7_CONTEXT_ALIGN;
+}
 
 static int get_context_size(struct drm_device *dev)
@@ -127 +176 @@
 	return ret;
 }
 
 void i915_gem_context_free(struct kref *ctx_ref)
 {
-	struct i915_hw_context *ctx = container_of(ctx_ref,
+	struct intel_context *ctx = container_of(ctx_ref,
 						   typeof(*ctx), ref);
+	struct i915_hw_ppgtt *ppgtt = NULL;
 
+	if (ctx->legacy_hw_ctx.rcs_state) {
+		/* We refcount even the aliasing PPGTT to keep the code symmetric */
+		if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
+			ppgtt = ctx_to_ppgtt(ctx);
+	}
+
+	if (ppgtt)
+		kref_put(&ppgtt->ref, ppgtt_release);
+	if (ctx->legacy_hw_ctx.rcs_state)
+		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
 	list_del(&ctx->link);
-	drm_gem_object_unreference(&ctx->obj->base);
 	kfree(ctx);
 }
 
+static struct drm_i915_gem_object *
+i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
+{
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	obj = i915_gem_alloc_object(dev, size);
+	if (obj == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	/*
+	 * Try to make the context utilize L3 as well as LLC.
+	 *
+	 * On VLV we don't have L3 controls in the PTEs so we
+	 * shouldn't touch the cache level, especially as that
+	 * would make the object snooped which might have a
+	 * negative performance impact.
+	 */
+	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
+		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+		/* Failure shouldn't ever happen this early */
+		if (WARN_ON(ret)) {
+			drm_gem_object_unreference(&obj->base);
+			return ERR_PTR(ret);
+		}
+	}
+
+	return obj;
+}
+
+static struct i915_hw_ppgtt *
+create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
+{
+	struct i915_hw_ppgtt *ppgtt;
+	int ret;
+
+	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+	if (!ppgtt)
+		return ERR_PTR(-ENOMEM);
+
+	ret = i915_gem_init_ppgtt(dev, ppgtt);
+	if (ret) {
+		kfree(ppgtt);
+		return ERR_PTR(ret);
+	}
+
+	ppgtt->ctx = ctx;
+	return ppgtt;
+}
 
-static struct i915_hw_context *
-create_hw_context(struct drm_device *dev,
+static struct intel_context *
+__create_hw_context(struct drm_device *dev,
 		  struct drm_i915_file_private *file_priv)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_hw_context *ctx;
+	struct intel_context *ctx;
 	int ret;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (ctx == NULL)
 		return ERR_PTR(-ENOMEM);
 
 	kref_init(&ctx->ref);
-	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
-	INIT_LIST_HEAD(&ctx->link);
-	if (ctx->obj == NULL) {
-		kfree(ctx);
-		DRM_DEBUG_DRIVER("Context object allocated failed\n");
-		return ERR_PTR(-ENOMEM);
-	}
+	list_add_tail(&ctx->link, &dev_priv->context_list);
 
-	if (INTEL_INFO(dev)->gen >= 7) {
-		ret = i915_gem_object_set_cache_level(ctx->obj,
-						      I915_CACHE_L3_LLC);
-		/* Failure shouldn't ever happen this early */
-		if (WARN_ON(ret))
-			goto err_out;
-	}
-
-	/* The ring associated with the context object is handled by the normal
-	 * object tracking code. We give an initial ring value simple to pass an
-	 * assertion in the context switch code.
-	 */
-	ctx->ring = &dev_priv->ring[RCS];
-	list_add_tail(&ctx->link, &dev_priv->context_list);
-
-	/* Default context will never have a file_priv */
-	if (file_priv == NULL)
-		return ctx;
-
-	ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
-			GFP_KERNEL);
-	if (ret < 0)
+	if (dev_priv->hw_context_size) {
+		struct drm_i915_gem_object *obj =
+				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+		if (IS_ERR(obj)) {
+			ret = PTR_ERR(obj);
+			goto err_out;
+		}
+		ctx->legacy_hw_ctx.rcs_state = obj;
+	}
+
+	/* Default context will never have a file_priv */
+	if (file_priv != NULL) {
+		ret = idr_alloc(&file_priv->context_idr, ctx,
+				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
+		if (ret < 0)
+			goto err_out;
+	} else
@@ -194 +292 @@
 err_out:
 	i915_gem_context_unreference(ctx);
 	return ERR_PTR(ret);
 }
-
-static inline bool is_default_context(struct i915_hw_context *ctx)
-{
-	return (ctx == ctx->ring->default_context);
-}
 
 /**
  * The default context needs to exist per ring that uses contexts. It stores the
  * context state of the GPU for applications that don't utilize HW contexts, as
  * well as an idle case.
  */
-static int create_default_context(struct drm_i915_private *dev_priv)
+static struct intel_context *
+i915_gem_create_context(struct drm_device *dev,
+			struct drm_i915_file_private *file_priv,
+			bool create_vm)
 {
-	struct i915_hw_context *ctx;
-	int ret;
+	const bool is_global_default_ctx = file_priv == NULL;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_context *ctx;
+	int ret = 0;
 
-	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	ctx = create_hw_context(dev_priv->dev, NULL);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
-
-	/* We may need to do things with the shrinker which require us to
-	 * immediately switch back to the default context. This can cause a
-	 * problem as pinning the default context also requires GTT space which
-	 * may not be available. To avoid this we always pin the
-	 * default context.
-	 */
-	ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
-		goto err_destroy;
-	}
-
-	ret = do_switch(ctx);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
-		goto err_unpin;
-	}
-
-	dev_priv->ring[RCS].default_context = ctx;
+	ctx = __create_hw_context(dev, file_priv);
+	if (IS_ERR(ctx))
+		return ctx;
+
+	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
+		/* We may need to do things with the shrinker which
+		 * require us to immediately switch back to the default
+		 * context. This can cause a problem as pinning the
+		 * default context also requires GTT space which may not
+		 * be available. To avoid this we always pin the default
+		 * context.
+		 */
+		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
+					    get_context_alignment(dev), 0);
+		if (ret) {
+			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
+			goto err_destroy;
+		}
+	}
+
+	if (create_vm) {
+		struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
+
+		if (IS_ERR_OR_NULL(ppgtt)) {
+			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
+					 PTR_ERR(ppgtt));
+			ret = PTR_ERR(ppgtt);
+			goto err_unpin;
+		} else
+			ctx->vm = &ppgtt->base;
+
+		/* This case is reserved for the global default context and
+		 * should only happen once. */
+		if (is_global_default_ctx) {
+			if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
+				ret = -EEXIST;
+				goto err_unpin;
+			}
+
+			dev_priv->mm.aliasing_ppgtt = ppgtt;
+		}
+	} else if (USES_PPGTT(dev)) {
+		/* For platforms which only have aliasing PPGTT, we fake the
+		 * address space and refcounting. */
+		ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
+		kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
+	} else
+		ctx->vm = &dev_priv->gtt.base;
 
-	DRM_DEBUG_DRIVER("Default HW context loaded\n");
-	return 0;
-
-err_unpin:
-	i915_gem_object_unpin(ctx->obj);
-err_destroy:
-	i915_gem_context_unreference(ctx);
-	return ret;
-}
+	return ctx;
+
+err_unpin:
+	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
+		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+err_destroy:
+	i915_gem_context_unreference(ctx);
+	return ERR_PTR(ret);
+}
+
+void i915_gem_context_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	/* Prevent the hardware from restoring the last context (which hung) on
+	 * the next switch */
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_engine_cs *ring = &dev_priv->ring[i];
+		struct intel_context *dctx = ring->default_context;
+		struct intel_context *lctx = ring->last_context;
+
+		/* Do a fake switch to the default context */
+		if (lctx == dctx)
+			continue;
+
+		if (!lctx)
+			continue;
+
+		if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
+			WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
+						      get_context_alignment(dev), 0));
+			/* Fake a finish/inactive */
+			dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
+			dctx->legacy_hw_ctx.rcs_state->active = 0;
+		}
+
+		if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
+			i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
+
+		i915_gem_context_unreference(lctx);
+		i915_gem_context_reference(dctx);
+		ring->last_context = dctx;
+	}
+}
 
 int i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
+	struct intel_context *ctx;
+	int i;
 
-	if (!HAS_HW_CONTEXTS(dev))
-		return 0;
-
-	/* If called from reset, or thaw... we've been here already */
-	if (dev_priv->ring[RCS].default_context)
-		return 0;
-
-	dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
-
-	if (dev_priv->hw_context_size > (1<<20)) {
-		DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
-		return -E2BIG;
-	}
-
-	ret = create_default_context(dev_priv);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n",
-				 ret);
-		return ret;
-	}
+	/* Init should only be called once per module load. Eventually the
+	 * restriction on the context_disabled check can be loosened. */
+	if (WARN_ON(dev_priv->ring[RCS].default_context))
+		return 0;
+
+	if (HAS_HW_CONTEXTS(dev)) {
+		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+		if (dev_priv->hw_context_size > (1<<20)) {
+			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
+					 dev_priv->hw_context_size);
+			dev_priv->hw_context_size = 0;
+		}
+	}
+
+	ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
+	if (IS_ERR(ctx)) {
+		DRM_ERROR("Failed to create default global context (error %ld)\n",
+			  PTR_ERR(ctx));
+		return PTR_ERR(ctx);
+	}
+
+	/* NB: RCS will hold a ref for all rings */
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		dev_priv->ring[i].default_context = ctx;
 
@@ -297 +460 @@
 	 * to offset the do_switch part, so that i915_gem_context_unreference()
 	 * can then free the base object correctly. */
 	WARN_ON(!dev_priv->ring[RCS].last_context);
 	if (dev_priv->ring[RCS].last_context == dctx) {
 		/* Fake switch to NULL context */
-		WARN_ON(dctx->obj->active);
-		i915_gem_object_unpin(dctx->obj);
+			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
+			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 		i915_gem_context_unreference(dctx);
+			dev_priv->ring[RCS].last_context = NULL;
+		}
+
+		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
+	}
+
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_engine_cs *ring = &dev_priv->ring[i];
+
+		if (ring->last_context)
+			i915_gem_context_unreference(ring->last_context);
+
+		ring->default_context = NULL;
+		ring->last_context = NULL;
 	}
 
-	i915_gem_object_unpin(dctx->obj);
-	i915_gem_context_unreference(dctx);
-	dev_priv->ring[RCS].default_context = NULL;
-	dev_priv->ring[RCS].last_context = NULL;
+	i915_gem_context_unreference(dctx);
 }
 
-static int context_idr_cleanup(int id, void *p, void *data)
+int i915_gem_context_enable(struct drm_i915_private *dev_priv)
 {
-	struct i915_hw_context *ctx = p;
-
-	BUG_ON(id == DEFAULT_CONTEXT_ID);
-
+	struct intel_engine_cs *ring;
+	int ret, i;
+
+	/* This is the only place the aliasing PPGTT gets enabled, which means
+	 * it has to happen before we bail on reset */
+	if (dev_priv->mm.aliasing_ppgtt) {
+		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+		ppgtt->enable(ppgtt);
+	}
+
+	/* FIXME: We should make this work, even in reset */
+	if (i915_reset_in_progress(&dev_priv->gpu_error))
+		return 0;
+
+	BUG_ON(!dev_priv->ring[RCS].default_context);
+
+	for_each_ring(ring, dev_priv, i) {
+		ret = i915_switch_context(ring, ring->default_context);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int context_idr_cleanup(int id, void *p, void *data)
+{
+	struct intel_context *ctx = p;
+
 	i915_gem_context_unreference(ctx);
 	return 0;
 }
 
-struct i915_ctx_hang_stats *
-i915_gem_context_get_hang_stats(struct drm_device *dev,
-				struct drm_file *file,
-				u32 id)
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct i915_hw_context *ctx;
-
-	if (id == DEFAULT_CONTEXT_ID)
-		return &file_priv->hang_stats;
-
-	if (!HAS_HW_CONTEXTS(dev))
-		return ERR_PTR(-ENOENT);
-
-		ctx = i915_gem_context_get(file->driver_priv, id);
-	if (ctx == NULL)
-		return ERR_PTR(-ENOENT);
-
-	return &ctx->hang_stats;
+	struct intel_context *ctx;
+
+	idr_init(&file_priv->context_idr);
+
+	mutex_lock(&dev->struct_mutex);
+	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+	mutex_unlock(&dev->struct_mutex);
+
+	if (IS_ERR(ctx)) {
+		idr_destroy(&file_priv->context_idr);
+		return PTR_ERR(ctx);
+	}
+
+	return 0;
 }
 
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 	idr_destroy(&file_priv->context_idr);
 }
 
-static struct i915_hw_context *
+struct intel_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 {
-	return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
+	struct intel_context *ctx;
+
+	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
+	if (!ctx)
+		return ERR_PTR(-ENOENT);
+
+	return ctx;
 }
 
 static inline int
-mi_set_context(struct intel_ring_buffer *ring,
-	       struct i915_hw_context *new_context,
+mi_set_context(struct intel_engine_cs *ring,
+	       struct intel_context *new_context,
 	       u32 hw_flags)
 {
 	int ret;
 
 	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
 	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
 	 * explicitly, so we rely on the value at ring init, stored in
 	 * itlb_before_ctx_switch.
 	 */
-	if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
+	if (IS_GEN6(ring->dev)) {
 		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
 
 	ret = intel_ring_begin(ring, 6);
 	if (ret)
 		return ret;
 
-	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
-	if (IS_GEN7(ring->dev))
+	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
+	if (INTEL_INFO(ring->dev)->gen >= 7)
 		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 	else
 		intel_ring_emit(ring, MI_NOOP);
 
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
 			MI_MM_SPACE_GTT |
 			MI_SAVE_EXT_STATE_EN |
 			MI_RESTORE_EXT_STATE_EN |
 			hw_flags);
-	/* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
+	/*
+	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
+	 * WaMiSetContext_Hang:snb,ivb,vlv
+	 */
 	intel_ring_emit(ring, MI_NOOP);
 
-	if (IS_GEN7(ring->dev))
+	if (INTEL_INFO(ring->dev)->gen >= 7)
 		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 	else
 		intel_ring_emit(ring, MI_NOOP);
 
 	intel_ring_advance(ring);
 
 	return ret;
 }
 
-static int do_switch(struct i915_hw_context *to)
+static int do_switch(struct intel_engine_cs *ring,
+		     struct intel_context *to)
 {
-	struct intel_ring_buffer *ring = to->ring;
-	struct i915_hw_context *from = ring->last_context;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_context *from = ring->last_context;
+	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
 	u32 hw_flags = 0;
+	bool uninitialized = false;
 	int ret, i;
 
-	BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
+	if (from != NULL && ring == &dev_priv->ring[RCS]) {
+		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
+		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
+	}
 
 	if (from == to && !to->remap_slice)
 		return 0;
 
-	ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
-	if (ret)
-		return ret;
+	/* Trying to pin first makes error handling easier. */
+	if (ring == &dev_priv->ring[RCS]) {
+		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
+					    get_context_alignment(ring->dev), 0);
+		if (ret)
+			return ret;
+	}
 
 	/*
 	 * Pin can switch back to the default context if we end up calling into
 	 * evict_everything - as a last ditch gtt defrag effort that also
 	 * switches to the default context. Hence we need to reload from here.
 	 */
 	from = ring->last_context;
+
+	if (USES_FULL_PPGTT(ring->dev)) {
+		ret = ppgtt->switch_mm(ppgtt, ring, false);
+		if (ret)
+			goto unpin_out;
+	}
+
+	if (ring != &dev_priv->ring[RCS]) {
+		if (from)
+			i915_gem_context_unreference(from);
+		goto done;
@@ -469 +696 @@
 	 * the next context has already started running. In fact, the below code
 	 * is a bit suboptimal because the retiring can occur simply after the
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
+		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
 		 * correct in case the object gets swapped out. Ideally we'd be
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		from->obj->dirty = 1;
-		BUG_ON(from->obj->ring != ring);
+		from->legacy_hw_ctx.rcs_state->dirty = 1;
+		BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
 
 		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_unpin(from->obj);
+		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
 		i915_gem_context_unreference(from);
 	}
 
-	i915_gem_context_reference(to);
-	ring->last_context = to;
-	to->is_initialized = true;
-
-	return 0;
+	uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+	to->legacy_hw_ctx.initialized = true;
+
+done:
+	i915_gem_context_reference(to);
+	ring->last_context = to;
+
+	if (uninitialized) {
+		ret = i915_gem_render_state_init(ring);
+		if (ret)
+			DRM_ERROR("init render state: %d\n", ret);
+	}
+
+	return 0;
+
+unpin_out:
+	if (ring->id == RCS)
+		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+	return ret;
 }
 
 /**
  * i915_switch_context() - perform a GPU context switch.
  * @ring: ring for which we'll execute the context switch
- * @file_priv: file_priv associated with the context, may be NULL
- * @id: context id number
+ * @to: the context to switch to
  *
  * The context life cycle is simple. The context refcount is incremented and
  * decremented by 1 and create and destroy. If the context is in use by the GPU,
  * it will have a refoucnt > 1. This allows us to destroy the context abstract
  * object while letting the normal object tracking destroy the backing BO.
  */
-int i915_switch_context(struct intel_ring_buffer *ring,
-			struct drm_file *file,
-			int to_id)
+int i915_switch_context(struct intel_engine_cs *ring,
+			struct intel_context *to)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct i915_hw_context *to;
-
-	if (!HAS_HW_CONTEXTS(ring->dev))
-		return 0;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	if (ring != &dev_priv->ring[RCS])
-		return 0;
-
-	if (to_id == DEFAULT_CONTEXT_ID) {
-		to = ring->default_context;
-	} else {
-		if (file == NULL)
-			return -EINVAL;
-
-		to = i915_gem_context_get(file->driver_priv, to_id);
-		if (to == NULL)
-			return -ENOENT;
-	}
-
-	return do_switch(to);
+	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
+		if (to != ring->last_context) {
+			i915_gem_context_reference(to);
+			if (ring->last_context)
+				i915_gem_context_unreference(ring->last_context);
+			ring->last_context = to;
+		}
+		return 0;
+	}
+
+	return do_switch(ring, to);
 }
 
+static bool hw_context_enabled(struct drm_device *dev)
+{
+	return to_i915(dev)->hw_context_size;
+}
+
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file)
 {
 	struct drm_i915_gem_context_create *args = data;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct i915_hw_context *ctx;
+	struct intel_context *ctx;
 	int ret;
 
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
-	if (!HAS_HW_CONTEXTS(dev))
+	if (!hw_context_enabled(dev))
 		return -ENODEV;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
-	ctx = create_hw_context(dev, file_priv);
+	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
 	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	args->ctx_id = ctx->id;
+	args->ctx_id = ctx->user_handle;
 	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
 
 	return 0;
 }
 
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file)
 {
 	struct drm_i915_gem_context_destroy *args = data;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct i915_hw_context *ctx;
+	struct intel_context *ctx;
 	int ret;
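
For orientation, the reworked context life cycle on the Rev 5060 side can be exercised as sketched below. This is illustrative only: example_create_and_switch() is a hypothetical caller that does not exist in the repository, the setup of dev, file and ring is assumed to be done by the surrounding driver code, and only functions and types visible in this diff are used.

/* Hypothetical usage sketch, modelled on i915_gem_context_create_ioctl()
 * and i915_gem_context_enable() from the Rev 5060 side of this diff. */
static int example_create_and_switch(struct drm_device *dev,
				     struct drm_file *file,
				     struct intel_engine_cs *ring)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	/* i915_gem_create_context() asserts that struct_mutex is held */
	mutex_lock(&dev->struct_mutex);

	/* Create a context owned by this file. The idr handle installed by
	 * __create_hw_context() holds the creation reference, so no explicit
	 * unreference is needed here; context_idr_cleanup() drops it when
	 * the file is closed. */
	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	/* Switch the ring to the new context: do_switch() takes its own
	 * reference on 'to' and drops the one held via ring->last_context,
	 * so a context in use by the GPU keeps a refcount > 1, per the
	 * i915_switch_context() comment above. */
	ret = i915_switch_context(ring, ctx);

	mutex_unlock(&dev->struct_mutex);
	return ret;
}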