Subversion Repositories Kolibri OS


Rev 4539 → Rev 4560
Line 71... Line 71...
  * S3->S5->S0: destroy path
  * S4->S5->S0: destroy path on current context
  *
  * There are two confusing terms used above:
  *  The "current context" means the context which is currently running on the
- *  GPU. The GPU has loaded it's state already and has stored away the gtt
+ *  GPU. The GPU has loaded its state already and has stored away the gtt
  *  offset of the BO. The GPU is not actively referencing the data at this
  *  offset, but it will on the next context switch. The only way to avoid this
  *  is to do a GPU reset.
  *
  *  An "active context" is one which was previously the "current context" and is
Line 115... Line 115...
 		if (IS_HASWELL(dev))
 			ret = HSW_CXT_TOTAL_SIZE;
 		else
 			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
 		break;
+	case 8:
+		ret = GEN8_CXT_TOTAL_SIZE;
+		break;
 	default:
 		BUG();
 	}
 
Line 127... Line 130...
 void i915_gem_context_free(struct kref *ctx_ref)
 {
 	struct i915_hw_context *ctx = container_of(ctx_ref,
 						   typeof(*ctx), ref);
 
+	list_del(&ctx->link);
 	drm_gem_object_unreference(&ctx->obj->base);
 	kfree(ctx);
 }
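The free path above is the standard kernel kref release pattern: i915_gem_context_free() runs only once the last reference is dropped, and container_of() recovers the enclosing context from the embedded kref. A minimal stand-alone sketch of the same pattern, assuming only the stock <linux/kref.h> API (example_ctx and its helpers are hypothetical stand-ins, not driver code):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct example_ctx {			/* hypothetical i915_hw_context stand-in */
		struct kref ref;		/* embedded refcount */
	};

	static void example_ctx_free(struct kref *ctx_ref)
	{
		/* recover the enclosing object from the embedded kref */
		struct example_ctx *ctx = container_of(ctx_ref, typeof(*ctx), ref);

		kfree(ctx);			/* release owned resources last */
	}

	static void example_ctx_unreference(struct example_ctx *ctx)
	{
		/* invokes example_ctx_free() only when this was the last reference */
		kref_put(&ctx->ref, example_ctx_free);
	}

With this shape, the new list_del() in the diff naturally belongs in the release callback: by the time it runs, nobody else can hold a reference, so the context can safely drop off the device-wide list.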
Line 145... Line 149...
 	if (ctx == NULL)
 		return ERR_PTR(-ENOMEM);
 
 	kref_init(&ctx->ref);
 	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+	INIT_LIST_HEAD(&ctx->link);
 	if (ctx->obj == NULL) {
 		kfree(ctx);
 		DRM_DEBUG_DRIVER("Context object allocated failed\n");
 		return ERR_PTR(-ENOMEM);
Line 164... Line 169...
 	/* The ring associated with the context object is handled by the normal
 	 * object tracking code. We give an initial ring value simply to pass an
 	 * assertion in the context switch code.
 	 */
 	ctx->ring = &dev_priv->ring[RCS];
+	list_add_tail(&ctx->link, &dev_priv->context_list);
 
 	/* Default context will never have a file_priv */
 	if (file_priv == NULL)
Line 176... Line 182...
 	if (ret < 0)
 		goto err_out;
 
 	ctx->file_priv = file_priv;
 	ctx->id = ret;
+	/* NB: Mark all slices as needing a remap so that when the context first
+	 * loads it will restore whatever remap state already exists. If there
+	 * is no remap info, it will be a NOP. */
+	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
 
 	return ctx;
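The remap_slice value seeded above is simply a bitmask with one bit per L3 slice, so the context's first load replays whatever remap state already exists. A small hypothetical illustration of the arithmetic (the helper name and the slice count of 2 are assumed examples, not queried hardware values):

	/* hypothetical demo, not driver code */
	static u32 initial_remap_mask(unsigned int num_slices)
	{
		/* num_slices == 2 gives (1 << 2) - 1 == 0b11: both slices pending */
		return (1 << num_slices) - 1;
	}

do_switch() then clears one bit per successful i915_gem_l3_remap() call, as shown later in this diff.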
Line 211... Line 221...
 	 * immediately switch back to the default context. This can cause a
 	 * problem as pinning the default context also requires GTT space which
 	 * may not be available. To avoid this we always pin the
 	 * default context.
 	 */
-	dev_priv->ring[RCS].default_context = ctx;
 	ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
 		goto err_destroy;
 	}
Line 224... Line 233...
 	if (ret) {
 		DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
 		goto err_unpin;
 	}
 
+	dev_priv->ring[RCS].default_context = ctx;
 
 	DRM_DEBUG_DRIVER("Default HW context loaded\n");
 	return 0;
 
 err_unpin:
 	i915_gem_object_unpin(ctx->obj);
 err_destroy:
 	i915_gem_context_unreference(ctx);
 	return ret;
 }
 
Line 239... Line 251...
-void i915_gem_context_init(struct drm_device *dev)
+int i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
-	if (!HAS_HW_CONTEXTS(dev)) {
-		dev_priv->hw_contexts_disabled = true;
-		DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n");
-		return;
-	}
+	if (!HAS_HW_CONTEXTS(dev))
+		return 0;
 
 	/* If called from reset, or thaw... we've been here already */
-	if (dev_priv->hw_contexts_disabled ||
-	    dev_priv->ring[RCS].default_context)
-		return;
+	if (dev_priv->ring[RCS].default_context)
+		return 0;
 
 	dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
 
 	if (dev_priv->hw_context_size > (1<<20)) {
-		dev_priv->hw_contexts_disabled = true;
 		DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
-		return;
+		return -E2BIG;
 	}
 
-	if (create_default_context(dev_priv)) {
-		dev_priv->hw_contexts_disabled = true;
-		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
-		return;
-	}
+	ret = create_default_context(dev_priv);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n",
+				 ret);
+		return ret;
+	}
 
 	DRM_DEBUG_DRIVER("HW context support initialized\n");
+	return 0;
 }
 
Line 271... Line 280...
 void i915_gem_context_fini(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
 
-	if (dev_priv->hw_contexts_disabled)
+	if (!HAS_HW_CONTEXTS(dev))
 		return;
 
 	/* The only known way to stop the gpu from accessing the hw context is
 	 * to reset it. Do this as the very last operation to avoid confusing
 	 * other code, leading to spurious errors. */
 	intel_gpu_reset(dev);
 
-	i915_gem_object_unpin(dctx->obj);
+	/* When default context is created and switched to, base object refcount
+	 * will be 2 (+1 from object creation and +1 from do_switch()).
+	 * i915_gem_context_fini() will be called after gpu_idle() has switched
+	 * to default context. So we need to unreference the base object once
+	 * to offset the do_switch part, so that i915_gem_context_unreference()
+	 * can then free the base object correctly. */
+	WARN_ON(!dev_priv->ring[RCS].last_context);
+	if (dev_priv->ring[RCS].last_context == dctx) {
+		/* Fake switch to NULL context */
+		WARN_ON(dctx->obj->active);
Line 306... Line 323...
 struct i915_ctx_hang_stats *
 i915_gem_context_get_hang_stats(struct drm_device *dev,
 				struct drm_file *file,
 				u32 id)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct i915_hw_context *ctx;
 
 	if (id == DEFAULT_CONTEXT_ID)
 		return &file_priv->hang_stats;
 
-	ctx = NULL;
-	if (!dev_priv->hw_contexts_disabled)
-		ctx = i915_gem_context_get(file->driver_priv, id);
+	if (!HAS_HW_CONTEXTS(dev))
+		return ERR_PTR(-ENOENT);
+
+	ctx = i915_gem_context_get(file->driver_priv, id);
 	if (ctx == NULL)
Line 389... Line 406...
 static int do_switch(struct i915_hw_context *to)
 {
 	struct intel_ring_buffer *ring = to->ring;
 	struct i915_hw_context *from = ring->last_context;
 	u32 hw_flags = 0;
-	int ret;
+	int ret, i;
 
 	BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
 
-	if (from == to)
+	if (from == to && !to->remap_slice)
 		return 0;
 
Line 426... Line 443...
 	if (!to->obj->has_global_gtt_mapping)
 		i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
 
 	if (!to->is_initialized || is_default_context(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
-	else if (WARN_ON_ONCE(from == to)) /* not yet expected */
-		hw_flags |= MI_FORCE_RESTORE;
 
 	ret = mi_set_context(ring, to, hw_flags);
 	if (ret) {
 		i915_gem_object_unpin(to->obj);
 		return ret;
 	}
 
+	for (i = 0; i < MAX_L3_SLICES; i++) {
+		if (!(to->remap_slice & (1<<i)))
+			continue;
+
+		ret = i915_gem_l3_remap(ring, i);
+		/* If it failed, try again next round */
+		if (ret)
+			DRM_DEBUG_DRIVER("L3 remapping failed\n");
+		else
+			to->remap_slice &= ~(1<<i);
+	}
+
 	/* The backing object for the context is done after switching to the
 	 * *next* context. Therefore we cannot retire the previous context until
 	 * the next context has already started running. In fact, the below code
 	 * is a bit suboptimal because the retiring can occur simply after the
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
-		struct i915_address_space *ggtt = &dev_priv->gtt.base;
 		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
-		i915_gem_object_move_to_active(from->obj, ring);
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
 		 * correct in case the object gets swapped out. Ideally we'd be
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
 		from->obj->dirty = 1;
 		BUG_ON(from->obj->ring != ring);
 
-		ret = i915_add_request(ring, NULL);
-		if (ret) {
-			/* Too late, we've already scheduled a context switch.
-			 * Try to undo the change so that the hw state is
-			 * consistent with our tracking. In case of emergency,
-			 * scream.
-			 */
-			WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
-			return ret;
-		}
-
+		/* obj is kept alive until the next request by its active ref */
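The new L3 loop clears a slice's bit only after a successful remap, so a failed slice stays flagged and is simply retried on the next context switch; this is also why the early-out at the top of do_switch() now checks !to->remap_slice. A stand-alone sketch of that retry scheme, with hypothetical names and a caller-supplied callback standing in for i915_gem_l3_remap():

	#define EXAMPLE_MAX_SLICES 4

	static u32 example_remap_pending(u32 mask, int (*remap)(int slice))
	{
		int i;

		for (i = 0; i < EXAMPLE_MAX_SLICES; i++) {
			if (!(mask & (1 << i)))
				continue;		/* slice not pending */
			if (remap(i) == 0)
				mask &= ~(1 << i);	/* success: clear the bit */
			/* on failure the bit survives, so the next switch retries */
		}
		return mask;
	}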
Line 484... Line 498...
 /**
  * i915_switch_context() - perform a GPU context switch.
  * @ring: ring for which we'll execute the context switch
  * @file_priv: file_priv associated with the context, may be NULL
  * @id: context id number
- * @seqno: sequence number by which the new context will be switched to
- * @flags:
  *
  * The context life cycle is simple. The context refcount is incremented and
  * decremented by 1 on create and destroy. If the context is in use by the GPU,
  * it will have a refcount > 1. This allows us to destroy the context abstract
  * object while letting the normal object tracking destroy the backing BO.
Line 499... Line 511...
 			int to_id)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct i915_hw_context *to;
 
-	if (dev_priv->hw_contexts_disabled)
+	if (!HAS_HW_CONTEXTS(ring->dev))
 		return 0;
 
Line 524... Line 536...
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_context_create *args = data;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct i915_hw_context *ctx;
 	int ret;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
-	if (dev_priv->hw_contexts_disabled)
+	if (!HAS_HW_CONTEXTS(dev))
 		return -ENODEV;