Subversion Repositories: KolibriOS


Diff of the i915 GEM hardware-context code between Rev 3746 (old) and Rev 4104 (new), reformatted here as a unified diff: lines beginning with '-' exist only in Rev 3746, lines beginning with '+' only in Rev 4104, and each '@@ -OLD,n +NEW,m @@' header gives the starting line and line count on each side.
@@ -111,21 +111,21 @@
 		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
 		break;
 	case 7:
 		reg = I915_READ(GEN7_CXT_SIZE);
 		if (IS_HASWELL(dev))
-			ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
+			ret = HSW_CXT_TOTAL_SIZE;
 		else
 			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
 		break;
 	default:
 		BUG();
 	}
 
 	return ret;
 }
 
-static void do_destroy(struct i915_hw_context *ctx)
+void i915_gem_context_free(struct kref *ctx_ref)
 {
-	if (ctx->file_priv)
-		idr_remove(&ctx->file_priv->context_idr, ctx->id);
+	struct i915_hw_context *ctx = container_of(ctx_ref,
+						   typeof(*ctx), ref);
 
@@ -143,17 +143,19 @@
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (ctx == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	kref_init(&ctx->ref);
 	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
 	if (ctx->obj == NULL) {
 		kfree(ctx);
 		DRM_DEBUG_DRIVER("Context object allocated failed\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
 	if (INTEL_INFO(dev)->gen >= 7) {
 		ret = i915_gem_object_set_cache_level(ctx->obj,
-						      I915_CACHE_LLC_MLC);
-		if (ret)
+						      I915_CACHE_L3_LLC);
+		/* Failure shouldn't ever happen this early */
+		if (WARN_ON(ret))
 			goto err_out;
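This revision moves context lifetime from the ad-hoc do_destroy() to reference counting: create_hw_context() now does kref_init(&ctx->ref), and i915_gem_context_free() in the first hunk recovers the context from its embedded kref with container_of(). Below is a minimal user-space sketch of that standard kernel pattern; struct object and object_free are illustrative names, and the kref stand-ins are simplified (the real kernel kref is atomic).

#include <stdlib.h>
#include <stddef.h>

/* Simplified, non-atomic stand-ins for the kernel's kref API. */
struct kref { int refcount; };
static void kref_init(struct kref *k) { k->refcount = 1; }
static void kref_get(struct kref *k)  { k->refcount++; }
static void kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (--k->refcount == 0)
		release(k);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* An object with an embedded kref, like i915_hw_context's 'ref' field. */
struct object {
	struct kref ref;
	/* ... payload ... */
};

/* Release callback: recover the containing object from the kref pointer,
 * exactly as i915_gem_context_free() does with container_of(). */
static void object_free(struct kref *ref)
{
	struct object *obj = container_of(ref, struct object, ref);
	free(obj);
}

int main(void)
{
	struct object *obj = calloc(1, sizeof(*obj));
	kref_init(&obj->ref);              /* creation: refcount = 1 */
	kref_get(&obj->ref);               /* another holder: refcount = 2 */
	kref_put(&obj->ref, object_free);  /* refcount = 1 */
	kref_put(&obj->ref, object_free);  /* refcount = 0 -> object_free() */
	return 0;
}

Presumably i915_gem_context_reference()/i915_gem_context_unreference(), whose call sites appear throughout this diff, are thin wrappers around kref_get(&ctx->ref) and kref_put(&ctx->ref, i915_gem_context_free); the wrappers themselves are outside the lines shown.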
@@ -167,17 +169,17 @@
 
 	/* Default context will never have a file_priv */
 	if (file_priv == NULL)
 		return ctx;
 
-	ctx->file_priv = file_priv;
-
 	ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
 			GFP_KERNEL);
 	if (ret < 0)
 		goto err_out;
+
+	ctx->file_priv = file_priv;
 	ctx->id = ret;
 
 	return ctx;
 
 err_out:
-	do_destroy(ctx);
+	i915_gem_context_unreference(ctx);
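Note the reordering: ctx->file_priv is now published only after idr_alloc() succeeds, so the error path never sees a half-initialized context. idr_alloc() hands out the smallest free ID greater than DEFAULT_CONTEXT_ID in the file's context_idr. As a rough illustration of the allocate/find/remove contract it relies on, here is a toy fixed-size table (the real kernel IDR is a radix tree and reports failure with a negative errno rather than -1):

#include <stdio.h>

#define MAX_IDS 16

struct toy_idr { void *slot[MAX_IDS]; };

/* Allocate the smallest free ID >= start and map it to ptr.
 * Returns the ID, or -1 when the range is exhausted. */
static int toy_idr_alloc(struct toy_idr *idr, void *ptr, int start)
{
	for (int id = start; id < MAX_IDS; id++) {
		if (!idr->slot[id]) {
			idr->slot[id] = ptr;
			return id;
		}
	}
	return -1;
}

static void *toy_idr_find(struct toy_idr *idr, int id)
{
	return (id >= 0 && id < MAX_IDS) ? idr->slot[id] : NULL;
}

static void toy_idr_remove(struct toy_idr *idr, int id)
{
	if (id >= 0 && id < MAX_IDS)
		idr->slot[id] = NULL;
}

int main(void)
{
	struct toy_idr idr = {0};
	/* start above the reserved default ID, like DEFAULT_CONTEXT_ID + 1 */
	int ctx_id = toy_idr_alloc(&idr, "ctx", 1);
	printf("allocated id %d -> %s\n", ctx_id, (char *)toy_idr_find(&idr, ctx_id));
	toy_idr_remove(&idr, ctx_id);
	return 0;
}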
@@ -210,28 +212,32 @@
 	 * problem as pinning the default context also requires GTT space which
 	 * may not be available. To avoid this we always pin the
 	 * default context.
 	 */
 	dev_priv->ring[RCS].default_context = ctx;
-	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
-	if (ret)
+	ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
 		goto err_destroy;
+	}
 
 	ret = do_switch(ctx);
-	if (ret)
+	if (ret) {
+		DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
 		goto err_unpin;
+	}
 
 	DRM_DEBUG_DRIVER("Default HW context loaded\n");
 	return 0;
 
 err_unpin:
 	i915_gem_object_unpin(ctx->obj);
 err_destroy:
-	do_destroy(ctx);
+	i915_gem_context_unreference(ctx);
 	return ret;
 }
 
 void i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (!HAS_HW_CONTEXTS(dev)) {
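create_default_context() keeps the classic kernel goto-unwind shape even as the labels' bodies change (do_destroy() becoming i915_gem_context_unreference()): each failure jumps to the label that undoes only the steps that have already succeeded, in reverse order. Schematically, with hypothetical step_a/step_b stand-ins for the pin and switch operations:

/* Hypothetical stand-ins for the real operations. */
static int step_a(void)    { return 0; } /* e.g. i915_gem_obj_ggtt_pin() */
static int step_b(void)    { return 0; } /* e.g. do_switch() */
static void undo_a(void)   { }           /* e.g. i915_gem_object_unpin() */
static void undo_all(void) { }           /* e.g. i915_gem_context_unreference() */

static int setup(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err_destroy;  /* nothing pinned yet: only drop the context */

	ret = step_b();
	if (ret)
		goto err_unpin;    /* undo the pin, then fall through */

	return 0;

err_unpin:
	undo_a();
err_destroy:
	undo_all();
	return ret;
}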
@@ -246,31 +253,39 @@
 
 	dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
 
 	if (dev_priv->hw_context_size > (1<<20)) {
 		dev_priv->hw_contexts_disabled = true;
+		DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
 		return;
 	}
 
 	if (create_default_context(dev_priv)) {
 		dev_priv->hw_contexts_disabled = true;
+		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
 		return;
 	}
 
 	DRM_DEBUG_DRIVER("HW context support initialized\n");
 }
 
 void i915_gem_context_fini(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
 
 	if (dev_priv->hw_contexts_disabled)
 		return;
 
 	/* The only known way to stop the gpu from accessing the hw context is
 	 * to reset it. Do this as the very last operation to avoid confusing
 	 * other code, leading to spurious errors. */
-//	intel_gpu_reset(dev);
+	intel_gpu_reset(dev);
 
-	i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj);
-
-	do_destroy(dev_priv->ring[RCS].default_context);
+	i915_gem_object_unpin(dctx->obj);
+
+	/* When default context is created and switched to, base object refcount
+	 * will be 2 (+1 from object creation and +1 from do_switch()).
+	 * i915_gem_context_fini() will be called after gpu_idle() has switched
+	 * to default context. So we need to unreference the base object once
+	 * to offset the do_switch part, so that i915_gem_context_unreference()
+	 * can then free the base object correctly. */
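The new comment in i915_gem_context_fini() encodes a small reference ledger for the default context. Restating the comment's arithmetic (the exact unreference sequence lies outside the lines shown here):

/* Default-context reference ledger implied by the comment above:
 *   creation (create_hw_context)          -> refcount = 1
 *   do_switch() takes its own reference   -> refcount = 2
 *   fini: drop the do_switch() hold       -> refcount = 1
 *   fini: the final i915_gem_context_unreference() releases the
 *         last hold, letting i915_gem_context_free() run.
 */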
@@ -323,15 +338,16 @@
 
 	ret = intel_ring_begin(ring, 6);
 	if (ret)
 		return ret;
 
+	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
 	if (IS_GEN7(ring->dev))
 		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 	else
 		intel_ring_emit(ring, MI_NOOP);
 
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, new_context->obj->gtt_offset |
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
 			MI_MM_SPACE_GTT |
 			MI_SAVE_EXT_STATE_EN |
@@ -351,15 +367,15 @@
 }
 
 static int do_switch(struct i915_hw_context *to)
 {
 	struct intel_ring_buffer *ring = to->ring;
-	struct drm_i915_gem_object *from_obj = ring->last_context_obj;
+	struct i915_hw_context *from = ring->last_context;
 	u32 hw_flags = 0;
 	int ret;
 
-	BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
+	BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
 
-	if (from_obj == to->obj)
+	if (from == to)
 		return 0;
 
-	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
+	ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
@@ -380,9 +396,9 @@
 	if (!to->obj->has_global_gtt_mapping)
 		i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
 
 	if (!to->is_initialized || is_default_context(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
-	else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
+	else if (WARN_ON_ONCE(from == to)) /* not yet expected */
 		hw_flags |= MI_FORCE_RESTORE;
 
 	ret = mi_set_context(ring, to, hw_flags);
@@ -395,25 +411,39 @@
 	 * *next* context. Therefore we cannot retire the previous context until
 	 * the next context has already started running. In fact, the below code
 	 * is a bit suboptimal because the retiring can occur simply after the
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
-	if (from_obj != NULL) {
-		from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_gem_object_move_to_active(from_obj, ring);
+	if (from != NULL) {
+		struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
+		struct i915_address_space *ggtt = &dev_priv->gtt.base;
+		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
+		i915_gem_object_move_to_active(from->obj, ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
 		 * correct in case the object gets swapped out. Ideally we'd be
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		from_obj->dirty = 1;
-		BUG_ON(from_obj->ring != ring);
-		i915_gem_object_unpin(from_obj);
+		from->obj->dirty = 1;
+		BUG_ON(from->obj->ring != ring);
 
-		drm_gem_object_unreference(&from_obj->base);
+		ret = i915_add_request(ring, NULL);
+		if (ret) {
+			/* Too late, we've already scheduled a context switch.
+			 * Try to undo the change so that the hw state is
+			 * consistent with out tracking. In case of emergency,
+			 * scream.
+			 */
+			WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
+			return ret;
+		}
+
+		i915_gem_object_unpin(from->obj);
+		i915_gem_context_unreference(from);
 	}
 
-	drm_gem_object_reference(&to->obj->base);
-	ring->last_context_obj = to->obj;
+	i915_gem_context_reference(to);
+	ring->last_context = to;
 	to->is_initialized = true;
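With contexts refcounted, do_switch() also changes what the ring's bookkeeping pins: it now holds a context reference (i915_gem_context_reference(to) with ring->last_context = to) instead of a bare GEM object reference, and drops the reference it held on the outgoing context. A generic sketch of that "current holder" handoff follows, reusing the toy kref helpers from the earlier example; set_current and struct thing are illustrative names. do_switch() itself can drop the old reference before taking the new one because it returns early when from == to; the sketch uses the take-then-drop order that is safe in general.

struct thing { struct kref ref; };

static struct thing *current_thing;   /* like ring->last_context */

static void set_current(struct thing *to, void (*release)(struct kref *))
{
	struct thing *from = current_thing;

	kref_get(&to->ref);   /* like i915_gem_context_reference(to) */
	current_thing = to;   /* like ring->last_context = to */
	if (from)
		kref_put(&from->ref, release);  /* like i915_gem_context_unreference(from) */
}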
@@ -442,7 +472,9 @@
 	struct i915_hw_context *to;
 
 	if (dev_priv->hw_contexts_disabled)
 		return 0;
 
+	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+
 	if (ring != &dev_priv->ring[RCS])
 		return 0;
@@ -511,8 +543,7 @@
 	if (!ctx) {
 		mutex_unlock(&dev->struct_mutex);
 		return -ENOENT;
 	}
 
-	do_destroy(ctx);
 
 	mutex_unlock(&dev->struct_mutex);