Subversion Repositories Kolibri OS

/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context to invoke a save of the context we actually care about. In
 * fact, the code could likely be constructed, albeit in a more complicated
 * fashion, to never use the default context, though that limits the driver's
 * ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context, but it is still active.
 *
 */
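
/*
 * Illustrative sketch of the userspace side (not part of this file; the exact
 * libdrm calls shown are an assumption of typical usage): a client creates a
 * context with the CONTEXT_CREATE ioctl, tags each execbuffer2 submission
 * with the returned id, and destroys the context when done.
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *
 *	i915_execbuffer2_set_context_id(execbuf, create.ctx_id);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 *	struct drm_i915_gem_context_destroy destroy = { create.ctx_id };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */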

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define CONTEXT_ALIGN (64<<10)

static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
static int do_switch(struct i915_hw_context *to);

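/* Ask the hardware how large a logical context image needs to be on this
 * generation: gen6/7 read a context-size register, while Haswell and gen8
 * use fixed totals. The caller rounds the result up to a page.
 */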
static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

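/* Final kref release callback: unlink the context from the device's context
 * list, drop the reference on its backing GEM object and free the structure.
 */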
void i915_gem_context_free(struct kref *ctx_ref)
{
	struct i915_hw_context *ctx = container_of(ctx_ref,
						   typeof(*ctx), ref);

	list_del(&ctx->link);
	drm_gem_object_unreference(&ctx->obj->base);
	kfree(ctx);
}

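/* Allocate an i915_hw_context and its backing object, set the gen7+ cache
 * level, add it to the device's context list and, for user-created contexts,
 * allocate an id in the file's context_idr.
 */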
static struct i915_hw_context *
create_hw_context(struct drm_device *dev,
		  struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
	INIT_LIST_HEAD(&ctx->link);
	if (ctx->obj == NULL) {
		kfree(ctx);
		DRM_DEBUG_DRIVER("Context object allocation failed\n");
		return ERR_PTR(-ENOMEM);
	}

	if (INTEL_INFO(dev)->gen >= 7) {
		ret = i915_gem_object_set_cache_level(ctx->obj,
						      I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret))
			goto err_out;
	}

	/* The ring associated with the context object is handled by the normal
	 * object tracking code. We give an initial ring value simply to pass an
	 * assertion in the context switch code.
	 */
	ctx->ring = &dev_priv->ring[RCS];
	list_add_tail(&ctx->link, &dev_priv->context_list);

	/* Default context will never have a file_priv */
	if (file_priv == NULL)
		return ctx;

	ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
			GFP_KERNEL);
	if (ret < 0)
		goto err_out;

	ctx->file_priv = file_priv;
	ctx->id = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

static inline bool is_default_context(struct i915_hw_context *ctx)
{
	return (ctx == ctx->ring->default_context);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as for the idle case.
 */
static int create_default_context(struct drm_i915_private *dev_priv)
{
	struct i915_hw_context *ctx;
	int ret;

	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	ctx = create_hw_context(dev_priv->dev, NULL);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* We may need to do things with the shrinker which require us to
	 * immediately switch back to the default context. This can cause a
	 * problem as pinning the default context also requires GTT space which
	 * may not be available. To avoid this we always pin the
	 * default context.
	 */
	ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
	if (ret) {
		DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
		goto err_destroy;
	}

	ret = do_switch(ctx);
	if (ret) {
		DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
		goto err_unpin;
	}

	dev_priv->ring[RCS].default_context = ctx;

	DRM_DEBUG_DRIVER("Default HW context loaded\n");
	return 0;

err_unpin:
	i915_gem_object_unpin(ctx->obj);
err_destroy:
	i915_gem_context_unreference(ctx);
	return ret;
}

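/* One-time setup of HW context support: size the context image, then create,
 * pin and switch to the default context. Safe to call again from the reset or
 * thaw paths; it returns early if the default context already exists.
 */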
int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!HAS_HW_CONTEXTS(dev))
		return 0;

	/* If called from reset, or thaw... we've been here already */
	if (dev_priv->ring[RCS].default_context)
		return 0;

	dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);

	if (dev_priv->hw_context_size > (1<<20)) {
		DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
		return -E2BIG;
	}

	ret = create_default_context(dev_priv);
	if (ret) {
		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n",
				 ret);
		return ret;
	}

	DRM_DEBUG_DRIVER("HW context support initialized\n");
	return 0;
}

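/* Tear down context support: reset the GPU so it stops referencing the
 * context object, then drop the pin and the extra references taken when the
 * default context was created and switched to.
 */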
void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;

	if (!HAS_HW_CONTEXTS(dev))
		return;

	/* The only known way to stop the gpu from accessing the hw context is
	 * to reset it. Do this as the very last operation to avoid confusing
	 * other code, leading to spurious errors. */
	intel_gpu_reset(dev);

	/* When the default context is created and switched to, the base object
	 * refcount will be 2 (+1 from object creation and +1 from do_switch()).
	 * i915_gem_context_fini() will be called after gpu_idle() has switched
	 * to the default context. So we need to unreference the base object once
	 * to offset the do_switch part, so that i915_gem_context_unreference()
	 * can then free the base object correctly. */
	WARN_ON(!dev_priv->ring[RCS].last_context);
	if (dev_priv->ring[RCS].last_context == dctx) {
		/* Fake switch to NULL context */
		WARN_ON(dctx->obj->active);
		i915_gem_object_unpin(dctx->obj);
		i915_gem_context_unreference(dctx);
	}

	i915_gem_object_unpin(dctx->obj);
	i915_gem_context_unreference(dctx);
	dev_priv->ring[RCS].default_context = NULL;
	dev_priv->ring[RCS].last_context = NULL;
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_hw_context *ctx = p;

	BUG_ON(id == DEFAULT_CONTEXT_ID);

	i915_gem_context_unreference(ctx);
	return 0;
}

struct i915_ctx_hang_stats *
i915_gem_context_get_hang_stats(struct drm_device *dev,
				struct drm_file *file,
				u32 id)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_hw_context *ctx;

	if (id == DEFAULT_CONTEXT_ID)
		return &file_priv->hang_stats;

	if (!HAS_HW_CONTEXTS(dev))
		return ERR_PTR(-ENOENT);

	ctx = i915_gem_context_get(file->driver_priv, id);
	if (ctx == NULL)
		return ERR_PTR(-ENOENT);

	return &ctx->hang_stats;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
}

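/* Emit the MI_SET_CONTEXT command pointing the render ring at new_context's
 * backing object, preceded by a TLB invalidation on gen6 and bracketed by the
 * MI_ARB_ON_OFF workaround on gen7.
 */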
static inline int
mi_set_context(struct intel_ring_buffer *ring,
	       struct i915_hw_context *new_context,
	       u32 hw_flags)
{
	int ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
	if (IS_GEN7(ring->dev))
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			hw_flags);
	/* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
	intel_ring_emit(ring, MI_NOOP);

	if (IS_GEN7(ring->dev))
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return ret;
}

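/* Switch the render ring to context 'to': pin its object in the GGTT, flush
 * it from CPU caches, emit MI_SET_CONTEXT, perform any pending L3 slice
 * remaps and keep the previous context's object alive on the active list so
 * that it is only retired once the switch has actually completed.
 */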
static int do_switch(struct i915_hw_context *to)
{
	struct intel_ring_buffer *ring = to->ring;
	struct i915_hw_context *from = ring->last_context;
	u32 hw_flags = 0;
	int ret, i;

	BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);

	if (from == to && !to->remap_slice)
		return 0;

	ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
	if (ret)
		return ret;

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
	if (ret) {
		i915_gem_object_unpin(to->obj);
		return ret;
	}

	if (!to->obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);

	if (!to->is_initialized || is_default_context(to))
		hw_flags |= MI_RESTORE_INHIBIT;

	ret = mi_set_context(ring, to, hw_flags);
	if (ret) {
		i915_gem_object_unpin(to->obj);
		return ret;
	}

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(ring, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->obj->dirty = 1;
		BUG_ON(from->obj->ring != ring);

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_unpin(from->obj);
		i915_gem_context_unreference(from);
	}

	i915_gem_context_reference(to);
	ring->last_context = to;
	to->is_initialized = true;

	return 0;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @file: drm file associated with the context, may be NULL
 * @to_id: context id number
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 */
int i915_switch_context(struct intel_ring_buffer *ring,
			struct drm_file *file,
			int to_id)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct i915_hw_context *to;

	if (!HAS_HW_CONTEXTS(ring->dev))
		return 0;

	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (ring != &dev_priv->ring[RCS])
		return 0;

	if (to_id == DEFAULT_CONTEXT_ID) {
		to = ring->default_context;
	} else {
		if (file == NULL)
			return -EINVAL;

		to = i915_gem_context_get(file->driver_priv, to_id);
		if (to == NULL)
			return -ENOENT;
	}

	return do_switch(to);
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_hw_context *ctx;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	if (!HAS_HW_CONTEXTS(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = create_hw_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->id;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_hw_context *ctx;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (!ctx) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->id);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}