Subversion Repositories Kolibri OS

Rev

Rev 2360 | Rev 3037 | Go to most recent revision | Show entire file | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 2360 Rev 3031
Line 24... Line 24...
24
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
 *
26
 *
27
 */
27
 */
Line 28... Line 28...
28
 
28
 
-
 
29
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
#include "drmP.h"
30
 
30
#include "drm.h"
31
#include 
31
#include "drm_crtc_helper.h"
32
#include 
32
#include "drm_fb_helper.h"
33
#include 
33
#include "intel_drv.h"
34
#include "intel_drv.h"
34
#include "i915_drm.h"
35
#include 
35
#include "i915_drv.h"
-
 
36
#include 
36
#include "i915_drv.h"
37
#include "i915_trace.h"
-
 
38
//#include "../../../platform/x86/intel_ips.h"
37
#include "i915_trace.h"
39
#include 
38
#include 
40
//#include 
39
//#include 
41
//#include 
40
//#include 
42
//#include 
41
//#include 
43
//#include 
42
//#include 
44
#include 
43
#include 
Line 45... Line 44...
45
//#include 
44
//#include 
Line -... Line 45...
-
 
45
 
-
 
46
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);
-
 
47
 
-
 
48
 
-
 
49
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
-
 
50
 
-
 
51
#define BEGIN_LP_RING(n) \
-
 
52
	intel_ring_begin(LP_RING(dev_priv), (n))
-
 
53
 
-
 
54
#define OUT_RING(x) \
-
 
55
	intel_ring_emit(LP_RING(dev_priv), x)
-
 
56
 
-
 
57
#define ADVANCE_LP_RING() \
-
 
58
	intel_ring_advance(LP_RING(dev_priv))
-
 
59
 
46
 
60
/**
47
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);
61
 * Lock test for when it's just for synchronization of ring access.
-
 
62
 *
-
 
63
 * In that case, we don't need to do it when GEM is initialized as nobody else
-
 
64
 * has access to the ring.
-
 
65
 */
-
 
66
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
-
 
67
	if (LP_RING(dev->dev_private)->obj == NULL)			\
-
 
68
		LOCK_TEST_WITH_RETURN(dev, file);			\
-
 
69
} while (0)
48
 
70
 
-
 
71
static inline u32
49
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
72
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
50
                    u32 *val)
73
{
-
 
74
	if (I915_NEED_GFX_HWS(dev_priv->dev))
51
{
75
		return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
Line -... Line 76...
-
 
76
	else
-
 
77
		return intel_read_status_page(LP_RING(dev_priv), reg);
-
 
78
}
-
 
79
 
-
 
80
#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
-
 
81
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
-
 
82
#define I915_BREADCRUMB_INDEX		0x21
-
 
83
 
Line -... Line 84...
-
 
84
void i915_update_dri1_breadcrumb(struct drm_device *dev)
-
 
85
{
-
 
86
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
87
	struct drm_i915_master_private *master_priv;
-
 
88
 
-
 
89
	if (dev->primary->master) {
-
 
90
		master_priv = dev->primary->master->driver_priv;
Line 52... Line 91...
52
    *val = PciRead32(dev->busnr, dev->devfn, where);
91
		if (master_priv->sarea_priv)
53
    return 1;
92
			master_priv->sarea_priv->last_dispatch =
54
}
93
				READ_BREADCRUMB(dev_priv);
55
 
94
	}
Line 74... Line 113...
74
{
113
{
75
    drm_i915_private_t *dev_priv = dev->dev_private;
114
	drm_i915_private_t *dev_priv = dev->dev_private;
Line 76... Line 115...
76
 
115
 
77
    /* Program Hardware Status Page */
116
	/* Program Hardware Status Page */
78
    dev_priv->status_page_dmah =
117
	dev_priv->status_page_dmah =
Line 79... Line 118...
79
        (void*)drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
118
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
80
 
119
 
81
    if (!dev_priv->status_page_dmah) {
120
	if (!dev_priv->status_page_dmah) {
82
        DRM_ERROR("Can not allocate hardware status page\n");
121
		DRM_ERROR("Can not allocate hardware status page\n");
Line -... Line 122...
-
 
122
		return -ENOMEM;
-
 
123
	}
-
 
124
 
-
 
125
    memset((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
-
 
126
		  0, PAGE_SIZE);
-
 
127
 
-
 
128
	i915_write_hws_pga(dev);
-
 
129
 
-
 
130
	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
-
 
131
	return 0;
-
 
132
}
-
 
133
 
-
 
134
/**
-
 
135
 * Frees the hardware status page, whether it's a physical address or a virtual
-
 
136
 * address set up by the X Server.
-
 
137
 */
-
 
138
static void i915_free_hws(struct drm_device *dev)
-
 
139
{
-
 
140
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
141
	struct intel_ring_buffer *ring = LP_RING(dev_priv);
-
 
142
 
-
 
143
	if (dev_priv->status_page_dmah) {
-
 
144
		drm_pci_free(dev, dev_priv->status_page_dmah);
-
 
145
		dev_priv->status_page_dmah = NULL;
-
 
146
	}
-
 
147
 
-
 
148
	if (ring->status_page.gfx_addr) {
-
 
149
		ring->status_page.gfx_addr = 0;
-
 
150
		iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
-
 
151
	}
-
 
152
 
-
 
153
	/* Need to rewrite hardware status page */
-
 
154
	I915_WRITE(HWS_PGA, 0x1ffff000);
-
 
155
}
-
 
156
 
-
 
157
#if 0
-
 
158
 
-
 
159
void i915_kernel_lost_context(struct drm_device * dev)
-
 
160
{
-
 
161
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
162
	struct drm_i915_master_private *master_priv;
-
 
163
	struct intel_ring_buffer *ring = LP_RING(dev_priv);
-
 
164
 
-
 
165
	/*
-
 
166
	 * We should never lose context on the ring with modesetting
-
 
167
	 * as we don't expose it to userspace
-
 
168
	 */
-
 
169
	if (drm_core_check_feature(dev, DRIVER_MODESET))
-
 
170
		return;
-
 
171
 
-
 
172
	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
-
 
173
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-
 
174
	ring->space = ring->head - (ring->tail + 8);
-
 
175
	if (ring->space < 0)
-
 
176
		ring->space += ring->size;
-
 
177
 
-
 
178
	if (!dev->primary->master)
-
 
179
		return;
-
 
180
 
-
 
181
	master_priv = dev->primary->master->driver_priv;
-
 
182
	if (ring->head == ring->tail && master_priv->sarea_priv)
-
 
183
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
-
 
184
}
-
 
185
 
-
 
186
static int i915_dma_cleanup(struct drm_device * dev)
-
 
187
{
-
 
188
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
189
	int i;
-
 
190
 
-
 
191
	/* Make sure interrupts are disabled here because the uninstall ioctl
-
 
192
	 * may not have been called from userspace and after dev_private
-
 
193
	 * is freed, it's too late.
-
 
194
	 */
-
 
195
	if (dev->irq_enabled)
-
 
196
		drm_irq_uninstall(dev);
-
 
197
 
-
 
198
	mutex_lock(&dev->struct_mutex);
-
 
199
	for (i = 0; i < I915_NUM_RINGS; i++)
-
 
200
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
-
 
201
	mutex_unlock(&dev->struct_mutex);
-
 
202
 
-
 
203
	/* Clear the HWS virtual address at teardown */
-
 
204
	if (I915_NEED_GFX_HWS(dev))
-
 
205
		i915_free_hws(dev);
-
 
206
 
-
 
207
	return 0;
-
 
208
}
-
 
209
 
-
 
210
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
-
 
211
{
-
 
212
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
213
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
 
214
	int ret;
-
 
215
 
-
 
216
	master_priv->sarea = drm_getsarea(dev);
-
 
217
	if (master_priv->sarea) {
-
 
218
		master_priv->sarea_priv = (drm_i915_sarea_t *)
-
 
219
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
-
 
220
	} else {
-
 
221
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
-
 
222
	}
-
 
223
 
-
 
224
	if (init->ring_size != 0) {
-
 
225
		if (LP_RING(dev_priv)->obj != NULL) {
-
 
226
			i915_dma_cleanup(dev);
-
 
227
			DRM_ERROR("Client tried to initialize ringbuffer in "
-
 
228
				  "GEM mode\n");
-
 
229
			return -EINVAL;
-
 
230
		}
-
 
231
 
-
 
232
		ret = intel_render_ring_init_dri(dev,
-
 
233
						 init->ring_start,
-
 
234
						 init->ring_size);
-
 
235
		if (ret) {
-
 
236
			i915_dma_cleanup(dev);
-
 
237
			return ret;
-
 
238
		}
-
 
239
	}
-
 
240
 
-
 
241
	dev_priv->dri1.cpp = init->cpp;
-
 
242
	dev_priv->dri1.back_offset = init->back_offset;
-
 
243
	dev_priv->dri1.front_offset = init->front_offset;
-
 
244
	dev_priv->dri1.current_page = 0;
-
 
245
	if (master_priv->sarea_priv)
-
 
246
		master_priv->sarea_priv->pf_current_page = 0;
-
 
247
 
-
 
248
	/* Allow hardware batchbuffers unless told otherwise.
-
 
249
	 */
-
 
250
	dev_priv->dri1.allow_batchbuffer = 1;
-
 
251
 
-
 
252
	return 0;
-
 
253
}
-
 
254
 
-
 
255
static int i915_dma_resume(struct drm_device * dev)
-
 
256
{
-
 
257
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
258
	struct intel_ring_buffer *ring = LP_RING(dev_priv);
-
 
259
 
-
 
260
	DRM_DEBUG_DRIVER("%s\n", __func__);
-
 
261
 
-
 
262
	if (ring->virtual_start == NULL) {
-
 
263
		DRM_ERROR("can not ioremap virtual address for"
-
 
264
			  " ring buffer\n");
-
 
265
		return -ENOMEM;
-
 
266
	}
-
 
267
 
-
 
268
	/* Program Hardware Status Page */
-
 
269
	if (!ring->status_page.page_addr) {
-
 
270
		DRM_ERROR("Can not find hardware status page\n");
-
 
271
		return -EINVAL;
-
 
272
	}
-
 
273
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
-
 
274
				ring->status_page.page_addr);
83
        return -ENOMEM;
275
	if (ring->status_page.gfx_addr != 0)
Line 84... Line 276...
84
    }
276
		intel_ring_setup_status_page(ring);
-
 
277
	else
-
 
278
		i915_write_hws_pga(dev);
-
 
279
 
-
 
280
	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
-
 
281
 
-
 
282
	return 0;
-
 
283
}
-
 
284
 
-
 
285
static int i915_dma_init(struct drm_device *dev, void *data,
-
 
286
			 struct drm_file *file_priv)
-
 
287
{
-
 
288
	drm_i915_init_t *init = data;
-
 
289
	int retcode = 0;
-
 
290
 
-
 
291
	if (drm_core_check_feature(dev, DRIVER_MODESET))
-
 
292
		return -ENODEV;
-
 
293
 
-
 
294
	switch (init->func) {
-
 
295
	case I915_INIT_DMA:
-
 
296
		retcode = i915_initialize(dev, init);
-
 
297
		break;
-
 
298
	case I915_CLEANUP_DMA:
-
 
299
		retcode = i915_dma_cleanup(dev);
-
 
300
		break;
-
 
301
	case I915_RESUME_DMA:
-
 
302
		retcode = i915_dma_resume(dev);
-
 
303
		break;
-
 
304
	default:
-
 
305
		retcode = -EINVAL;
-
 
306
		break;
-
 
307
	}
-
 
308
 
-
 
309
	return retcode;
-
 
310
}
-
 
311
 
-
 
312
/* Implement basically the same security restrictions as hardware does
-
 
313
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
-
 
314
 *
-
 
315
 * Most of the calculations below involve calculating the size of a
-
 
316
 * particular instruction.  It's important to get the size right as
-
 
317
 * that tells us where the next instruction to check is.  Any illegal
-
 
318
 * instruction detected will be given a size of zero, which is a
-
 
319
 * signal to abort the rest of the buffer.
-
 
320
 */
-
 
321
static int validate_cmd(int cmd)
-
 
322
{
-
 
323
	switch (((cmd >> 29) & 0x7)) {
-
 
324
	case 0x0:
-
 
325
		switch ((cmd >> 23) & 0x3f) {
-
 
326
		case 0x0:
-
 
327
			return 1;	/* MI_NOOP */
-
 
328
		case 0x4:
-
 
329
			return 1;	/* MI_FLUSH */
-
 
330
		default:
-
 
331
			return 0;	/* disallow everything else */
-
 
332
		}
-
 
333
		break;
-
 
334
	case 0x1:
-
 
335
		return 0;	/* reserved */
-
 
336
	case 0x2:
-
 
337
		return (cmd & 0xff) + 2;	/* 2d commands */
-
 
338
	case 0x3:
-
 
339
		if (((cmd >> 24) & 0x1f) <= 0x18)
-
 
340
			return 1;
-
 
341
 
-
 
342
		switch ((cmd >> 24) & 0x1f) {
-
 
343
		case 0x1c:
-
 
344
			return 1;
-
 
345
		case 0x1d:
-
 
346
			switch ((cmd >> 16) & 0xff) {
-
 
347
			case 0x3:
-
 
348
				return (cmd & 0x1f) + 2;
-
 
349
			case 0x4:
-
 
350
				return (cmd & 0xf) + 2;
-
 
351
			default:
-
 
352
				return (cmd & 0xffff) + 2;
-
 
353
			}
-
 
354
		case 0x1e:
-
 
355
			if (cmd & (1 << 23))
-
 
356
				return (cmd & 0xffff) + 1;
-
 
357
			else
-
 
358
				return 1;
-
 
359
		case 0x1f:
-
 
360
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
-
 
361
				return (cmd & 0x1ffff) + 2;
-
 
362
			else if (cmd & (1 << 17))	/* indirect random */
-
 
363
				if ((cmd & 0xffff) == 0)
-
 
364
					return 0;	/* unknown length, too hard */
-
 
365
				else
-
 
366
					return (((cmd & 0xffff) + 1) / 2) + 1;
-
 
367
			else
-
 
368
				return 2;	/* indirect sequential */
-
 
369
		default:
-
 
370
			return 0;
-
 
371
		}
-
 
372
	default:
-
 
373
		return 0;
-
 
374
	}
-
 
375
 
-
 
376
	return 0;
-
 
377
}
-
 
378
 
-
 
379
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
-
 
380
{
-
 
381
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
382
	int i, ret;
-
 
383
 
-
 
384
	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
-
 
385
		return -EINVAL;
-
 
386
 
-
 
387
	for (i = 0; i < dwords;) {
-
 
388
		int sz = validate_cmd(buffer[i]);
-
 
389
		if (sz == 0 || i + sz > dwords)
-
 
390
			return -EINVAL;
-
 
391
		i += sz;
-
 
392
	}
-
 
393
 
-
 
394
	ret = BEGIN_LP_RING((dwords+1)&~1);
-
 
395
	if (ret)
-
 
396
		return ret;
-
 
397
 
-
 
398
	for (i = 0; i < dwords; i++)
-
 
399
		OUT_RING(buffer[i]);
-
 
400
	if (dwords & 1)
-
 
401
		OUT_RING(0);
-
 
402
 
-
 
403
	ADVANCE_LP_RING();
-
 
404
 
-
 
405
	return 0;
-
 
406
}
-
 
407
 
-
 
408
int
-
 
409
i915_emit_box(struct drm_device *dev,
-
 
410
	      struct drm_clip_rect *box,
-
 
411
	      int DR1, int DR4)
-
 
412
{
-
 
413
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
414
	int ret;
-
 
415
 
-
 
416
	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
-
 
417
	    box->y2 <= 0 || box->x2 <= 0) {
-
 
418
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
-
 
419
			  box->x1, box->y1, box->x2, box->y2);
-
 
420
		return -EINVAL;
-
 
421
	}
-
 
422
 
-
 
423
	if (INTEL_INFO(dev)->gen >= 4) {
-
 
424
		ret = BEGIN_LP_RING(4);
-
 
425
		if (ret)
-
 
426
			return ret;
-
 
427
 
-
 
428
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
-
 
429
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
-
 
430
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
-
 
431
		OUT_RING(DR4);
-
 
432
	} else {
-
 
433
		ret = BEGIN_LP_RING(6);
-
 
434
		if (ret)
-
 
435
			return ret;
-
 
436
 
-
 
437
		OUT_RING(GFX_OP_DRAWRECT_INFO);
-
 
438
		OUT_RING(DR1);
-
 
439
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
-
 
440
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
-
 
441
		OUT_RING(DR4);
-
 
442
		OUT_RING(0);
-
 
443
	}
-
 
444
	ADVANCE_LP_RING();
-
 
445
 
-
 
446
	return 0;
-
 
447
}
-
 
448
 
-
 
449
/* XXX: Emitting the counter should really be moved to part of the IRQ
-
 
450
 * emit. For now, do it in both places:
-
 
451
 */
-
 
452
 
-
 
453
static void i915_emit_breadcrumb(struct drm_device *dev)
-
 
454
{
-
 
455
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
456
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
 
457
 
-
 
458
	dev_priv->counter++;
-
 
459
	if (dev_priv->counter > 0x7FFFFFFFUL)
-
 
460
		dev_priv->counter = 0;
-
 
461
	if (master_priv->sarea_priv)
-
 
462
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
-
 
463
 
-
 
464
	if (BEGIN_LP_RING(4) == 0) {
-
 
465
		OUT_RING(MI_STORE_DWORD_INDEX);
-
 
466
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-
 
467
		OUT_RING(dev_priv->counter);
-
 
468
		OUT_RING(0);
-
 
469
		ADVANCE_LP_RING();
-
 
470
	}
-
 
471
}
-
 
472
 
-
 
473
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
-
 
474
				   drm_i915_cmdbuffer_t *cmd,
-
 
475
				   struct drm_clip_rect *cliprects,
-
 
476
				   void *cmdbuf)
-
 
477
{
-
 
478
	int nbox = cmd->num_cliprects;
-
 
479
	int i = 0, count, ret;
-
 
480
 
-
 
481
	if (cmd->sz & 0x3) {
-
 
482
		DRM_ERROR("alignment");
-
 
483
		return -EINVAL;
-
 
484
	}
-
 
485
 
-
 
486
	i915_kernel_lost_context(dev);
-
 
487
 
-
 
488
	count = nbox ? nbox : 1;
-
 
489
 
-
 
490
	for (i = 0; i < count; i++) {
-
 
491
		if (i < nbox) {
-
 
492
			ret = i915_emit_box(dev, &cliprects[i],
-
 
493
					    cmd->DR1, cmd->DR4);
-
 
494
			if (ret)
-
 
495
				return ret;
-
 
496
		}
-
 
497
 
-
 
498
		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
-
 
499
		if (ret)
-
 
500
			return ret;
-
 
501
	}
-
 
502
 
-
 
503
	i915_emit_breadcrumb(dev);
-
 
504
	return 0;
-
 
505
}
-
 
506
 
-
 
507
static int i915_dispatch_batchbuffer(struct drm_device * dev,
-
 
508
				     drm_i915_batchbuffer_t * batch,
-
 
509
				     struct drm_clip_rect *cliprects)
-
 
510
{
-
 
511
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
512
	int nbox = batch->num_cliprects;
-
 
513
	int i, count, ret;
-
 
514
 
-
 
515
	if ((batch->start | batch->used) & 0x7) {
-
 
516
		DRM_ERROR("alignment");
-
 
517
		return -EINVAL;
-
 
518
	}
-
 
519
 
-
 
520
	i915_kernel_lost_context(dev);
-
 
521
 
-
 
522
	count = nbox ? nbox : 1;
-
 
523
	for (i = 0; i < count; i++) {
-
 
524
		if (i < nbox) {
-
 
525
			ret = i915_emit_box(dev, &cliprects[i],
-
 
526
					    batch->DR1, batch->DR4);
-
 
527
			if (ret)
-
 
528
				return ret;
-
 
529
		}
-
 
530
 
-
 
531
		if (!IS_I830(dev) && !IS_845G(dev)) {
-
 
532
			ret = BEGIN_LP_RING(2);
-
 
533
			if (ret)
-
 
534
				return ret;
-
 
535
 
-
 
536
			if (INTEL_INFO(dev)->gen >= 4) {
-
 
537
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
-
 
538
				OUT_RING(batch->start);
-
 
539
			} else {
-
 
540
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
-
 
541
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
-
 
542
			}
-
 
543
		} else {
-
 
544
			ret = BEGIN_LP_RING(4);
-
 
545
			if (ret)
-
 
546
				return ret;
-
 
547
 
-
 
548
			OUT_RING(MI_BATCH_BUFFER);
-
 
549
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
-
 
550
			OUT_RING(batch->start + batch->used - 4);
-
 
551
			OUT_RING(0);
-
 
552
		}
-
 
553
		ADVANCE_LP_RING();
-
 
554
	}
-
 
555
 
-
 
556
 
-
 
557
	if (IS_G4X(dev) || IS_GEN5(dev)) {
-
 
558
		if (BEGIN_LP_RING(2) == 0) {
-
 
559
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
-
 
560
			OUT_RING(MI_NOOP);
-
 
561
			ADVANCE_LP_RING();
-
 
562
		}
-
 
563
	}
-
 
564
 
-
 
565
	i915_emit_breadcrumb(dev);
-
 
566
	return 0;
-
 
567
}
-
 
568
 
-
 
569
static int i915_dispatch_flip(struct drm_device * dev)
-
 
570
{
-
 
571
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
572
	struct drm_i915_master_private *master_priv =
-
 
573
		dev->primary->master->driver_priv;
-
 
574
	int ret;
-
 
575
 
-
 
576
	if (!master_priv->sarea_priv)
-
 
577
		return -EINVAL;
-
 
578
 
-
 
579
	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
-
 
580
			  __func__,
-
 
581
			 dev_priv->dri1.current_page,
-
 
582
			 master_priv->sarea_priv->pf_current_page);
-
 
583
 
-
 
584
	i915_kernel_lost_context(dev);
-
 
585
 
-
 
586
	ret = BEGIN_LP_RING(10);
-
 
587
	if (ret)
-
 
588
		return ret;
-
 
589
 
-
 
590
	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
-
 
591
	OUT_RING(0);
-
 
592
 
-
 
593
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
-
 
594
	OUT_RING(0);
-
 
595
	if (dev_priv->dri1.current_page == 0) {
-
 
596
		OUT_RING(dev_priv->dri1.back_offset);
-
 
597
		dev_priv->dri1.current_page = 1;
-
 
598
	} else {
-
 
599
		OUT_RING(dev_priv->dri1.front_offset);
-
 
600
		dev_priv->dri1.current_page = 0;
-
 
601
	}
-
 
602
	OUT_RING(0);
-
 
603
 
-
 
604
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
-
 
605
	OUT_RING(0);
-
 
606
 
-
 
607
	ADVANCE_LP_RING();
-
 
608
 
-
 
609
	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
-
 
610
 
-
 
611
	if (BEGIN_LP_RING(4) == 0) {
-
 
612
		OUT_RING(MI_STORE_DWORD_INDEX);
-
 
613
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-
 
614
		OUT_RING(dev_priv->counter);
-
 
615
		OUT_RING(0);
-
 
616
		ADVANCE_LP_RING();
-
 
617
	}
-
 
618
 
-
 
619
	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
-
 
620
	return 0;
-
 
621
}
-
 
622
 
-
 
623
static int i915_quiescent(struct drm_device *dev)
-
 
624
{
-
 
625
	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
-
 
626
 
-
 
627
	i915_kernel_lost_context(dev);
-
 
628
	return intel_wait_ring_idle(ring);
-
 
629
}
-
 
630
 
-
 
631
static int i915_flush_ioctl(struct drm_device *dev, void *data,
-
 
632
			    struct drm_file *file_priv)
-
 
633
{
-
 
634
	int ret;
-
 
635
 
-
 
636
	if (drm_core_check_feature(dev, DRIVER_MODESET))
-
 
637
		return -ENODEV;
-
 
638
 
-
 
639
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
 
640
 
-
 
641
	mutex_lock(&dev->struct_mutex);
-
 
642
	ret = i915_quiescent(dev);
-
 
643
	mutex_unlock(&dev->struct_mutex);
-
 
644
 
-
 
645
	return ret;
-
 
646
}
-
 
647
 
-
 
648
static int i915_batchbuffer(struct drm_device *dev, void *data,
-
 
649
			    struct drm_file *file_priv)
-
 
650
{
-
 
651
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
652
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
 
653
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
-
 
654
	    master_priv->sarea_priv;
-
 
655
	drm_i915_batchbuffer_t *batch = data;
-
 
656
	int ret;
-
 
657
	struct drm_clip_rect *cliprects = NULL;
-
 
658
 
-
 
659
	if (drm_core_check_feature(dev, DRIVER_MODESET))
-
 
660
		return -ENODEV;
-
 
661
 
-
 
662
	if (!dev_priv->dri1.allow_batchbuffer) {
-
 
663
		DRM_ERROR("Batchbuffer ioctl disabled\n");
-
 
664
		return -EINVAL;
-
 
665
	}
-
 
666
 
-
 
667
	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
-
 
668
			batch->start, batch->used, batch->num_cliprects);
-
 
669
 
-
 
670
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
 
671
 
-
 
672
	if (batch->num_cliprects < 0)
-
 
673
		return -EINVAL;
-
 
674
 
-
 
675
	if (batch->num_cliprects) {
-
 
676
		cliprects = kcalloc(batch->num_cliprects,
-
 
677
				    sizeof(struct drm_clip_rect),
-
 
678
				    GFP_KERNEL);
-
 
679
		if (cliprects == NULL)
-
 
680
			return -ENOMEM;
-
 
681
 
-
 
682
		ret = copy_from_user(cliprects, batch->cliprects,
-
 
683
				     batch->num_cliprects *
-
 
684
				     sizeof(struct drm_clip_rect));
-
 
685
		if (ret != 0) {
-
 
686
			ret = -EFAULT;
-
 
687
			goto fail_free;
-
 
688
		}
-
 
689
	}
-
 
690
 
-
 
691
	mutex_lock(&dev->struct_mutex);
-
 
692
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
-
 
693
	mutex_unlock(&dev->struct_mutex);
-
 
694
 
-
 
695
	if (sarea_priv)
-
 
696
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
 
697
 
-
 
698
fail_free:
-
 
699
	kfree(cliprects);
-
 
700
 
-
 
701
	return ret;
-
 
702
}
-
 
703
 
-
 
704
static int i915_cmdbuffer(struct drm_device *dev, void *data,
-
 
705
			  struct drm_file *file_priv)
-
 
706
{
-
 
707
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
708
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
 
709
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
-
 
710
	    master_priv->sarea_priv;
-
 
711
	drm_i915_cmdbuffer_t *cmdbuf = data;
-
 
712
	struct drm_clip_rect *cliprects = NULL;
-
 
713
	void *batch_data;
-
 
714
	int ret;
-
 
715
 
-
 
716
	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
-
 
717
			cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
-
 
718
 
-
 
719
	if (drm_core_check_feature(dev, DRIVER_MODESET))
-
 
720
		return -ENODEV;
-
 
721
 
-
 
722
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
 
723
 
-
 
724
	if (cmdbuf->num_cliprects < 0)
-
 
725
		return -EINVAL;
-
 
726
 
-
 
727
	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
-
 
728
	if (batch_data == NULL)
-
 
729
		return -ENOMEM;
-
 
730
 
-
 
731
	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
-
 
732
	if (ret != 0) {
-
 
733
		ret = -EFAULT;
-
 
734
		goto fail_batch_free;
-
 
735
	}
-
 
736
 
-
 
737
	if (cmdbuf->num_cliprects) {
-
 
738
		cliprects = kcalloc(cmdbuf->num_cliprects,
-
 
739
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
-
 
740
		if (cliprects == NULL) {
-
 
741
			ret = -ENOMEM;
-
 
742
			goto fail_batch_free;
-
 
743
		}
-
 
744
 
-
 
745
		ret = copy_from_user(cliprects, cmdbuf->cliprects,
-
 
746
				     cmdbuf->num_cliprects *
-
 
747
				     sizeof(struct drm_clip_rect));
-
 
748
		if (ret != 0) {
-
 
749
			ret = -EFAULT;
-
 
750
			goto fail_clip_free;
-
 
751
		}
-
 
752
	}
-
 
753
 
-
 
754
	mutex_lock(&dev->struct_mutex);
-
 
755
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
-
 
756
	mutex_unlock(&dev->struct_mutex);
-
 
757
	if (ret) {
-
 
758
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
-
 
759
		goto fail_clip_free;
-
 
760
	}
-
 
761
 
-
 
762
	if (sarea_priv)
-
 
763
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
 
764
 
-
 
765
fail_clip_free:
-
 
766
	kfree(cliprects);
-
 
767
fail_batch_free:
-
 
768
	kfree(batch_data);
-
 
769
 
-
 
770
	return ret;
-
 
771
}
-
 
772
 
-
 
773
static int i915_emit_irq(struct drm_device * dev)
-
 
774
{
-
 
775
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
776
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
 
777
 
-
 
778
	i915_kernel_lost_context(dev);
-
 
779
 
-
 
780
	DRM_DEBUG_DRIVER("\n");
-
 
781
 
-
 
782
	dev_priv->counter++;
-
 
783
	if (dev_priv->counter > 0x7FFFFFFFUL)
-
 
784
		dev_priv->counter = 1;
-
 
785
	if (master_priv->sarea_priv)
-
 
786
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
-
 
787
 
-
 
788
	if (BEGIN_LP_RING(4) == 0) {
-
 
789
		OUT_RING(MI_STORE_DWORD_INDEX);
-
 
790
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-
 
791
		OUT_RING(dev_priv->counter);
-
 
792
		OUT_RING(MI_USER_INTERRUPT);
-
 
793
		ADVANCE_LP_RING();
-
 
794
	}
-
 
795
 
-
 
796
	return dev_priv->counter;
-
 
797
}
-
 
798
 
-
 
799
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
-
 
800
{
-
 
801
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
802
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
 
803
	int ret = 0;
-
 
804
	struct intel_ring_buffer *ring = LP_RING(dev_priv);
-
 
805
 
-
 
806
	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
-
 
807
		  READ_BREADCRUMB(dev_priv));
-
 
808
 
-
 
809
	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
-
 
810
		if (master_priv->sarea_priv)
-
 
811
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
 
812
		return 0;
-
 
813
	}
-
 
814
 
-
 
815
	if (master_priv->sarea_priv)
-
 
816
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-
 
817
 
-
 
818
	if (ring->irq_get(ring)) {
-
 
819
		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
-
 
820
			    READ_BREADCRUMB(dev_priv) >= irq_nr);
-
 
821
		ring->irq_put(ring);
-
 
822
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
-
 
823
		ret = -EBUSY;
-
 
824
 
-
 
825
	if (ret == -EBUSY) {
-
 
826
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
-
 
827
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
-
 
828
	}
-
 
829
 
-
 
830
	return ret;
-
 
831
}
-
 
832
 
-
 
833
/* Needs the lock as it touches the ring.
-
 
834
 */
-
 
835
static int i915_irq_emit(struct drm_device *dev, void *data,
-
 
836
			 struct drm_file *file_priv)
-
 
837
{
-
 
838
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
839
	drm_i915_irq_emit_t *emit = data;
-
 
840
	int result;
-
 
841
 
-
 
842
	if (drm_core_check_feature(dev, DRIVER_MODESET))
-
 
843
		return -ENODEV;
-
 
844
 
-
 
845
	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
-
 
846
		DRM_ERROR("called with no initialization\n");
-
 
847
		return -EINVAL;
-
 
848
	}
-
 
849
 
-
 
850
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
 
851
 
-
 
852
	mutex_lock(&dev->struct_mutex);
-
 
853
	result = i915_emit_irq(dev);
-
 
854
	mutex_unlock(&dev->struct_mutex);
-
 
855
 
-
 
856
	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
-
 
857
		DRM_ERROR("copy_to_user\n");
-
 
858
		return -EFAULT;
-
 
859
	}
-
 
860
 
-
 
861
	return 0;
-
 
862
}
-
 
863
 
-
 
864
/* Doesn't need the hardware lock.
-
 
865
 */
-
 
866
static int i915_irq_wait(struct drm_device *dev, void *data,
-
 
867
			 struct drm_file *file_priv)
-
 
868
{
-
 
869
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
870
	drm_i915_irq_wait_t *irqwait = data;
-
 
871
 
-
 
872
	if (drm_core_check_feature(dev, DRIVER_MODESET))
-
 
873
		return -ENODEV;
-
 
874
 
-
 
875
	if (!dev_priv) {
-
 
876
		DRM_ERROR("called with no initialization\n");
-
 
877
		return -EINVAL;
-
 
878
	}
-
 
879
 
-
 
880
	return i915_wait_irq(dev, irqwait->irq_seq);
-
 
881
}
-
 
882
 
-
 
883
static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
-
 
884
			 struct drm_file *file_priv)
-
 
885
{
-
 
886
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
887
	drm_i915_vblank_pipe_t *pipe = data;
-
 
888
 
-
 
889
	if (drm_core_check_feature(dev, DRIVER_MODESET))
-
 
890
		return -ENODEV;
-
 
891
 
-
 
892
	if (!dev_priv) {
-
 
893
		DRM_ERROR("called with no initialization\n");
-
 
894
		return -EINVAL;
-
 
895
	}
-
 
896
 
-
 
897
	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
 
898
 
-
 
899
	return 0;
-
 
900
}
-
 
901
 
-
 
902
/**
-
 
903
 * Schedule buffer swap at given vertical blank.
-
 
904
 */
-
 
905
static int i915_vblank_swap(struct drm_device *dev, void *data,
-
 
906
		     struct drm_file *file_priv)
-
 
907
{
-
 
908
	/* The delayed swap mechanism was fundamentally racy, and has been
-
 
909
	 * removed.  The model was that the client requested a delayed flip/swap
-
 
910
	 * from the kernel, then waited for vblank before continuing to perform
-
 
911
	 * rendering.  The problem was that the kernel might wake the client
-
 
912
	 * up before it dispatched the vblank swap (since the lock has to be
-
 
913
	 * held while touching the ringbuffer), in which case the client would
-
 
914
	 * clear and start the next frame before the swap occurred, and
-
 
915
	 * flicker would occur in addition to likely missing the vblank.
-
 
916
	 *
-
 
917
	 * In the absence of this ioctl, userland falls back to a correct path
-
 
918
	 * of waiting for a vblank, then dispatching the swap on its own.
-
 
919
	 * Context switching to userland and back is plenty fast enough for
-
 
920
	 * meeting the requirements of vblank swapping.
-
 
921
	 */
-
 
922
	return -EINVAL;
-
 
923
}
-
 
924
 
-
 
925
static int i915_flip_bufs(struct drm_device *dev, void *data,
-
 
926
			  struct drm_file *file_priv)
-
 
927
{
-
 
928
	int ret;
-
 
929
 
-
 
930
	if (drm_core_check_feature(dev, DRIVER_MODESET))
-
 
931
		return -ENODEV;
-
 
932
 
-
 
933
	DRM_DEBUG_DRIVER("%s\n", __func__);
-
 
934
 
-
 
935
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
 
936
 
-
 
937
	mutex_lock(&dev->struct_mutex);
-
 
938
	ret = i915_dispatch_flip(dev);
-
 
939
	mutex_unlock(&dev->struct_mutex);
-
 
940
 
-
 
941
	return ret;
-
 
942
}
-
 
943
 
-
 
944
static int i915_getparam(struct drm_device *dev, void *data,
-
 
945
			 struct drm_file *file_priv)
-
 
946
{
-
 
947
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
948
	drm_i915_getparam_t *param = data;
-
 
949
	int value;
-
 
950
 
-
 
951
	if (!dev_priv) {
-
 
952
		DRM_ERROR("called with no initialization\n");
-
 
953
		return -EINVAL;
-
 
954
	}
-
 
955
 
-
 
956
	switch (param->param) {
-
 
957
	case I915_PARAM_IRQ_ACTIVE:
-
 
958
		value = dev->pdev->irq ? 1 : 0;
-
 
959
		break;
-
 
960
	case I915_PARAM_ALLOW_BATCHBUFFER:
-
 
961
		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
-
 
962
		break;
-
 
963
	case I915_PARAM_LAST_DISPATCH:
-
 
964
		value = READ_BREADCRUMB(dev_priv);
-
 
965
		break;
-
 
966
	case I915_PARAM_CHIPSET_ID:
-
 
967
		value = dev->pci_device;
-
 
968
		break;
-
 
969
	case I915_PARAM_HAS_GEM:
-
 
970
		value = 1;
-
 
971
		break;
-
 
972
	case I915_PARAM_NUM_FENCES_AVAIL:
-
 
973
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
-
 
974
		break;
-
 
975
	case I915_PARAM_HAS_OVERLAY:
-
 
976
		value = dev_priv->overlay ? 1 : 0;
-
 
977
		break;
-
 
978
	case I915_PARAM_HAS_PAGEFLIPPING:
-
 
979
		value = 1;
-
 
980
		break;
-
 
981
	case I915_PARAM_HAS_EXECBUF2:
-
 
982
		/* depends on GEM */
-
 
983
		value = 1;
-
 
984
		break;
-
 
985
	case I915_PARAM_HAS_BSD:
-
 
986
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
-
 
987
		break;
-
 
988
	case I915_PARAM_HAS_BLT:
-
 
989
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
-
 
990
		break;
-
 
991
	case I915_PARAM_HAS_RELAXED_FENCING:
-
 
992
		value = 1;
-
 
993
		break;
-
 
994
	case I915_PARAM_HAS_COHERENT_RINGS:
-
 
995
		value = 1;
-
 
996
		break;
-
 
997
	case I915_PARAM_HAS_EXEC_CONSTANTS:
-
 
998
		value = INTEL_INFO(dev)->gen >= 4;
-
 
999
		break;
-
 
1000
	case I915_PARAM_HAS_RELAXED_DELTA:
-
 
1001
		value = 1;
-
 
1002
		break;
-
 
1003
	case I915_PARAM_HAS_GEN7_SOL_RESET:
-
 
1004
		value = 1;
-
 
1005
		break;
-
 
1006
	case I915_PARAM_HAS_LLC:
-
 
1007
		value = HAS_LLC(dev);
-
 
1008
		break;
-
 
1009
	case I915_PARAM_HAS_ALIASING_PPGTT:
-
 
1010
		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
-
 
1011
		break;
-
 
1012
	case I915_PARAM_HAS_WAIT_TIMEOUT:
-
 
1013
		value = 1;
-
 
1014
		break;
-
 
1015
	case I915_PARAM_HAS_SEMAPHORES:
-
 
1016
		value = i915_semaphore_is_enabled(dev);
-
 
1017
		break;
-
 
1018
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
-
 
1019
		value = 1;
-
 
1020
		break;
-
 
1021
	default:
-
 
1022
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
-
 
1023
				 param->param);
-
 
1024
		return -EINVAL;
-
 
1025
	}
-
 
1026
 
-
 
1027
	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
-
 
1028
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
-
 
1029
		return -EFAULT;
-
 
1030
	}
-
 
1031
 
-
 
1032
	return 0;
-
 
1033
}
-
 
1034
 
-
 
1035
static int i915_setparam(struct drm_device *dev, void *data,
-
 
1036
			 struct drm_file *file_priv)
-
 
1037
{
-
 
1038
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
1039
	drm_i915_setparam_t *param = data;
-
 
1040
 
-
 
1041
	if (!dev_priv) {
-
 
1042
		DRM_ERROR("called with no initialization\n");
-
 
1043
		return -EINVAL;
-
 
1044
	}
-
 
1045
 
-
 
1046
	switch (param->param) {
-
 
1047
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
-
 
1048
		break;
-
 
1049
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
-
 
1050
		break;
-
 
1051
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
-
 
1052
		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
-
 
1053
		break;
-
 
1054
	case I915_SETPARAM_NUM_USED_FENCES:
-
 
1055
		if (param->value > dev_priv->num_fence_regs ||
-
 
1056
		    param->value < 0)
-
 
1057
			return -EINVAL;
-
 
1058
		/* Userspace can use first N regs */
-
 
1059
		dev_priv->fence_reg_start = param->value;
-
 
1060
		break;
-
 
1061
	default:
-
 
1062
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
85
 
1063
					param->param);
86
    i915_write_hws_pga(dev);
1064
		return -EINVAL;
-
 
1065
	}
-
 
1066
 
-
 
1067
	return 0;
-
 
1068
}
-
 
1069
#endif
-
 
1070
 
-
 
1071
 
-
 
1072
static int i915_set_status_page(struct drm_device *dev, void *data,
-
 
1073
				struct drm_file *file_priv)
-
 
1074
{
-
 
1075
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
1076
	drm_i915_hws_addr_t *hws = data;
Line -... Line 1077...
-
 
1077
	struct intel_ring_buffer *ring = LP_RING(dev_priv);
-
 
1078
 
Line -... Line 1079...
-
 
1079
	if (drm_core_check_feature(dev, DRIVER_MODESET))
-
 
1080
		return -ENODEV;
-
 
1081
 
-
 
1082
	if (!I915_NEED_GFX_HWS(dev))
-
 
1083
		return -EINVAL;
-
 
1084
 
-
 
1085
	if (!dev_priv) {
-
 
1086
		DRM_ERROR("called with no initialization\n");
-
 
1087
		return -EINVAL;
Line -... Line 1088...
-
 
1088
	}
Line -... Line 1089...
-
 
1089
 
Line -... Line 1090...
-
 
1090
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-
 
1091
		WARN(1, "tried to set status page when mode setting active\n");
-
 
1092
		return 0;
-
 
1093
	}
-
 
1094
 
-
 
1095
	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
-
 
1096
 
-
 
1097
	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
-
 
1098
 
Line -... Line 1099...
-
 
1099
	dev_priv->dri1.gfx_hws_cpu_addr =
-
 
1100
        ioremap(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
Line -... Line 1101...
-
 
1101
	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
-
 
1102
		i915_dma_cleanup(dev);
-
 
1103
		ring->status_page.gfx_addr = 0;
-
 
1104
		DRM_ERROR("can not ioremap virtual address for"
-
 
1105
				" G33 hw status page\n");
-
 
1106
		return -ENOMEM;
Line -... Line 1107...
-
 
1107
	}
-
 
1108
 
-
 
1109
    memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
Line -... Line 1110...
-
 
1110
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
-
 
1111
 
-
 
1112
	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
-
 
1113
			 ring->status_page.gfx_addr);
-
 
1114
	DRM_DEBUG_DRIVER("load hws at %p\n",
-
 
1115
			 ring->status_page.page_addr);
-
 
1116
	return 0;
Line 87... Line 1117...
87
 
1117
}
88
    dbgprintf("Enabled hardware status page\n");
1118
 
89
    return 0;
1119
static int i915_get_bridge_dev(struct drm_device *dev)
Line 149... Line 1179...
149
	}
1179
	}
150
#endif
1180
#endif
151
}
1181
}
Line -... Line 1182...
-
 
1182
 
-
 
1183
 
-
 
1184
/* true = enable decode, false = disable decoder */
-
 
1185
static unsigned int i915_vga_set_decode(void *cookie, bool state)
Line -... Line 1186...
-
 
1186
{
-
 
1187
	struct drm_device *dev = cookie;
-
 
1188
 
-
 
1189
	intel_modeset_vga_set_state(dev, state);
-
 
1190
	if (state)
-
 
1191
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
-
 
1192
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
Line 152... Line -...
152
 
-
 
153
 
-
 
154
 
-
 
155
 
-
 
156
 
-
 
157
 
-
 
158
 
-
 
159
 
-
 
160
 
-
 
161
 
-
 
162
 
-
 
163
 
-
 
164
 
-
 
165
 
-
 
166
 
-
 
167
#define LFB_SIZE 0xC00000
-
 
168
 
-
 
169
static int i915_load_gem_init(struct drm_device *dev)
-
 
170
{
-
 
171
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
172
	unsigned long prealloc_size, gtt_size, mappable_size;
-
 
173
	int ret;
-
 
174
 
-
 
175
	prealloc_size = dev_priv->mm.gtt->stolen_size;
-
 
176
	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
-
 
177
	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
 
178
 
-
 
179
    dbgprintf("%s prealloc: %x gtt: %x mappable: %x\n",__FUNCTION__,
-
 
180
             prealloc_size, gtt_size, mappable_size);
-
 
181
 
-
 
182
	/* Basic memrange allocator for stolen space */
-
 
183
	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
-
 
184
 
-
 
185
	/* Let GEM Manage all of the aperture.
-
 
186
	 *
-
 
187
	 * However, leave one page at the end still bound to the scratch page.
-
 
188
	 * There are a number of places where the hardware apparently
-
 
189
	 * prefetches past the end of the object, and we've seen multiple
-
 
190
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
-
 
191
	 * at the last page of the aperture.  One page should be enough to
-
 
192
	 * keep any prefetching inside of the aperture.
-
 
193
	 */
-
 
194
    i915_gem_do_init(dev, LFB_SIZE, mappable_size, gtt_size - PAGE_SIZE - LFB_SIZE);
-
 
195
 
-
 
196
    mutex_lock(&dev->struct_mutex);
-
 
197
    ret = i915_gem_init_ringbuffer(dev);
-
 
198
    mutex_unlock(&dev->struct_mutex);
-
 
199
    if (ret)
-
 
200
        return ret;
-
 
201
 
-
 
202
	/* Try to set up FBC with a reasonable compressed buffer size */
-
 
203
//   if (I915_HAS_FBC(dev) && i915_powersave) {
-
 
204
//       int cfb_size;
-
 
205
 
-
 
206
		/* Leave 1M for line length buffer & misc. */
-
 
207
 
-
 
208
		/* Try to get a 32M buffer... */
-
 
209
//       if (prealloc_size > (36*1024*1024))
-
 
210
//           cfb_size = 32*1024*1024;
-
 
211
//       else /* fall back to 7/8 of the stolen space */
-
 
212
//           cfb_size = prealloc_size * 7 / 8;
1193
	else
213
//       i915_setup_compression(dev, cfb_size);
1194
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
214
//   }
1195
}
215
 
1196
 
Line 227... Line 1208...
227
    if (ret)
1208
    if (ret)
228
        DRM_INFO("failed to find VBIOS tables\n");
1209
        DRM_INFO("failed to find VBIOS tables\n");
Line 229... Line 1210...
229
 
1210
 
Line 230... Line 1211...
230
//    intel_register_dsm_handler();
1211
//    intel_register_dsm_handler();
231
 
1212
 
-
 
1213
	/* Initialise stolen first so that we may reserve preallocated
232
    /* IIR "flip pending" bit means done if this bit is set */
1214
	 * objects for the BIOS to KMS transition.
-
 
1215
	 */
-
 
1216
	ret = i915_gem_init_stolen(dev);
Line 233... Line 1217...
233
    if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
1217
	if (ret)
Line 234... Line 1218...
234
        dev_priv->flip_pending_is_done = true;
1218
		goto cleanup_vga_switcheroo;
235
 
1219
 
236
    intel_modeset_init(dev);
1220
    intel_modeset_init(dev);
Line 237... Line 1221...
237
 
1221
 
Line 238... Line 1222...
238
    ret = i915_load_gem_init(dev);
1222
	ret = i915_gem_init(dev);
239
    if (ret)
1223
    if (ret)
Line 264... Line 1248...
264
//    drm_irq_uninstall(dev);
1248
//    drm_irq_uninstall(dev);
265
cleanup_gem:
1249
cleanup_gem:
266
//    mutex_lock(&dev->struct_mutex);
1250
//    mutex_lock(&dev->struct_mutex);
267
//    i915_gem_cleanup_ringbuffer(dev);
1251
//    i915_gem_cleanup_ringbuffer(dev);
268
//    mutex_unlock(&dev->struct_mutex);
1252
//    mutex_unlock(&dev->struct_mutex);
-
 
1253
//	i915_gem_cleanup_aliasing_ppgtt(dev);
-
 
1254
cleanup_gem_stolen:
-
 
1255
//	i915_gem_cleanup_stolen(dev);
269
cleanup_vga_switcheroo:
1256
cleanup_vga_switcheroo:
270
//    vga_switcheroo_unregister_client(dev->pdev);
1257
//    vga_switcheroo_unregister_client(dev->pdev);
271
cleanup_vga_client:
1258
cleanup_vga_client:
272
//    vga_client_register(dev->pdev, NULL, NULL, NULL);
1259
//    vga_client_register(dev->pdev, NULL, NULL, NULL);
273
out:
1260
out:
274
    return ret;
1261
    return ret;
275
}
1262
}
Line 276... Line -...
276
 
-
 
277
 
-
 
278
 
-
 
279
static void i915_pineview_get_mem_freq(struct drm_device *dev)
-
 
280
{
-
 
281
    drm_i915_private_t *dev_priv = dev->dev_private;
-
 
282
    u32 tmp;
-
 
283
 
-
 
284
    tmp = I915_READ(CLKCFG);
-
 
285
 
-
 
286
    switch (tmp & CLKCFG_FSB_MASK) {
-
 
287
    case CLKCFG_FSB_533:
-
 
288
        dev_priv->fsb_freq = 533; /* 133*4 */
-
 
289
        break;
-
 
290
    case CLKCFG_FSB_800:
-
 
291
        dev_priv->fsb_freq = 800; /* 200*4 */
-
 
292
        break;
-
 
293
    case CLKCFG_FSB_667:
-
 
294
        dev_priv->fsb_freq =  667; /* 167*4 */
-
 
295
        break;
-
 
296
    case CLKCFG_FSB_400:
-
 
297
        dev_priv->fsb_freq = 400; /* 100*4 */
-
 
298
        break;
-
 
299
    }
-
 
300
 
-
 
301
    switch (tmp & CLKCFG_MEM_MASK) {
-
 
302
    case CLKCFG_MEM_533:
-
 
303
        dev_priv->mem_freq = 533;
-
 
304
        break;
-
 
305
    case CLKCFG_MEM_667:
-
 
306
        dev_priv->mem_freq = 667;
-
 
307
        break;
-
 
308
    case CLKCFG_MEM_800:
-
 
309
        dev_priv->mem_freq = 800;
-
 
310
        break;
-
 
311
    }
-
 
312
 
-
 
313
    /* detect pineview DDR3 setting */
-
 
314
    tmp = I915_READ(CSHRDDR3CTL);
-
 
315
    dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
-
 
316
}
-
 
317
 
-
 
318
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
-
 
319
{
-
 
320
    drm_i915_private_t *dev_priv = dev->dev_private;
-
 
321
    u16 ddrpll, csipll;
-
 
322
 
-
 
323
    ddrpll = I915_READ16(DDRMPLL1);
-
 
324
    csipll = I915_READ16(CSIPLL0);
-
 
325
 
-
 
326
    switch (ddrpll & 0xff) {
-
 
327
    case 0xc:
-
 
328
        dev_priv->mem_freq = 800;
-
 
329
        break;
-
 
330
    case 0x10:
-
 
331
        dev_priv->mem_freq = 1066;
-
 
332
        break;
-
 
333
    case 0x14:
-
 
334
        dev_priv->mem_freq = 1333;
-
 
335
        break;
-
 
336
    case 0x18:
-
 
337
        dev_priv->mem_freq = 1600;
-
 
338
        break;
-
 
339
    default:
-
 
340
        DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
-
 
341
                 ddrpll & 0xff);
-
 
342
        dev_priv->mem_freq = 0;
-
 
343
        break;
-
 
344
    }
-
 
345
 
-
 
346
    dev_priv->r_t = dev_priv->mem_freq;
-
 
347
 
-
 
348
    switch (csipll & 0x3ff) {
-
 
349
    case 0x00c:
-
 
350
        dev_priv->fsb_freq = 3200;
-
 
351
        break;
-
 
352
    case 0x00e:
-
 
353
        dev_priv->fsb_freq = 3733;
-
 
354
        break;
-
 
355
    case 0x010:
-
 
356
        dev_priv->fsb_freq = 4266;
-
 
357
        break;
-
 
358
    case 0x012:
-
 
359
        dev_priv->fsb_freq = 4800;
-
 
360
        break;
-
 
361
    case 0x014:
-
 
362
        dev_priv->fsb_freq = 5333;
-
 
363
        break;
-
 
364
    case 0x016:
-
 
365
        dev_priv->fsb_freq = 5866;
-
 
366
        break;
-
 
367
    case 0x018:
-
 
368
        dev_priv->fsb_freq = 6400;
-
 
369
        break;
-
 
370
    default:
-
 
371
        DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
-
 
372
                 csipll & 0x3ff);
-
 
373
        dev_priv->fsb_freq = 0;
-
 
374
        break;
-
 
375
    }
-
 
376
 
-
 
377
    if (dev_priv->fsb_freq == 3200) {
-
 
378
        dev_priv->c_m = 0;
-
 
379
    } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
-
 
380
        dev_priv->c_m = 1;
-
 
381
    } else {
-
 
Line 382... Line 1263...
382
        dev_priv->c_m = 2;
1263
 
383
    }
1264
 
384
}
1265
 
Line -... Line 1266...
-
 
1266
 
-
 
1267
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
385
 
1268
{
-
 
1269
	const struct intel_device_info *info = dev_priv->info;
-
 
1270
 
386
static int i915_get_bridge_dev(struct drm_device *dev)
1271
#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
387
{
1272
#define DEV_INFO_SEP ,
-
 
1273
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
388
    struct drm_i915_private *dev_priv = dev->dev_private;
1274
			 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
389
 
1275
			 info->gen,
390
    dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
-
 
391
    if (!dev_priv->bridge_dev) {
-
 
392
        DRM_ERROR("bridge device not found\n");
-
 
393
        return -1;
-
 
394
    }
-
 
395
    return 0;
-
 
396
}
-
 
397
 
-
 
398
 
-
 
399
/* Global for IPS driver to get at the current i915 device */
-
 
400
static struct drm_i915_private *i915_mch_dev;
-
 
401
/*
-
 
402
 * Lock protecting IPS related data structures
-
 
403
 *   - i915_mch_dev
-
 
404
 *   - dev_priv->max_delay
-
 
405
 *   - dev_priv->min_delay
-
 
Line 406... Line 1276...
406
 *   - dev_priv->fmax
1276
			 dev_priv->dev->pdev->device,
407
 *   - dev_priv->gpu_busy
1277
			 DEV_INFO_FLAGS);
408
 */
1278
#undef DEV_INFO_FLAG
409
static DEFINE_SPINLOCK(mchdev_lock);
1279
#undef DEV_INFO_SEP
Line 421... Line 1291...
421
 *   - setup the DRM framebuffer with the allocated memory
1291
 *   - setup the DRM framebuffer with the allocated memory
422
 */
1292
 */
423
int i915_driver_load(struct drm_device *dev, unsigned long flags)
1293
int i915_driver_load(struct drm_device *dev, unsigned long flags)
424
{
1294
{
425
    struct drm_i915_private *dev_priv;
1295
    struct drm_i915_private *dev_priv;
-
 
1296
	struct intel_device_info *info;
426
    int ret = 0, mmio_bar;
1297
	int ret = 0, mmio_bar, mmio_size;
427
    uint32_t agp_size;
1298
	uint32_t aperture_size;
Line 428... Line 1299...
428
 
1299
 
Line -... Line 1300...
-
 
1300
	ENTER();
-
 
1301
 
-
 
1302
	info = (struct intel_device_info *) flags;
-
 
1303
 
-
 
1304
#if 0
-
 
1305
	/* Refuse to load on gen6+ without kms enabled. */
-
 
1306
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
-
 
1307
		return -ENODEV;
-
 
1308
 
-
 
1309
	/* i915 has 4 more counters */
-
 
1310
	dev->counters += 4;
-
 
1311
	dev->types[6] = _DRM_STAT_IRQ;
-
 
1312
	dev->types[7] = _DRM_STAT_PRIMARY;
-
 
1313
	dev->types[8] = _DRM_STAT_SECONDARY;
-
 
1314
	dev->types[9] = _DRM_STAT_DMA;
429
    ENTER();
1315
#endif
430
 
1316
 
431
    dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
1317
    dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
Line 432... Line 1318...
432
    if (dev_priv == NULL)
1318
    if (dev_priv == NULL)
433
        return -ENOMEM;
1319
        return -ENOMEM;
434
 
1320
 
-
 
1321
    dev->dev_private = (void *)dev_priv;
-
 
1322
    dev_priv->dev = dev;
Line 435... Line 1323...
435
    dev->dev_private = (void *)dev_priv;
1323
	dev_priv->info = info;
436
    dev_priv->dev = dev;
1324
 
437
    dev_priv->info = (struct intel_device_info *) flags;
1325
	i915_dump_device_info(dev_priv);
438
 
1326
 
Line -... Line 1327...
-
 
1327
    if (i915_get_bridge_dev(dev)) {
-
 
1328
        ret = -EIO;
-
 
1329
        goto free_priv;
-
 
1330
    }
-
 
1331
 
-
 
1332
	ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
-
 
1333
	if (!ret) {
-
 
1334
		DRM_ERROR("failed to set up gmch\n");
-
 
1335
		ret = -EIO;
-
 
1336
		goto put_bridge;
-
 
1337
	}
-
 
1338
 
-
 
1339
	dev_priv->mm.gtt = intel_gtt_get();
-
 
1340
	if (!dev_priv->mm.gtt) {
-
 
1341
		DRM_ERROR("Failed to initialize GTT\n");
-
 
1342
		ret = -ENODEV;
-
 
1343
		goto put_gmch;
439
    if (i915_get_bridge_dev(dev)) {
1344
	}
440
        ret = -EIO;
1345
 
441
        goto free_priv;
1346
 
Line 442... Line 1347...
442
    }
1347
	pci_set_master(dev->pdev);
Line 455... Line 1360...
455
     */
1360
     */
456
//    if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1361
//    if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
457
//        dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1362
//        dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
Line 458... Line 1363...
458
 
1363
 
-
 
1364
    mmio_bar = IS_GEN2(dev) ? 1 : 0;
-
 
1365
	/* Before gen4, the registers and the GTT are behind different BARs.
-
 
1366
	 * However, from gen4 onwards, the registers and the GTT are shared
-
 
1367
	 * in the same BAR, so we want to restrict this ioremap from
-
 
1368
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
-
 
1369
	 * the register BAR remains the same size for all the earlier
-
 
1370
	 * generations up to Ironlake.
-
 
1371
	 */
-
 
1372
	if (info->gen < 5)
-
 
1373
		mmio_size = 512*1024;
-
 
1374
	else
-
 
1375
		mmio_size = 2*1024*1024;
459
    mmio_bar = IS_GEN2(dev) ? 1 : 0;
1376
 
460
    dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
1377
	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
461
    if (!dev_priv->regs) {
1378
    if (!dev_priv->regs) {
462
        DRM_ERROR("failed to map registers\n");
1379
        DRM_ERROR("failed to map registers\n");
463
        ret = -EIO;
-
 
464
        goto put_bridge;
-
 
465
    }
-
 
466
 
-
 
467
    dev_priv->mm.gtt = intel_gtt_get();
-
 
468
    if (!dev_priv->mm.gtt) {
-
 
469
        DRM_ERROR("Failed to initialize GTT\n");
-
 
470
        ret = -ENODEV;
1380
        ret = -EIO;
471
        goto out_rmmap;
1381
		goto put_gmch;
Line 472... Line 1382...
472
    }
1382
    }
-
 
1383
 
Line 473... Line 1384...
473
 
1384
	aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
 
1385
	dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
Line 474... Line 1386...
474
//    agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1386
 
475
 
1387
    dbgprintf("gtt_base_addr %x aperture_size %d\n",
-
 
1388
               dev_priv->mm.gtt_base_addr, aperture_size );
476
/*   agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;   */
1389
 
477
 
1390
//   dev_priv->mm.gtt_mapping =
478
//    dev_priv->mm.gtt_mapping =
1391
//       io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
479
//        io_mapping_create_wc(dev->agp->base, agp_size);
1392
//                    aperture_size);
Line 480... Line -...
480
//    if (dev_priv->mm.gtt_mapping == NULL) {
-
 
481
//        ret = -EIO;
-
 
482
//        goto out_rmmap;
-
 
483
//    }
-
 
484
 
-
 
485
    /* Set up a WC MTRR for non-PAT systems.  This is more common than
1393
//   if (dev_priv->mm.gtt_mapping == NULL) {
486
     * one would think, because the kernel disables PAT on first
1394
//       ret = -EIO;
487
     * generation Core chips because WC PAT gets overridden by a UC
-
 
488
     * MTRR if present.  Even if a UC MTRR isn't present.
-
 
489
     */
-
 
490
//    dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
-
 
491
//                     agp_size,
-
 
Line 492... Line 1395...
492
//                     MTRR_TYPE_WRCOMB, 1);
1395
//        goto out_rmmap;
493
//    if (dev_priv->mm.gtt_mtrr < 0) {
1396
//    }
494
//        DRM_INFO("MTRR allocation failed.  Graphics "
1397
 
495
//             "performance may suffer.\n");
1398
//	i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
Line 504... Line 1407...
504
     * It is also used for periodic low-priority events, such as
1407
     * It is also used for periodic low-priority events, such as
505
     * idle-timers and recording error state.
1408
     * idle-timers and recording error state.
506
     *
1409
     *
507
     * All tasks on the workqueue are expected to acquire the dev mutex
1410
     * All tasks on the workqueue are expected to acquire the dev mutex
508
     * so there is no point in running more than one instance of the
1411
     * so there is no point in running more than one instance of the
509
     * workqueue at any time: max_active = 1 and NON_REENTRANT.
1412
	 * workqueue at any time.  Use an ordered one.
510
     */
1413
     */
511
      dev_priv->wq = alloc_workqueue("i915",
1414
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
512
                         WQ_UNBOUND | WQ_NON_REENTRANT,
-
 
513
                         1);
-
 
514
      if (dev_priv->wq == NULL) {
1415
      if (dev_priv->wq == NULL) {
515
          DRM_ERROR("Failed to create our workqueue.\n");
1416
          DRM_ERROR("Failed to create our workqueue.\n");
516
          ret = -ENOMEM;
1417
          ret = -ENOMEM;
517
          goto out_mtrrfree;
1418
          goto out_mtrrfree;
518
      }
1419
      }
Line 519... Line 1420...
519
 
1420
 
520
    /* enable GEM by default */
1421
	/* This must be called before any calls to HAS_PCH_* */
Line 521... Line 1422...
521
    dev_priv->has_gem = 1;
1422
	intel_detect_pch(dev);
-
 
1423
 
Line 522... Line 1424...
522
 
1424
	intel_irq_init(dev);
523
	intel_irq_init(dev);
1425
	intel_gt_init(dev);
524
 
1426
 
525
    /* Try to make sure MCHBAR is enabled before poking at it */
1427
    /* Try to make sure MCHBAR is enabled before poking at it */
Line 537... Line 1439...
537
        ret = i915_init_phys_hws(dev);
1439
        ret = i915_init_phys_hws(dev);
538
        if (ret)
1440
        if (ret)
539
            goto out_gem_unload;
1441
            goto out_gem_unload;
540
    }
1442
    }
Line 541... Line -...
541
 
-
 
542
    if (IS_PINEVIEW(dev))
-
 
543
        i915_pineview_get_mem_freq(dev);
-
 
544
    else if (IS_GEN5(dev))
-
 
545
        i915_ironlake_get_mem_freq(dev);
-
 
546
 
1443
 
547
    /* On the 945G/GM, the chipset reports the MSI capability on the
1444
    /* On the 945G/GM, the chipset reports the MSI capability on the
548
     * integrated graphics even though the support isn't actually there
1445
     * integrated graphics even though the support isn't actually there
549
     * according to the published specs.  It doesn't appear to function
1446
     * according to the published specs.  It doesn't appear to function
550
     * correctly in testing on 945G.
1447
     * correctly in testing on 945G.
Line 553... Line 1450...
553
     *
1450
     *
554
     * According to chipset errata, on the 965GM, MSI interrupts may
1451
     * According to chipset errata, on the 965GM, MSI interrupts may
555
     * be lost or delayed, but we use them anyways to avoid
1452
     * be lost or delayed, but we use them anyways to avoid
556
     * stuck interrupts on some machines.
1453
     * stuck interrupts on some machines.
557
     */
1454
     */
558
//    if (!IS_I945G(dev) && !IS_I945GM(dev))
-
 
559
//        pci_enable_msi(dev->pdev);
-
 
Line 560... Line -...
560
 
-
 
561
	spin_lock_init(&dev_priv->gt_lock);
1455
 
562
    spin_lock_init(&dev_priv->irq_lock);
1456
    spin_lock_init(&dev_priv->irq_lock);
563
    spin_lock_init(&dev_priv->error_lock);
1457
    spin_lock_init(&dev_priv->error_lock);
-
 
1458
	spin_lock_init(&dev_priv->rps.lock);
Line 564... Line 1459...
564
    spin_lock_init(&dev_priv->rps_lock);
1459
	spin_lock_init(&dev_priv->dpio_lock);
565
 
1460
 
566
	if (IS_IVYBRIDGE(dev))
1461
	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
567
		dev_priv->num_pipe = 3;
1462
		dev_priv->num_pipe = 3;
568
	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
1463
	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
569
        dev_priv->num_pipe = 2;
1464
        dev_priv->num_pipe = 2;
Line 575... Line 1470...
575
//        goto out_gem_unload;
1470
//        goto out_gem_unload;
Line 576... Line 1471...
576
 
1471
 
577
    /* Start out suspended */
1472
    /* Start out suspended */
Line 578... Line -...
578
    dev_priv->mm.suspended = 1;
-
 
579
 
-
 
580
    intel_detect_pch(dev);
1473
    dev_priv->mm.suspended = 1;
581
 
1474
 
582
    ret = i915_load_modeset_init(dev);
1475
    ret = i915_load_modeset_init(dev);
583
    if (ret < 0) {
1476
    if (ret < 0) {
584
        DRM_ERROR("failed to init modeset\n");
1477
        DRM_ERROR("failed to init modeset\n");
Line 590... Line 1483...
590
//    acpi_video_register();
1483
//    acpi_video_register();
Line 591... Line 1484...
591
 
1484
 
592
//    setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
1485
//    setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
Line 593... Line -...
593
//            (unsigned long) dev);
-
 
594
 
-
 
595
    spin_lock(&mchdev_lock);
-
 
596
    i915_mch_dev = dev_priv;
-
 
Line -... Line 1486...
-
 
1486
//            (unsigned long) dev);
597
    dev_priv->mchdev_lock = &mchdev_lock;
1487
 
Line 598... Line 1488...
598
    spin_unlock(&mchdev_lock);
1488
 
Line 599... Line 1489...
599
 
1489
	if (IS_GEN5(dev))
Line 613... Line 1503...
613
//    intel_teardown_gmbus(dev);
1503
//    intel_teardown_gmbus(dev);
614
//    intel_teardown_mchbar(dev);
1504
//    intel_teardown_mchbar(dev);
615
//    destroy_workqueue(dev_priv->wq);
1505
//    destroy_workqueue(dev_priv->wq);
616
out_mtrrfree:
1506
out_mtrrfree:
617
//    if (dev_priv->mm.gtt_mtrr >= 0) {
1507
//	if (dev_priv->mm.gtt_mtrr >= 0) {
618
//        mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
1508
//		mtrr_del(dev_priv->mm.gtt_mtrr,
619
//             dev->agp->agp_info.aper_size * 1024 * 1024);
1509
//			 dev_priv->mm.gtt_base_addr,
-
 
1510
//			 aperture_size);
620
//        dev_priv->mm.gtt_mtrr = -1;
1511
//		dev_priv->mm.gtt_mtrr = -1;
621
//    }
1512
//	}
622
//    io_mapping_free(dev_priv->mm.gtt_mapping);
1513
//	io_mapping_free(dev_priv->mm.gtt_mapping);
623
 
-
 
624
out_rmmap:
1514
out_rmmap:
625
    pci_iounmap(dev->pdev, dev_priv->regs);
1515
    pci_iounmap(dev->pdev, dev_priv->regs);
-
 
1516
put_gmch:
-
 
1517
//   intel_gmch_remove();
626
put_bridge:
1518
put_bridge:
627
//    pci_dev_put(dev_priv->bridge_dev);
1519
//    pci_dev_put(dev_priv->bridge_dev);
628
free_priv:
1520
free_priv:
629
    kfree(dev_priv);
1521
    kfree(dev_priv);
630
    return ret;
1522
    return ret;
631
}
1523
}
Line -... Line 1524...
-
 
1524
 
-
 
1525
#if 0
-
 
1526
 
-
 
1527
int i915_driver_unload(struct drm_device *dev)
-
 
1528
{
-
 
1529
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1530
	int ret;
-
 
1531
 
-
 
1532
	intel_gpu_ips_teardown();
-
 
1533
 
-
 
1534
	i915_teardown_sysfs(dev);
-
 
1535
 
-
 
1536
	if (dev_priv->mm.inactive_shrinker.shrink)
-
 
1537
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
-
 
1538
 
-
 
1539
	mutex_lock(&dev->struct_mutex);
-
 
1540
	ret = i915_gpu_idle(dev);
-
 
1541
	if (ret)
-
 
1542
		DRM_ERROR("failed to idle hardware: %d\n", ret);
-
 
1543
	i915_gem_retire_requests(dev);
-
 
1544
	mutex_unlock(&dev->struct_mutex);
-
 
1545
 
-
 
1546
	/* Cancel the retire work handler, which should be idle now. */
-
 
1547
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
-
 
1548
 
-
 
1549
	io_mapping_free(dev_priv->mm.gtt_mapping);
-
 
1550
	if (dev_priv->mm.gtt_mtrr >= 0) {
-
 
1551
		mtrr_del(dev_priv->mm.gtt_mtrr,
-
 
1552
			 dev_priv->mm.gtt_base_addr,
-
 
1553
			 dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
-
 
1554
		dev_priv->mm.gtt_mtrr = -1;
-
 
1555
	}
-
 
1556
 
-
 
1557
	acpi_video_unregister();
-
 
1558
 
-
 
1559
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-
 
1560
		intel_fbdev_fini(dev);
-
 
1561
		intel_modeset_cleanup(dev);
-
 
1562
 
-
 
1563
		/*
-
 
1564
		 * free the memory space allocated for the child device
-
 
1565
		 * config parsed from VBT
-
 
1566
		 */
-
 
1567
		if (dev_priv->child_dev && dev_priv->child_dev_num) {
-
 
1568
			kfree(dev_priv->child_dev);
-
 
1569
			dev_priv->child_dev = NULL;
-
 
1570
			dev_priv->child_dev_num = 0;
-
 
1571
		}
-
 
1572
 
-
 
1573
		vga_switcheroo_unregister_client(dev->pdev);
-
 
1574
		vga_client_register(dev->pdev, NULL, NULL, NULL);
-
 
1575
	}
-
 
1576
 
-
 
1577
	/* Free error state after interrupts are fully disabled. */
-
 
1578
	del_timer_sync(&dev_priv->hangcheck_timer);
-
 
1579
	cancel_work_sync(&dev_priv->error_work);
-
 
1580
	i915_destroy_error_state(dev);
-
 
1581
 
-
 
1582
	if (dev->pdev->msi_enabled)
-
 
1583
		pci_disable_msi(dev->pdev);
-
 
1584
 
-
 
1585
	intel_opregion_fini(dev);
-
 
1586
 
-
 
1587
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-
 
1588
		/* Flush any outstanding unpin_work. */
-
 
1589
		flush_workqueue(dev_priv->wq);
-
 
1590
 
-
 
1591
		mutex_lock(&dev->struct_mutex);
-
 
1592
		i915_gem_free_all_phys_object(dev);
-
 
1593
		i915_gem_cleanup_ringbuffer(dev);
-
 
1594
		i915_gem_context_fini(dev);
-
 
1595
		mutex_unlock(&dev->struct_mutex);
-
 
1596
		i915_gem_cleanup_aliasing_ppgtt(dev);
-
 
1597
		i915_gem_cleanup_stolen(dev);
-
 
1598
		drm_mm_takedown(&dev_priv->mm.stolen);
-
 
1599
 
-
 
1600
		intel_cleanup_overlay(dev);
-
 
1601
 
-
 
1602
		if (!I915_NEED_GFX_HWS(dev))
-
 
1603
			i915_free_hws(dev);
-
 
1604
	}
-
 
1605
 
-
 
1606
	if (dev_priv->regs != NULL)
-
 
1607
		pci_iounmap(dev->pdev, dev_priv->regs);
-
 
1608
 
-
 
1609
	intel_teardown_gmbus(dev);
-
 
1610
	intel_teardown_mchbar(dev);
-
 
1611
 
-
 
1612
	destroy_workqueue(dev_priv->wq);
-
 
1613
 
-
 
1614
	pci_dev_put(dev_priv->bridge_dev);
-
 
1615
	kfree(dev->dev_private);
-
 
1616
 
-
 
1617
	return 0;
-
 
1618
}
-
 
1619
 
-
 
1620
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
-
 
1621
{
-
 
1622
	struct drm_i915_file_private *file_priv;
-
 
1623
 
-
 
1624
	DRM_DEBUG_DRIVER("\n");
-
 
1625
	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
-
 
1626
	if (!file_priv)
-
 
1627
		return -ENOMEM;
-
 
1628
 
-
 
1629
	file->driver_priv = file_priv;
-
 
1630
 
-
 
1631
	spin_lock_init(&file_priv->mm.lock);
-
 
1632
	INIT_LIST_HEAD(&file_priv->mm.request_list);
-
 
1633
 
-
 
1634
	idr_init(&file_priv->context_idr);
-
 
1635
 
-
 
1636
	return 0;
-
 
1637
}
-
 
1638
 
-
 
1639
/**
-
 
1640
 * i915_driver_lastclose - clean up after all DRM clients have exited
-
 
1641
 * @dev: DRM device
-
 
1642
 *
-
 
1643
 * Take care of cleaning up after all DRM clients have exited.  In the
-
 
1644
 * mode setting case, we want to restore the kernel's initial mode (just
-
 
1645
 * in case the last client left us in a bad state).
-
 
1646
 *
-
 
1647
 * Additionally, in the non-mode setting case, we'll tear down the GTT
-
 
1648
 * and DMA structures, since the kernel won't be using them, and clea
-
 
1649
 * up any GEM state.
-
 
1650
 */
-
 
1651
void i915_driver_lastclose(struct drm_device * dev)
-
 
1652
{
-
 
1653
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
1654
 
-
 
1655
	/* On gen6+ we refuse to init without kms enabled, but then the drm core
-
 
1656
	 * goes right around and calls lastclose. Check for this and don't clean
-
 
1657
	 * up anything. */
-
 
1658
	if (!dev_priv)
-
 
1659
		return;
-
 
1660
 
-
 
1661
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-
 
1662
		intel_fb_restore_mode(dev);
-
 
1663
		vga_switcheroo_process_delayed_switch();
-
 
1664
		return;
-
 
1665
	}
-
 
1666
 
-
 
1667
	i915_gem_lastclose(dev);
-
 
1668
 
-
 
1669
	i915_dma_cleanup(dev);
-
 
1670
}
-
 
1671
 
-
 
1672
/*
 * i915_driver_preclose - per-client teardown while the file is still open
 * @dev: DRM device
 * @file_priv: DRM file being closed
 *
 * Called by the DRM core before the file handle goes away: first destroys
 * the client's hw contexts, then releases its remaining GEM state via
 * i915_gem_release().  The ordering (contexts before GEM release) is
 * deliberate — do not swap the calls.
 */
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
}
-
 
1677
 
-
 
1678
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
-
 
1679
{
-
 
1680
	struct drm_i915_file_private *file_priv = file->driver_priv;
-
 
1681
 
-
 
1682
	kfree(file_priv);
-
 
1683
}
-
 
1684
 
-
 
1685
/*
 * i915 ioctl dispatch table.
 *
 * The position of each entry encodes its ioctl number (offset from
 * DRM_COMMAND_BASE), so the ORDER of this array is userspace ABI —
 * never reorder or remove entries; only append.  Legacy/removed ioctls
 * keep their slot with drm_noop.  The third argument carries the access
 * flags (DRM_AUTH, DRM_MASTER, DRM_ROOT_ONLY, DRM_UNLOCKED,
 * DRM_CONTROL_ALLOW) checked by the DRM core before dispatch.
 */
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
};
-
 
1735
 
-
 
1736
/* Number of entries in i915_ioctls[], exported to the DRM core. */
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
-
 
1737
 
-
 
1738
/*
 * i915_driver_device_is_agp - report the device as AGP to the DRM core
 * @dev: DRM device (unused; the answer is unconditional)
 *
 * This is really ugly: because old userspace abused the Linux AGP
 * interface to manage the GTT, we need to claim that all Intel devices
 * are AGP — otherwise the DRM core refuses to initialize the AGP
 * support code.
 *
 * Always returns 1.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}