/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *    Zou Nan hai
 *    Xiang Hai hao
 *
 */
#define iowrite32(v, addr)      writel((v), (addr))
#define ioread32(addr)          readl(addr)

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
//#include "i915_trace.h"
#include "intel_drv.h"

static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}
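
/* Note on ring_space() above: the ring is a circular buffer, so the free
 * space is the gap from the software tail back around to the hardware head.
 * The "+ 8" keeps the tail from ever catching up to the head completely,
 * which would otherwise make an empty and a full ring indistinguishable.
 * As a rough illustration (values assumed, not taken from hardware, 4 KiB
 * pages): with size = 32 * PAGE_SIZE = 0x20000, head = 0x100 and
 * tail = 0x1f000, space = 0x100 - 0x1f008 is negative, so 0x20000 is added
 * and about 0x10f8 bytes remain usable.
 */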

static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
		  u32	invalidate_domains,
		  u32	flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) &
	    I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (INTEL_INFO(dev)->gen < 4) {
		/*
		 * On the 965, the sampler cache always gets flushed
		 * and this bit is reserved.
		 */
		if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
			cmd |= MI_READ_FLUSH;
	}
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

    ENTER();

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ_CTL(ring),
				I915_READ_HEAD(ring),
				I915_READ_TAIL(ring),
				I915_READ_START(ring));
		return -EIO;
	}

    ring->head = I915_READ_HEAD(ring);
    ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
    ring->space = ring_space(ring);

    LEAVE();

	return 0;
}

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

//   i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
    pc->cpu_page =  (void*)MapIoMem(obj->pages[0], 4096, PG_SW);
	if (pc->cpu_page == NULL)
		goto err_unpin;

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
//   i915_gem_object_unpin(obj);
err_unref:
//   drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}
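
/* Porting note (an observation about this port, not an upstream comment):
 * the commented-out calls in init_pipe_control() above and
 * cleanup_pipe_control() below show what the Linux driver does with the
 * seqno scratch page (set an LLC cache level, kmap/kunmap it, unpin and drop
 * the GEM reference).  Here the page is reached through MapIoMem(..., PG_SW)
 * instead, and the unpin/unreference paths are stubbed out, so error and
 * cleanup handling stays incomplete until those helpers are wired up.
 */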

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
//	kunmap(obj->pages[0]);
//	i915_gem_object_unpin(obj);
//	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

    ENTER();

	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev) || IS_GEN7(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
		if (IS_GEN7(dev))
			I915_WRITE(GFX_MODE_GEN7,
				   GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
				   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
	}

	if (INTEL_INFO(dev)->gen >= 6) {
	} else if (IS_GEN5(dev)) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

    LEAVE();

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

static void
update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int id;

	/*
	 * cs -> 1 = vcs, 0 = bcs
	 * vcs -> 1 = bcs, 0 = cs,
	 * bcs -> 1 = cs, 0 = vcs.
	 */
	id = ring - dev_priv->ring;
	id += 2 - i;
	id %= 3;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring,
			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
}

static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);
	update_semaphore(ring, 0, seqno);
	update_semaphore(ring, 1, seqno);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}
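
/* Note on gen6_add_request()/update_semaphore() above: each request writes
 * the new seqno into a semaphore mailbox register of the two *other* rings,
 * so that a later MI_SEMAPHORE_COMPARE emitted by intel_ring_sync() (below)
 * can wait on it.  The index arithmetic maps (this ring, i) to the target
 * ring: for the render ring at dev_priv->ring[0], i = 0 gives
 * id = (0 + 2 - 0) % 3 = 2 (blt) and i = 1 gives id = (0 + 2 - 1) % 3 = 1
 * (bsd), matching the cs/vcs/bcs table in the comment above; "+ 4*i" then
 * selects RING_SYNC_0 or RING_SYNC_1 within the target ring's register block.
 */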

int
intel_ring_sync(struct intel_ring_buffer *ring,
		struct intel_ring_buffer *to,
		u32 seqno)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			intel_ring_sync_index(ring, to) << 17 |
			MI_SEMAPHORE_COMPARE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
		 PIPE_CONTROL_DEPTH_STALL | 2);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);			\
	intel_ring_emit(ring__, 0);							\
	intel_ring_emit(ring__, 0);							\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask &= ~mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask |= mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask &= ~mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask |= mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

#if 0
static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_irq(dev_priv,
					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_irq(dev_priv,
					     GT_USER_INTERRUPT |
					     GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RING_RENDER:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case RING_BLT:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case RING_BSD:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}
#endif

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32     invalidate_domains,
	       u32     flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

#if 0

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
	       return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		ring->irq_mask &= ~rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_enable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		ring->irq_mask |= rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (IS_G4X(dev))
			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}
static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (IS_G4X(dev))
			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | (2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	int ret;

	if (IS_I830(dev) || IS_845G(dev)) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
//   i915_gem_object_unpin(obj);
//   drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

//   i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0) {
		goto err_unref;
	}

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}
#endif

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
    struct drm_i915_gem_object *obj=NULL;
	int ret;
    ENTER();
	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

//   init_waitqueue_head(&ring->irq_queue);
//   spin_lock_init(&ring->irq_lock);
    ring->irq_mask = ~0;

	if (I915_NEED_GFX_HWS(dev)) {
//       ret = init_status_page(ring);
//       if (ret)
//           return ret;
	}

    obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

    ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->map.size = ring->size;
    ring->map.offset = get_bus_addr() + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

//   drm_core_ioremap_wc(&ring->map, dev);

    ring->map.handle = ioremap(ring->map.offset, ring->map.size);

	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;
    LEAVE();
	return 0;

err_unmap:
//   drm_core_ioremapfree(&ring->map, dev);
    FreeKernelSpace(ring->virtual_start);
err_unpin:
//   i915_gem_object_unpin(obj);
err_unref:
//   drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
//   cleanup_status_page(ring);
	return ret;
}
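
/* Porting note (an observation about this port, not taken from the driver's
 * own comments): instead of drm_core_ioremap_wc(), the ring backing store is
 * mapped by taking the aperture bus address (get_bus_addr() + obj->gtt_offset)
 * and running it through ioremap(), with FreeKernelSpace() as the matching
 * teardown on the error path.  The status page, waitqueue and irq spinlock
 * setup from the Linux version are still stubbed out above.
 */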

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

//   drm_core_ioremapfree(&ring->map, ring->dev);

//   i915_gem_object_unpin(ring->obj);
//   drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

//   cleanup_status_page(ring);
}

static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}
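
/* Note on intel_wrap_ring_buffer() above: commands are never allowed to
 * straddle the end of the ring, so when a request would not fit before the
 * end, the remainder is padded with MI_NOOP (two dwords per loop iteration,
 * hence rem /= 8) and the tail wraps back to 0 before the caller writes its
 * real commands.
 */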

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

	/* If the reported head position has wrapped or hasn't advanced,
	 * fallback to the slow and accurate path.
	 */
	head = intel_read_status_page(ring, 4);
	if (head > ring->head) {
		ring->head = head;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

//   trace_i915_ring_wait_begin(ring);
	end = jiffies + 3 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
//           trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
//   trace_i915_ring_wait_end(ring);
	return -EBUSY;
}

int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
		return -EIO;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}
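
/* Usage pattern (a minimal sketch of how callers in this file use the ring
 * API, mirroring bsd_ring_flush() above; the emitted dwords are illustrative
 * only): reserve space, emit exactly that many dwords, then advance the tail
 * so the hardware sees them.
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 *
 * intel_ring_begin() converts dwords to bytes, wraps the buffer if the
 * request would cross effective_size, and waits up to 3 seconds for space;
 * intel_ring_advance() (below) masks the tail to the ring size and calls the
 * ring's write_tail hook to publish it to hardware.
 */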

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}

static const struct intel_ring_buffer render_ring = {
	.name			= "render ring",
	.id			= RING_RENDER,
	.mmio_base		= RENDER_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_render_ring,
    .write_tail     = ring_write_tail,
    .flush          = render_ring_flush,
    .add_request        = render_ring_add_request,
//   .get_seqno      = ring_get_seqno,
//   .irq_get        = render_ring_get_irq,
//   .irq_put        = render_ring_put_irq,
//   .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
//       .cleanup            = render_ring_cleanup,
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
	.name                   = "bsd ring",
	.id			= RING_BSD,
	.mmio_base		= BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= ring_write_tail,
    .flush          = bsd_ring_flush,
    .add_request        = ring_add_request,
//   .get_seqno      = ring_get_seqno,
//   .irq_get        = bsd_ring_get_irq,
//   .irq_put        = bsd_ring_put_irq,
//   .dispatch_execbuffer    = ring_dispatch_execbuffer,
};


static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
       drm_i915_private_t *dev_priv = ring->dev->dev_private;

       /* Every tail move must follow the sequence below */
       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
       I915_WRITE(GEN6_BSD_RNCID, 0x0);

       if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
                               GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
                       50))
               DRM_ERROR("timed out waiting for IDLE Indicator\n");

       I915_WRITE_TAIL(ring, value);
       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

#if 0
static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
       int ret;

       ret = intel_ring_begin(ring, 2);
       if (ret)
	       return ret;

       intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
       /* bit0-7 is the length on GEN6+ */
       intel_ring_emit(ring, offset);
       intel_ring_advance(ring);

       return 0;
}

static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_put_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_put_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

#endif

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name			= "gen6 bsd ring",
	.id			= RING_BSD,
	.mmio_base		= GEN6_BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= gen6_bsd_ring_write_tail,
    .flush          = gen6_ring_flush,
    .add_request        = gen6_add_request,
//   .get_seqno      = ring_get_seqno,
//   .irq_get        = gen6_bsd_ring_get_irq,
//   .irq_put        = gen6_bsd_ring_put_irq,
//   .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
};

#if 0
/* Blitter support (SandyBridge+) */

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_BLT_USER_INTERRUPT,
				 GEN6_BLITTER_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_BLT_USER_INTERRUPT,
			  GEN6_BLITTER_USER_INTERRUPT);
}
#endif


/* Workaround for some stepping of SNB,
 * each time when BLT engine ring tail moved,
 * the first command in the ring to be parsed
 * should be MI_BATCH_BUFFER_START
 */
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}

static int blt_ring_init(struct intel_ring_buffer *ring)
{
	if (NEED_BLT_WORKAROUND(ring->dev)) {
		struct drm_i915_gem_object *obj;
		u32 *ptr;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL)
			return -ENOMEM;

		ret = i915_gem_object_pin(obj, 4096, true);
		if (ret) {
//           drm_gem_object_unreference(&obj->base);
			return ret;
		}

        ptr = ioremap(obj->pages[0], 4096);
		*ptr++ = MI_BATCH_BUFFER_END;
		*ptr++ = MI_NOOP;
//        iounmap(obj->pages[0]);

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret) {
//           i915_gem_object_unpin(obj);
//           drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->private = obj;
	}

	return init_ring_common(ring);
}
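
/* Note on the SNB blitter workaround above: blt_ring_init() pins a scratch
 * page whose first two dwords are MI_BATCH_BUFFER_END / MI_NOOP and stores it
 * in ring->private.  blt_ring_begin() below then reserves two extra dwords
 * and emits MI_BATCH_BUFFER_START pointing at that page ahead of the caller's
 * commands, so the first command parsed after every tail move is a (trivially
 * terminating) batch buffer start, as the early-SNB erratum requires.
 */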

static int blt_ring_begin(struct intel_ring_buffer *ring,
			  int num_dwords)
{
	if (ring->private) {
		int ret = intel_ring_begin(ring, num_dwords+2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

		return 0;
	} else
		return intel_ring_begin(ring, 4);
}

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = blt_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	i915_gem_object_unpin(ring->private);
	drm_gem_object_unreference(ring->private);
	ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
       .name			= "blt ring",
       .id			= RING_BLT,
       .mmio_base		= BLT_RING_BASE,
       .size			= 32 * PAGE_SIZE,
       .init			= blt_ring_init,
       .write_tail		= ring_write_tail,
       .flush          = blt_ring_flush,
       .add_request        = gen6_add_request,
//       .get_seqno      = ring_get_seqno,
//       .irq_get            = blt_ring_get_irq,
//       .irq_put            = blt_ring_put_irq,
//       .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
//       .cleanup            = blt_ring_cleanup,
};

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
    ENTER();
	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
       ring->add_request = gen6_add_request;
//       ring->irq_get = gen6_render_ring_get_irq;
//       ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
       ring->add_request = pc_render_add_request;
//       ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}
    LEAVE();
	return intel_init_ring_buffer(dev, ring);
}


int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	if (IS_GEN6(dev) || IS_GEN7(dev))
		*ring = gen6_bsd_ring;
	else
		*ring = bsd_ring;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	*ring = gen6_blt_ring;

	return intel_init_ring_buffer(dev, ring);
}