/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *    Zou Nan hai
 *    Xiang Hai hao
 *
 */
#define iowrite32(v, addr)      writel((v), (addr))
#define ioread32(addr)          readl(addr)

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
//#include "i915_trace.h"
#include "intel_drv.h"

static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}
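
/*
 * Worked example (illustrative only, not part of the driver): with a
 * 32 KiB ring (size == 0x8000), head == 0x0100 and tail == 0x7f00, the
 * raw difference 0x0100 - (0x7f00 + 8) is negative, so ring->size is
 * added back, giving 0x01f8 bytes of free space.  The extra 8 bytes keep
 * the tail from ever advancing right up to the head, which the hardware
 * would interpret as an empty ring.
 */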

static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}
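
/*
 * Example (illustrative): if dev_priv->next_seqno is 0xffffffff, this
 * call returns 0xffffffff and the post-increment wraps to 0, which is
 * immediately bumped to 1 so that 0 is never handed out as a real
 * sequence number.
 */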

static int
render_ring_flush(struct intel_ring_buffer *ring,
		  u32	invalidate_domains,
		  u32	flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) &
	    I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (INTEL_INFO(dev)->gen < 4) {
		/*
		 * On the 965, the sampler cache always gets flushed
		 * and this bit is reserved.
		 */
		if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
			cmd |= MI_READ_FLUSH;
	}
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
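
/*
 * Usage sketch (assumption: this is normally reached through ring->flush
 * rather than called directly; shown only to illustrate the domain bits):
 *
 *   ring->flush(ring,
 *               I915_GEM_DOMAIN_INSTRUCTION,   // invalidate_domains
 *               I915_GEM_DOMAIN_RENDER);       // flush_domains
 *
 * would clear MI_NO_WRITE_FLUSH (so render writes are flushed) and set
 * MI_EXE_FLUSH in the emitted MI_FLUSH command.
 */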

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

    ENTER();

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ_CTL(ring),
				I915_READ_HEAD(ring),
				I915_READ_TAIL(ring),
				I915_READ_START(ring));
		return -EIO;
	}

    ring->head = I915_READ_HEAD(ring);
    ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
    ring->space = ring_space(ring);

    LEAVE();

	return 0;
}
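
/*
 * Note on the CTL programming above (illustrative): the rings defined in
 * this file use size == 32 * PAGE_SIZE, so (ring->size - PAGE_SIZE)
 * masked with RING_NR_PAGES places the buffer-length encoding for a
 * 32-page ring into the control register, and RING_VALID enables the
 * ring once START, HEAD and TAIL are consistent.
 */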

#if 0

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page =  kmap(obj->pages[0]);
	if (pc->cpu_page == NULL)
		goto err_unpin;

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

#endif

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

    ENTER();

	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev) || IS_GEN7(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
		if (IS_GEN7(dev))
			I915_WRITE(GFX_MODE_GEN7,
				   GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
				   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
	}

	if (INTEL_INFO(dev)->gen >= 6) {
	} else if (IS_GEN5(dev)) {
//       ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

    LEAVE();

	return ret;
}

#if 0

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

static void
update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int id;

	/*
	 * cs -> 1 = vcs, 0 = bcs
	 * vcs -> 1 = bcs, 0 = cs,
	 * bcs -> 1 = cs, 0 = vcs.
	 */
	id = ring - dev_priv->ring;
	id += 2 - i;
	id %= 3;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring,
			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
}

static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);
	update_semaphore(ring, 0, seqno);
	update_semaphore(ring, 1, seqno);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

int
intel_ring_sync(struct intel_ring_buffer *ring,
		struct intel_ring_buffer *to,
		u32 seqno)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			intel_ring_sync_index(ring, to) << 17 |
			MI_SEMAPHORE_COMPARE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
		 PIPE_CONTROL_DEPTH_STALL | 2);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);			\
	intel_ring_emit(ring__, 0);							\
	intel_ring_emit(ring__, 0);							\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask &= ~mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask |= mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask &= ~mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask |= mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_irq(dev_priv,
					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_irq(dev_priv,
					     GT_USER_INTERRUPT |
					     GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RING_RENDER:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case RING_BLT:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case RING_BSD:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}
#endif

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32     invalidate_domains,
	       u32     flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

#if 0

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
	       return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		ring->irq_mask &= ~rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_enable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		ring->irq_mask |= rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (IS_G4X(dev))
			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}
static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (IS_G4X(dev))
			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | (2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	int ret;

	if (IS_I830(dev) || IS_845G(dev)) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0) {
		goto err_unref;
	}

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}
#endif

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
    struct drm_i915_gem_object *obj=NULL;
	int ret;
    ENTER();
	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

//   init_waitqueue_head(&ring->irq_queue);
//   spin_lock_init(&ring->irq_lock);
    ring->irq_mask = ~0;

	if (I915_NEED_GFX_HWS(dev)) {
//       ret = init_status_page(ring);
//       if (ret)
//           return ret;
	}

    obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

    ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->map.size = ring->size;
    ring->map.offset = get_bus_addr() + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

//   drm_core_ioremap_wc(&ring->map, dev);

    ring->map.handle = ioremap(ring->map.offset, ring->map.size);

	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;
    LEAVE();
	return 0;

err_unmap:
//   drm_core_ioremapfree(&ring->map, dev);
    FreeKernelSpace(ring->virtual_start);
err_unpin:
//   i915_gem_object_unpin(obj);
err_unref:
//   drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
//   cleanup_status_page(ring);
	return ret;
}


void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

//   drm_core_ioremapfree(&ring->map, ring->dev);

//   i915_gem_object_unpin(ring->obj);
//   drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

//   cleanup_status_page(ring);
}


static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

	/* If the reported head position has wrapped or hasn't advanced,
	 * fall back to the slow and accurate path.
	 */
	head = intel_read_status_page(ring, 4);
	if (head > ring->head) {
		ring->head = head;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

//   trace_i915_ring_wait_begin(ring);
	end = jiffies + 3 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
//           trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
//   trace_i915_ring_wait_end(ring);
	return -EBUSY;
}

int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
		return -EIO;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}
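
/*
 * Typical emission pattern (sketch only; emit_two_noops is a hypothetical
 * helper used for illustration and is not part of this file):
 *
 *   static int emit_two_noops(struct intel_ring_buffer *ring)
 *   {
 *       int ret = intel_ring_begin(ring, 2);   // reserve 2 dwords
 *       if (ret)
 *           return ret;
 *       intel_ring_emit(ring, MI_NOOP);
 *       intel_ring_emit(ring, MI_NOOP);
 *       intel_ring_advance(ring);              // publish the new tail to hw
 *       return 0;
 *   }
 */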


static const struct intel_ring_buffer render_ring = {
	.name			= "render ring",
	.id			= RING_RENDER,
	.mmio_base		= RENDER_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_render_ring,
    .write_tail     = ring_write_tail,
    .flush          = render_ring_flush,
//   .add_request        = render_ring_add_request,
//   .get_seqno      = ring_get_seqno,
//   .irq_get        = render_ring_get_irq,
//   .irq_put        = render_ring_put_irq,
//   .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
//       .cleanup            = render_ring_cleanup,
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
	.name                   = "bsd ring",
	.id			= RING_BSD,
	.mmio_base		= BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= ring_write_tail,
    .flush          = bsd_ring_flush,
//   .add_request        = ring_add_request,
//   .get_seqno      = ring_get_seqno,
//   .irq_get        = bsd_ring_get_irq,
//   .irq_put        = bsd_ring_put_irq,
//   .dispatch_execbuffer    = ring_dispatch_execbuffer,
};


static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
       drm_i915_private_t *dev_priv = ring->dev->dev_private;

       /* Every tail move must follow the sequence below */
       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
       I915_WRITE(GEN6_BSD_RNCID, 0x0);

       if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
                               GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
                       50))
               DRM_ERROR("timed out waiting for IDLE Indicator\n");

       I915_WRITE_TAIL(ring, value);
       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}


static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

#if 0
static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
       int ret;

       ret = intel_ring_begin(ring, 2);
       if (ret)
	       return ret;

       intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
       /* bit0-7 is the length on GEN6+ */
       intel_ring_emit(ring, offset);
       intel_ring_advance(ring);

       return 0;
}

static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_put_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_put_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

#endif

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name			= "gen6 bsd ring",
	.id			= RING_BSD,
	.mmio_base		= GEN6_BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= gen6_bsd_ring_write_tail,
    .flush          = gen6_ring_flush,
//   .add_request        = gen6_add_request,
//   .get_seqno      = ring_get_seqno,
//   .irq_get        = gen6_bsd_ring_get_irq,
//   .irq_put        = gen6_bsd_ring_put_irq,
//   .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
};

#if 0
/* Blitter support (SandyBridge+) */

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_BLT_USER_INTERRUPT,
				 GEN6_BLITTER_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_BLT_USER_INTERRUPT,
			  GEN6_BLITTER_USER_INTERRUPT);
}
#endif


/* Workaround for some steppings of SNB:
 * each time the BLT engine ring tail is moved,
 * the first command in the ring to be parsed
 * should be MI_BATCH_BUFFER_START
 */
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}


static int blt_ring_init(struct intel_ring_buffer *ring)
{
	if (NEED_BLT_WORKAROUND(ring->dev)) {
		struct drm_i915_gem_object *obj;
		u32 *ptr;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL)
			return -ENOMEM;

		ret = i915_gem_object_pin(obj, 4096, true);
		if (ret) {
//           drm_gem_object_unreference(&obj->base);
			return ret;
		}

        ptr = ioremap(obj->pages[0], 4096);
		*ptr++ = MI_BATCH_BUFFER_END;
		*ptr++ = MI_NOOP;
//        iounmap(obj->pages[0]);

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret) {
//           i915_gem_object_unpin(obj);
//           drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->private = obj;
	}

	return init_ring_common(ring);
}

static int blt_ring_begin(struct intel_ring_buffer *ring,
			  int num_dwords)
{
	if (ring->private) {
		int ret = intel_ring_begin(ring, num_dwords+2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

		return 0;
	} else
		return intel_ring_begin(ring, 4);
}

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = blt_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	i915_gem_object_unpin(ring->private);
	drm_gem_object_unreference(ring->private);
	ring->private = NULL;
}


static const struct intel_ring_buffer gen6_blt_ring = {
       .name			= "blt ring",
       .id			= RING_BLT,
       .mmio_base		= BLT_RING_BASE,
       .size			= 32 * PAGE_SIZE,
       .init			= blt_ring_init,
       .write_tail		= ring_write_tail,
       .flush          = blt_ring_flush,
//       .add_request        = gen6_add_request,
//       .get_seqno      = ring_get_seqno,
//       .irq_get            = blt_ring_get_irq,
//       .irq_put            = blt_ring_put_irq,
//       .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
//       .cleanup            = blt_ring_cleanup,
};



int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
    ENTER();
	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
//       ring->add_request = gen6_add_request;
//       ring->irq_get = gen6_render_ring_get_irq;
//       ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
//       ring->add_request = pc_render_add_request;
//       ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}
    LEAVE();
	return intel_init_ring_buffer(dev, ring);
}


int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	if (IS_GEN6(dev) || IS_GEN7(dev))
		*ring = gen6_bsd_ring;
	else
		*ring = bsd_ring;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	*ring = gen6_blt_ring;

	return intel_init_ring_buffer(dev, ring);
}