Subversion Repositories Kolibri OS

/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *    Zou Nan hai
 *    Xiang Hai hao
 *
 */
#define iowrite32(v, addr)      writel((v), (addr))
#define ioread32(addr)          readl(addr)

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
//#include "i915_trace.h"
#include "intel_drv.h"

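/* Free space left in the ring: the gap from the software tail back around
 * to the hardware head, minus 8 bytes so the tail never appears to catch
 * up with the head.  For example, with a 4096-byte ring, head == 0 and
 * tail == 4088 the ring reports 0 bytes free.
 */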
static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}

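/* Hand out the next breadcrumb value.  Zero is reserved so that a status
 * page still reading 0 is never mistaken for a completed request.
 */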
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
		  u32	invalidate_domains,
		  u32	flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) &
	    I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (INTEL_INFO(dev)->gen < 4) {
		/*
		 * On the 965, the sampler cache always gets flushed
		 * and this bit is reserved.
		 */
		if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
			cmd |= MI_READ_FLUSH;
	}
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

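/* Bring a ring up from scratch: stop it, program the start address,
 * force HEAD back to zero (G45 parts are known not to reset it), then
 * enable the ring and verify the hardware actually latched the values.
 */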
static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ_CTL(ring),
				I915_READ_HEAD(ring),
				I915_READ_TAIL(ring),
				I915_READ_START(ring));
		return -EIO;
	}

    ring->head = I915_READ_HEAD(ring);
    ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
    ring->space = ring_space(ring);


	return 0;
}

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

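/* Allocate, pin and map a 4K scratch page that serves as the PIPE_CONTROL
 * write target (the "seqno page"), so the CPU can read back the values
 * the GPU posts there.
 */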
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

//   i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
    pc->cpu_page =  (void*)MapIoMem(obj->pages[0], 4096, PG_SW);
	if (pc->cpu_page == NULL)
		goto err_unpin;

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
//   i915_gem_object_unpin(obj);
err_unref:
//   drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
//	kunmap(obj->pages[0]);
//	i915_gem_object_unpin(obj);
//	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev) || IS_GEN7(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
		if (IS_GEN7(dev))
			I915_WRITE(GFX_MODE_GEN7,
				   GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
				   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
	}

	if (INTEL_INFO(dev)->gen >= 6) {
	} else if (IS_GEN5(dev)) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

static void
update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int id;

	/*
	 * cs -> 1 = vcs, 0 = bcs
	 * vcs -> 1 = bcs, 0 = cs,
	 * bcs -> 1 = cs, 0 = vcs.
	 */
	id = ring - dev_priv->ring;
	id += 2 - i;
	id %= 3;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring,
			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
}

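/* Emit a request on a gen6 ring: post the new seqno to the other two
 * rings' semaphore mailboxes, then write it into the hardware status
 * page and raise a user interrupt.
 */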
static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);
	update_semaphore(ring, 0, seqno);
	update_semaphore(ring, 1, seqno);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

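/* Make @ring wait until the @to ring has passed @seqno, using an on-ring
 * MI_SEMAPHORE_MBOX compare so the GPU, not the CPU, does the waiting.
 */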
int
intel_ring_sync(struct intel_ring_buffer *ring,
		struct intel_ring_buffer *to,
		u32 seqno)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			intel_ring_sync_index(ring, to) << 17 |
			MI_SEMAPHORE_COMPARE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
		 PIPE_CONTROL_DEPTH_STALL | 2);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);			\
	intel_ring_emit(ring__, 0);							\
	intel_ring_emit(ring__, 0);							\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask &= ~mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask |= mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask &= ~mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask |= mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

#if 0
static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_irq(dev_priv,
					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_irq(dev_priv,
					     GT_USER_INTERRUPT |
					     GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}
#endif

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RING_RENDER:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case RING_BLT:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case RING_BSD:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32     invalidate_domains,
	       u32     flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

#if 0

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
	       return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		ring->irq_mask &= ~rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_enable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		ring->irq_mask |= rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (IS_G4X(dev))
			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}
static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (IS_G4X(dev))
			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}
#endif

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | (2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	int ret;

	if (IS_I830(dev) || IS_845G(dev)) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
//   i915_gem_object_unpin(obj);
//   drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

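/* Allocate, pin and map the 4K hardware status page the GPU uses to
 * report seqnos and other state, then point the ring's HWS register at it.
 */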
static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

//    i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0) {
		goto err_unref;
	}

	ring->status_page.gfx_addr = obj->gtt_offset;
    ring->status_page.page_addr = MapIoMem(obj->pages[0], 4096, PG_SW);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
 //  i915_gem_object_unpin(obj);
err_unref:
 //  drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

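/* Common setup for any ring: allocate the status page if the chip needs
 * one, allocate and pin the ring object, map it through the GTT aperture
 * and run the ring-specific init hook.
 */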
int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

//   init_waitqueue_head(&ring->irq_queue);
//   spin_lock_init(&ring->irq_lock);
    ring->irq_mask = ~0;

	if (I915_NEED_GFX_HWS(dev)) {
       ret = init_status_page(ring);
       if (ret)
           return ret;
	}

    obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

    ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->map.size = ring->size;
    ring->map.offset = get_bus_addr() + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

//   drm_core_ioremap_wc(&ring->map, dev);

    ring->map.handle = ioremap(ring->map.offset, ring->map.size);

	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
//   drm_core_ioremapfree(&ring->map, dev);
    FreeKernelSpace(ring->virtual_start);
err_unpin:
//   i915_gem_object_unpin(obj);
err_unref:
//   drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
//   cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

//   drm_core_ioremapfree(&ring->map, ring->dev);

//   i915_gem_object_unpin(ring->obj);
//   drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

//   cleanup_status_page(ring);
}

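/* Not enough room before the end of the buffer: pad the remainder with
 * MI_NOOPs (waiting for the consumer if necessary) and wrap the tail
 * back to the start.
 */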
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem = ring->size - ring->tail;

    ENTER();

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring_space(ring);

    LEAVE();
	return 0;
}

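/* Wait until at least n bytes of ring space are free: first trust the
 * head value cached in the status page, then fall back to polling the
 * HEAD register for up to 3 seconds before giving up with -EBUSY.
 */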
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

    ENTER();

	/* If the reported head position has wrapped or hasn't advanced,
	 * fallback to the slow and accurate path.
	 */
	head = intel_read_status_page(ring, 4);
	if (head > ring->head) {
		ring->head = head;
		ring->space = ring_space(ring);
		if (ring->space >= n)
        {
            LEAVE();
			return 0;
        };
	}

//   trace_i915_ring_wait_begin(ring);
	end = jiffies + 3 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
//           trace_i915_ring_wait_end(ring);
            LEAVE();
			return 0;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
        {
            LEAVE();
			return -EAGAIN;
        };
	} while (!time_after(jiffies, end));
//   trace_i915_ring_wait_end(ring);
    LEAVE();

	return -EBUSY;
}

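/* Reserve space for num_dwords commands, wrapping the buffer or waiting
 * for the GPU to consume older commands as required, and debit
 * ring->space up front.  A typical emit sequence (a sketch, assuming the
 * ring has already been initialised) looks like:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret == 0) {
 *		intel_ring_emit(ring, MI_NOOP);
 *		intel_ring_emit(ring, MI_NOOP);
 *		intel_ring_advance(ring);
 *	}
 *
 * intel_ring_advance() then publishes the new tail to the hardware.
 */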
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

//   if (unlikely(atomic_read(&dev_priv->mm.wedged)))
//       return -EIO;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}

static const struct intel_ring_buffer render_ring = {
	.name			= "render ring",
	.id			= RING_RENDER,
	.mmio_base		= RENDER_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_render_ring,
    .write_tail     = ring_write_tail,
    .flush          = render_ring_flush,
    .add_request        = render_ring_add_request,
//   .get_seqno      = ring_get_seqno,
//   .irq_get        = render_ring_get_irq,
//   .irq_put        = render_ring_put_irq,
   .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
//       .cleanup            = render_ring_cleanup,
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
	.name                   = "bsd ring",
	.id			= RING_BSD,
	.mmio_base		= BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= ring_write_tail,
    .flush          = bsd_ring_flush,
    .add_request        = ring_add_request,
//   .get_seqno      = ring_get_seqno,
//   .irq_get        = bsd_ring_get_irq,
//   .irq_put        = bsd_ring_put_irq,
   .dispatch_execbuffer    = ring_dispatch_execbuffer,
};


static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
       drm_i915_private_t *dev_priv = ring->dev->dev_private;

       /* Every tail move must follow the sequence below */
       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
       I915_WRITE(GEN6_BSD_RNCID, 0x0);

       if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
                               GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
                       50))
               DRM_ERROR("timed out waiting for IDLE Indicator\n");

       I915_WRITE_TAIL(ring, value);
       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

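/* MI_FLUSH_DW based flush for the gen6 BSD ring; also invalidates the
 * TLBs when a GPU-domain invalidate is requested.
 */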
static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
       int ret;

       ret = intel_ring_begin(ring, 2);
       if (ret)
	       return ret;

       intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
       /* bit0-7 is the length on GEN6+ */
       intel_ring_emit(ring, offset);
       intel_ring_advance(ring);

       return 0;
}

#if 0

static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_put_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_put_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

#endif

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name			= "gen6 bsd ring",
	.id			= RING_BSD,
	.mmio_base		= GEN6_BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= gen6_bsd_ring_write_tail,
    .flush          = gen6_ring_flush,
    .add_request        = gen6_add_request,
//   .get_seqno      = ring_get_seqno,
//   .irq_get        = gen6_bsd_ring_get_irq,
//   .irq_put        = gen6_bsd_ring_put_irq,
   .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
};

#if 0
/* Blitter support (SandyBridge+) */

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_BLT_USER_INTERRUPT,
				 GEN6_BLITTER_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_BLT_USER_INTERRUPT,
			  GEN6_BLITTER_USER_INTERRUPT);
}
#endif


/* Workaround for some steppings of SNB: each time the BLT engine ring
 * tail is moved, the first command in the ring to be parsed must be
 * MI_BATCH_BUFFER_START.
 */
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}

static int blt_ring_init(struct intel_ring_buffer *ring)
{
	if (NEED_BLT_WORKAROUND(ring->dev)) {
		struct drm_i915_gem_object *obj;
		u32 *ptr;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL)
			return -ENOMEM;

		ret = i915_gem_object_pin(obj, 4096, true);
		if (ret) {
//           drm_gem_object_unreference(&obj->base);
			return ret;
		}

        ptr = ioremap(obj->pages[0], 4096);
		*ptr++ = MI_BATCH_BUFFER_END;
		*ptr++ = MI_NOOP;
//        iounmap(obj->pages[0]);

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret) {
//           i915_gem_object_unpin(obj);
//           drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->private = obj;
	}

	return init_ring_common(ring);
}

static int blt_ring_begin(struct intel_ring_buffer *ring,
			  int num_dwords)
{
	if (ring->private) {
		int ret = intel_ring_begin(ring, num_dwords+2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

		return 0;
	} else
		return intel_ring_begin(ring, 4);
}

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = blt_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	i915_gem_object_unpin(ring->private);
	drm_gem_object_unreference(ring->private);
	ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
       .name			= "blt ring",
       .id			= RING_BLT,
       .mmio_base		= BLT_RING_BASE,
       .size			= 32 * PAGE_SIZE,
       .init			= blt_ring_init,
       .write_tail		= ring_write_tail,
       .flush          = blt_ring_flush,
       .add_request        = gen6_add_request,
//       .get_seqno      = ring_get_seqno,
//       .irq_get            = blt_ring_get_irq,
//       .irq_put            = blt_ring_put_irq,
       .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
//       .cleanup            = blt_ring_cleanup,
};

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
       ring->add_request = gen6_add_request;
//       ring->irq_get = gen6_render_ring_get_irq;
//       ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
       ring->add_request = pc_render_add_request;
//       ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}


int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	if (IS_GEN6(dev) || IS_GEN7(dev))
		*ring = gen6_bsd_ring;
	else
		*ring = bsd_ring;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	*ring = gen6_blt_ring;

	return intel_init_ring_buffer(dev, ring);
}