Subversion Repositories Kolibri OS

Rev 2340 → Rev 2342 (unified diff: lines marked "-" exist only in Rev 2340, lines marked "+" only in Rev 2342; other lines are unchanged context)
Line 34... Line 34...
 #include "i915_drv.h"
 #include "i915_drm.h"
 //#include "i915_trace.h"
 #include "intel_drv.h"
+
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+	struct drm_i915_gem_object *obj;
+	volatile u32 *cpu_page;
+	u32 gtt_offset;
+};
 
 static inline int ring_space(struct intel_ring_buffer *ring)
 {
 	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
 	if (space < 0)
Line 123... Line 133...
 	intel_ring_advance(ring);
 
 	return 0;
+}
+
+/**
+ * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
+ * implementing two workarounds on gen6.  From section 1.4.7.1
+ * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
+ *
+ * [DevSNB-C+{W/A}] Before any depth stall flush (including those
+ * produced by non-pipelined state commands), software needs to first
+ * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
+ * 0.
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
+ * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
+ *
+ * And the workaround for these two requires this workaround first:
+ *
+ * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
+ * BEFORE the pipe-control with a post-sync op and no write-cache
+ * flushes.
+ *
+ * And this last workaround is tricky because of the requirements on
+ * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
+ * volume 2 part 1:
+ *
+ *     "1 of the following must also be set:
+ *      - Render Target Cache Flush Enable ([12] of DW1)
+ *      - Depth Cache Flush Enable ([0] of DW1)
+ *      - Stall at Pixel Scoreboard ([1] of DW1)
+ *      - Depth Stall ([13] of DW1)
+ *      - Post-Sync Operation ([13] of DW1)
+ *      - Notify Enable ([8] of DW1)"
+ *
+ * The cache flushes require the workaround flush that triggered this
+ * one, so we can't use it.  Depth stall would trigger the same.
+ * Post-sync nonzero is what triggered this second workaround, so we
+ * can't use that one either.  Notify enable is IRQs, which aren't
+ * really our business.  That leaves only stall at scoreboard.
+ */
+static int
+intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
+{
+	struct pipe_control *pc = ring->private;
+	u32 scratch_addr = pc->gtt_offset + 128;
+	int ret;
+
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+			PIPE_CONTROL_STALL_AT_SCOREBOARD);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+	intel_ring_emit(ring, 0); /* low dword */
+	intel_ring_emit(ring, 0); /* high dword */
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen6_render_ring_flush(struct intel_ring_buffer *ring,
+                         u32 invalidate_domains, u32 flush_domains)
+{
+	u32 flags = 0;
+	struct pipe_control *pc = ring->private;
+	u32 scratch_addr = pc->gtt_offset + 128;
+	int ret;
+
+	/* Force SNB workarounds for PIPE_CONTROL flushes */
+	intel_emit_post_sync_nonzero_flush(ring);
+
+	/* Just flush everything.  Experiments have shown that reducing the
+	 * number of bits based on the write domains has little performance
+	 * impact.
+	 */
+	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, 0); /* lower dword */
+	intel_ring_emit(ring, 0); /* uppwer dword */
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
 }
 
 static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
Line 203... Line 325...
 
 
 	return 0;
 }
-
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-struct pipe_control {
-	struct drm_i915_gem_object *obj;
-	volatile u32 *cpu_page;
-	u32 gtt_offset;
-};
 
 static int
 init_pipe_control(struct intel_ring_buffer *ring)
Line 293... Line 405...
 			I915_WRITE(GFX_MODE_GEN7,
 				   GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
 				   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6) {
-	} else if (IS_GEN5(dev)) {
+	if (INTEL_INFO(dev)->gen >= 5) {
 		ret = init_pipe_control(ring);
 		if (ret)
 			return ret;
+	}
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		I915_WRITE(INSTPM,
+			   INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
 	}
 
 	return ret;
 }
Line 312... Line 428...
 
 	cleanup_pipe_control(ring);
 }
 
-static void
-update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
-{
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int id;
-
-	/*
-	 * cs -> 1 = vcs, 0 = bcs
-	 * vcs -> 1 = bcs, 0 = cs,
-	 * bcs -> 1 = cs, 0 = vcs.
-	 */
-	id = ring - dev_priv->ring;
-	id += 2 - i;
-	id %= 3;
-
-	intel_ring_emit(ring,
-			MI_SEMAPHORE_MBOX |
-			MI_SEMAPHORE_REGISTER |
-			MI_SEMAPHORE_UPDATE);
-	intel_ring_emit(ring, seqno);
-	intel_ring_emit(ring,
-			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
-}
-
-static int
-gen6_add_request(struct intel_ring_buffer *ring,
-		 u32 *result)
-{
-	u32 seqno;
-	int ret;
-
-	ret = intel_ring_begin(ring, 10);
-	if (ret)
-		return ret;
-
-	seqno = i915_gem_get_seqno(ring->dev);
-	update_semaphore(ring, 0, seqno);
-	update_semaphore(ring, 1, seqno);
-
-	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, seqno);
-	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
-
-	*result = seqno;
-	return 0;
-}
-
-int
-intel_ring_sync(struct intel_ring_buffer *ring,
-		struct intel_ring_buffer *to,
-		u32 seqno)
-{
-	int ret;
-
-	ret = intel_ring_begin(ring, 4);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring,
-			MI_SEMAPHORE_MBOX |
-			MI_SEMAPHORE_REGISTER |
-			intel_ring_sync_index(ring, to) << 17 |
-			MI_SEMAPHORE_COMPARE);
-	intel_ring_emit(ring, seqno);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+static void
+update_mboxes(struct intel_ring_buffer *ring,
+	    u32 seqno,
+	    u32 mmio_offset)
+{
+	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
+			      MI_SEMAPHORE_GLOBAL_GTT |
+			MI_SEMAPHORE_REGISTER |
+			MI_SEMAPHORE_UPDATE);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, mmio_offset);
+}
+
+/**
+ * gen6_add_request - Update the semaphore mailbox registers
+ *
+ * @ring - ring that is adding a request
+ * @seqno - return seqno stuck into the ring
+ *
+ * Update the mailbox registers in the *other* rings with the current seqno.
+ * This acts like a signal in the canonical semaphore.
+ */
+static int
+gen6_add_request(struct intel_ring_buffer *ring,
+		 u32 *seqno)
+{
+	u32 mbox1_reg;
+	u32 mbox2_reg;
+	int ret;
+
+	ret = intel_ring_begin(ring, 10);
+	if (ret)
+		return ret;
+
+	mbox1_reg = ring->signal_mbox[0];
+	mbox2_reg = ring->signal_mbox[1];
+
+	*seqno = i915_gem_get_seqno(ring->dev);
+
+	update_mboxes(ring, *seqno, mbox1_reg);
+	update_mboxes(ring, *seqno, mbox2_reg);
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, *seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+/**
+ * intel_ring_sync - sync the waiter to the signaller on seqno
+ *
+ * @waiter - ring that is waiting
+ * @signaller - ring which has, or will signal
+ * @seqno - seqno which the waiter will block on
+ */
+static int
+intel_ring_sync(struct intel_ring_buffer *waiter,
+		struct intel_ring_buffer *signaller,
+		int ring,
+		u32 seqno)
+{
+	int ret;
+	u32 dw1 = MI_SEMAPHORE_MBOX |
+		  MI_SEMAPHORE_COMPARE |
+		  MI_SEMAPHORE_REGISTER;
+
+	ret = intel_ring_begin(waiter, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
+	intel_ring_emit(waiter, seqno);
+	intel_ring_emit(waiter, 0);
+	intel_ring_emit(waiter, MI_NOOP);
+	intel_ring_advance(waiter);
+
+	return 0;
+}
+
+/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
+int
+render_ring_sync_to(struct intel_ring_buffer *waiter,
+		    struct intel_ring_buffer *signaller,
+		    u32 seqno)
+{
+//   WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
+	return intel_ring_sync(waiter,
+			       signaller,
+			       RCS,
+			       seqno);
+}
+
+/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
+int
+gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
+		      struct intel_ring_buffer *signaller,
+		      u32 seqno)
+{
+//   WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
+	return intel_ring_sync(waiter,
+			       signaller,
+			       VCS,
+			       seqno);
+}
+
+/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
+int
+gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
+		      struct intel_ring_buffer *signaller,
+		      u32 seqno)
+{
+//   WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
+	return intel_ring_sync(waiter,
+			       signaller,
+			       BCS,
+			       seqno);
Line 416... Line 580...
 	 */
 	ret = intel_ring_begin(ring, 32);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WRITE_FLUSH |
+			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, seqno);
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
Line 432... Line 597...
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 128;
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 128;
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WRITE_FLUSH |
+			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
 	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, seqno);
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
Line 467... Line 633...
 	*result = seqno;
 	return 0;
 }
 
+static u32
+gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	/* Workaround to force correct ordering between irq and seqno writes on
+	 * ivb (and maybe also on snb) by reading from a CS register (like
+	 * ACTHD) before reading the status page. */
+	if (IS_GEN7(dev))
+		intel_ring_get_active_head(ring);
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
 static u32
 ring_get_seqno(struct intel_ring_buffer *ring)
 {
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
Line 511... Line 690...
 	dev_priv->irq_mask |= mask;
 	I915_WRITE(IMR, dev_priv->irq_mask);
 	POSTING_READ(IMR);
 }
 
-#if 0
 static bool
 render_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
Line 551... Line 729...
 		else
 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
 	}
 	spin_unlock(&ring->irq_lock);
 }
-#endif
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
Line 624... Line 801...
 
 	*result = seqno;
 	return 0;
 }
-
-#if 0
 
 static bool
 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!dev->irq_enabled)
 	       return false;
 
+	/* It looks like we need to prevent the gt from suspending while waiting
+	 * for an notifiy irq, otherwise irqs seem to get lost on at least the
+	 * blt/bsd rings on ivb. */
+	if (IS_GEN7(dev))
+		gen6_gt_force_wake_get(dev_priv);
+
 	spin_lock(&ring->irq_lock);
Line 659... Line 840...
 		ring->irq_mask |= rflag;
 		I915_WRITE_IMR(ring, ring->irq_mask);
 		ironlake_disable_irq(dev_priv, gflag);
 	}
 	spin_unlock(&ring->irq_lock);
+
+	if (IS_GEN7(dev))
+		gen6_gt_force_wake_put(dev_priv);
 }
 
 static bool
 bsd_ring_get_irq(struct intel_ring_buffer *ring)
Line 696... Line 880...
 		else
 			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
 	}
 	spin_unlock(&ring->irq_lock);
 }
-#endif
 
 static int
 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 {
Line 826... Line 1009...
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->gpu_write_list);
 
 //   init_waitqueue_head(&ring->irq_queue);
-//   spin_lock_init(&ring->irq_lock);
+    spin_lock_init(&ring->irq_lock);
     ring->irq_mask = ~0;
 
 	if (I915_NEED_GFX_HWS(dev)) {
1036
	.size			= 32 * PAGE_SIZE,
1219
	.size			= 32 * PAGE_SIZE,
1037
	.init			= init_render_ring,
1220
	.init			= init_render_ring,
1038
    .write_tail     = ring_write_tail,
1221
    .write_tail     = ring_write_tail,
1039
    .flush          = render_ring_flush,
1222
    .flush          = render_ring_flush,
1040
    .add_request        = render_ring_add_request,
1223
    .add_request        = render_ring_add_request,
1041
//   .get_seqno      = ring_get_seqno,
1224
    .get_seqno      = ring_get_seqno,
1042
//   .irq_get        = render_ring_get_irq,
1225
	.irq_get		= render_ring_get_irq,
1043
//   .irq_put        = render_ring_put_irq,
1226
	.irq_put		= render_ring_put_irq,
1044
   .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
1227
   .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
1045
//       .cleanup            = render_ring_cleanup,
1228
//       .cleanup            = render_ring_cleanup,
-
 
1229
	.sync_to		= render_ring_sync_to,
-
 
1230
	.semaphore_register	= {MI_SEMAPHORE_SYNC_INVALID,
-
 
1231
				   MI_SEMAPHORE_SYNC_RV,
-
 
1232
				   MI_SEMAPHORE_SYNC_RB},
-
 
1233
	.signal_mbox		= {GEN6_VRSYNC, GEN6_BRSYNC},
1046
};
1234
};
Line 1047... Line 1235...
1047
 
1235
 
Line 1048... Line 1236...
1048
/* ring buffer for bit-stream decoder */
1236
/* ring buffer for bit-stream decoder */
Line 1054... Line 1242...
 	.size			= 32 * PAGE_SIZE,
 	.init			= init_ring_common,
 	.write_tail		= ring_write_tail,
     .flush          = bsd_ring_flush,
     .add_request        = ring_add_request,
-//   .get_seqno      = ring_get_seqno,
-//   .irq_get        = bsd_ring_get_irq,
-//   .irq_put        = bsd_ring_put_irq,
+    .get_seqno      = ring_get_seqno,
+	.irq_get		= bsd_ring_get_irq,
+	.irq_put		= bsd_ring_put_irq,
    .dispatch_execbuffer    = ring_dispatch_execbuffer,
 };
 
Line 1122... Line 1310...
        intel_ring_advance(ring);
 
        return 0;
 }
-
-#if 0
 
 static bool
 gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
 {
Line 1156... Line 1342...
 	return gen6_ring_put_irq(ring,
 				 GT_GEN6_BSD_USER_INTERRUPT,
 				 GEN6_BSD_USER_INTERRUPT);
 }
-
-#endif
 
 /* ring buffer for Video Codec for Gen6+ */
 static const struct intel_ring_buffer gen6_bsd_ring = {
 	.name			= "gen6 bsd ring",
 	.id			= RING_BSD,
 	.mmio_base		= GEN6_BSD_RING_BASE,
 	.size			= 32 * PAGE_SIZE,
 	.init			= init_ring_common,
 	.write_tail		= gen6_bsd_ring_write_tail,
     .flush          = gen6_ring_flush,
     .add_request        = gen6_add_request,
-//   .get_seqno      = ring_get_seqno,
-//   .irq_get        = gen6_bsd_ring_get_irq,
-//   .irq_put        = gen6_bsd_ring_put_irq,
+	.get_seqno		= gen6_ring_get_seqno,
+	.irq_get		= gen6_bsd_ring_get_irq,
+	.irq_put		= gen6_bsd_ring_put_irq,
    .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
+	.sync_to		= gen6_bsd_ring_sync_to,
+	.semaphore_register	= {MI_SEMAPHORE_SYNC_VR,
+				   MI_SEMAPHORE_SYNC_INVALID,
+				   MI_SEMAPHORE_SYNC_VB},
+	.signal_mbox		= {GEN6_RVSYNC, GEN6_BVSYNC},
 };
 
-#if 0
 /* Blitter support (SandyBridge+) */
 
1369
 
Line 1192... Line 1380...
1192
{
1380
{
1193
	gen6_ring_put_irq(ring,
1381
	gen6_ring_put_irq(ring,
1194
			  GT_BLT_USER_INTERRUPT,
1382
			  GT_BLT_USER_INTERRUPT,
1195
			  GEN6_BLITTER_USER_INTERRUPT);
1383
			  GEN6_BLITTER_USER_INTERRUPT);
1196
}
1384
}
1197
#endif
-
 
Line 1198... Line 1385...
1198
 
1385
 
1199
 
1386
 
1200
/* Workaround for some stepping of SNB,
1387
/* Workaround for some stepping of SNB,
Line 1300... Line 1487...
        .size			= 32 * PAGE_SIZE,
        .init			= blt_ring_init,
        .write_tail		= ring_write_tail,
        .flush          = blt_ring_flush,
        .add_request        = gen6_add_request,
-//       .get_seqno      = ring_get_seqno,
-//       .irq_get            = blt_ring_get_irq,
-//       .irq_put            = blt_ring_put_irq,
+	.get_seqno		= gen6_ring_get_seqno,
+	.irq_get		= blt_ring_get_irq,
+	.irq_put		= blt_ring_put_irq,
        .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
 //       .cleanup            = blt_ring_cleanup,
+	.sync_to		= gen6_blt_ring_sync_to,
+	.semaphore_register	= {MI_SEMAPHORE_SYNC_BR,
+				   MI_SEMAPHORE_SYNC_BV,
+				   MI_SEMAPHORE_SYNC_INVALID},
+	.signal_mbox		= {GEN6_RBSYNC, GEN6_VBSYNC},
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 
 	*ring = render_ring;
 	if (INTEL_INFO(dev)->gen >= 6) {
        ring->add_request = gen6_add_request;
-//       ring->irq_get = gen6_render_ring_get_irq;
-//       ring->irq_put = gen6_render_ring_put_irq;
+		ring->flush = gen6_render_ring_flush;
+		ring->irq_get = gen6_render_ring_get_irq;
+		ring->irq_put = gen6_render_ring_put_irq;
+		ring->get_seqno = gen6_ring_get_seqno;
 	} else if (IS_GEN5(dev)) {
        ring->add_request = pc_render_add_request;
-//       ring->get_seqno = pc_render_get_seqno;
+		ring->get_seqno = pc_render_get_seqno;
 	}
 