Subversion Repositories Kolibri OS


Rev 3746 → Rev 4104 (unified diff: '-' lines exist only in Rev 3746, '+' lines only in Rev 4104; other lines are unchanged context)
Line 31... Line 31...
 #include "i915_drv.h"
 #include <drm/i915_drm.h>
 #include "i915_trace.h"
 #include "intel_drv.h"
 
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-struct pipe_control {
-	struct drm_i915_gem_object *obj;
-	volatile u32 *cpu_page;
-	u32 gtt_offset;
-};
-
 static inline int ring_space(struct intel_ring_buffer *ring)
 {
 	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
 	if (space < 0)
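Note: the struct pipe_control removal above is the heart of this revision. Its fields move into struct intel_ring_buffer itself (declared in intel_ringbuffer.h, outside this diff), so every ring->private / pc-> access below becomes a direct ring->scratch access and the kmalloc/kfree pair disappears. A minimal sketch of the embedded field, assuming exactly the layout implied by the accessors used in this file:

	/* hypothetical sketch, embedded in struct intel_ring_buffer */
	struct {
		struct drm_i915_gem_object *obj;	/* scratch/seqno page BO */
		u32 gtt_offset;				/* its offset in the global GTT */
		volatile u32 *cpu_page;			/* CPU mapping of the page */
	} scratch;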
Line 173... Line 163...
  * really our business.  That leaves only stall at scoreboard.
  */
 static int
 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
 {
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
 
 
Line 211... Line 200...
 static int
 gen6_render_ring_flush(struct intel_ring_buffer *ring,
                          u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
 	ret = intel_emit_post_sync_nonzero_flush(ring);
Line 278... Line 266...
 	intel_ring_advance(ring);
 
 	return 0;
+}
+
+static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
+{
+	int ret;
+
+	if (!ring->fbc_dirty)
+		return 0;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+	intel_ring_emit(ring, MI_NOOP);
+	/* WaFbcNukeOn3DBlt:ivb/hsw */
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit(ring, MSG_FBC_REND_STATE);
+	intel_ring_emit(ring, value);
+	intel_ring_advance(ring);
+
+	ring->fbc_dirty = false;
+	return 0;
 }
 
 static int
 gen7_render_ring_flush(struct intel_ring_buffer *ring,
 		       u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
 
Line 334... Line 342...
 	intel_ring_emit(ring, flags);
 	intel_ring_emit(ring, scratch_addr);
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
+
+	if (flush_domains)
+		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
 
 	return 0;
 }
 
Line 353... Line 364...
 			RING_ACTHD(ring->mmio_base) : ACTHD;
 
 	return I915_READ(acthd_reg);
+}
+
+static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u32 addr;
+
+	addr = dev_priv->status_page_dmah->busaddr;
+	if (INTEL_INFO(ring->dev)->gen >= 4)
+		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+	I915_WRITE(HWS_PGA, addr);
 }
 
 static int init_ring_common(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
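Note: the gen >= 4 branch in the ring_setup_phys_status_page() addition above packs a wider-than-32-bit status-page bus address into the 32-bit HWS_PGA register: bits 35:32 of the DMA address are carried in bits 7:4 of the register value (the page-aligned low bits are zero anyway). A worked example with a hypothetical 36-bit address, assuming a 64-bit busaddr for illustration:

	u64 busaddr = 0x123456000ULL;		/* hypothetical, bit 32 set */
	u32 addr = (u32)busaddr;		/* 0x23456000 */
	addr |= (busaddr >> 28) & 0xf0;		/* (0x12 & 0xf0) = 0x10 */
	/* addr == 0x23456010: addr[7:4] now hold busaddr[35:32] */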
Line 364... Line 386...
 	u32 head;
 
 	if (HAS_FORCE_WAKE(dev))
 		gen6_gt_force_wake_get(dev_priv);
+
+	if (I915_NEED_GFX_HWS(dev))
+		intel_ring_setup_status_page(ring);
+	else
+		ring_setup_phys_status_page(ring);
 
 	/* Stop the ring if it's running. */
 	I915_WRITE_CTL(ring, 0);
Line 398... Line 425...
 
 	/* Initialize the ring. This must happen _after_ we've cleared the ring
 	 * registers with the above sequence (the readback of the HEAD registers
 	 * also enforces ordering), otherwise the hw might lose the new ring
 	 * register values. */
-	I915_WRITE_START(ring, obj->gtt_offset);
+	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
 	I915_WRITE_CTL(ring,
 			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
 	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
-		     I915_READ_START(ring) == obj->gtt_offset &&
+		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
 		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
 		DRM_ERROR("%s initialization failed "
 				"ctl %08x head %08x tail %08x start %08x\n",
Line 423... Line 450...
 		ring->head = I915_READ_HEAD(ring);
 		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
 		ring->space = ring_space(ring);
 		ring->last_retired_head = -1;
+
+	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
 out:
 	if (HAS_FORCE_WAKE(dev))
 		gen6_gt_force_wake_put(dev_priv);
 
 	return ret;
 }
 
 static int
 init_pipe_control(struct intel_ring_buffer *ring)
 {
-	struct pipe_control *pc;
-	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (ring->private)
+	if (ring->scratch.obj)
 		return 0;
 
-	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
-	if (!pc)
-		return -ENOMEM;
-
-	obj = i915_gem_alloc_object(ring->dev, 4096);
-	if (obj == NULL) {
+	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
+	if (ring->scratch.obj == NULL) {
 		DRM_ERROR("Failed to allocate seqno page\n");
 		ret = -ENOMEM;
 		goto err;
 	}
 
-	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
 
-	ret = i915_gem_object_pin(obj, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
 	if (ret)
 		goto err_unref;
 
-	pc->gtt_offset = obj->gtt_offset;
-	pc->cpu_page =  (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096, PG_SW);
-	if (pc->cpu_page == NULL)
-		goto err_unpin;
+	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
+	ring->scratch.cpu_page = (void*)MapIoMem((addr_t)sg_page(ring->scratch.obj->pages->sgl),4096, PG_SW);
+	if (ring->scratch.cpu_page == NULL) {
+		ret = -ENOMEM;
+		goto err_unpin;
+	}
 
 	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
-			 ring->name, pc->gtt_offset);
-
-	pc->obj = obj;
-	ring->private = pc;
+			 ring->name, ring->scratch.gtt_offset);
 	return 0;
 
 err_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_unpin(ring->scratch.obj);
 err_unref:
-	drm_gem_object_unreference(&obj->base);
+	drm_gem_object_unreference(&ring->scratch.obj->base);
 err:
-	kfree(pc);
 	return ret;
 }
 
-static void
-cleanup_pipe_control(struct intel_ring_buffer *ring)
-{
-	struct pipe_control *pc = ring->private;
-	struct drm_i915_gem_object *obj;
-
-	if (!ring->private)
-		return;
-
-	obj = pc->obj;
-//	kunmap(obj->pages[0]);
-	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(&obj->base);
-
-	kfree(pc);
-	ring->private = NULL;
-}
-
 static int init_render_ring(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = init_ring_common(ring);
Line 551... Line 554...
 
 	if (INTEL_INFO(dev)->gen >= 6)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
 	if (HAS_L3_GPU_CACHE(dev))
-		I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
-
-    LEAVE();
+		I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 
 	return ret;
 }
 
 static void render_ring_cleanup(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 
-	if (!ring->private)
+	if (ring->scratch.obj == NULL)
 		return;
 
-	cleanup_pipe_control(ring);
+	if (INTEL_INFO(dev)->gen >= 5) {
+//       kunmap(sg_page(ring->scratch.obj->pages->sgl));
+		i915_gem_object_unpin(ring->scratch.obj);
+	}
+
+	drm_gem_object_unreference(&ring->scratch.obj->base);
+	ring->scratch.obj = NULL;
 }
 
 static void
 update_mboxes(struct intel_ring_buffer *ring,
 	    u32 mmio_offset)
 {
+/* NB: In order to be able to do semaphore MBOX updates for varying number
+ * of rings, it's easiest if we round up each individual update to a
+ * multiple of 2 (since ring updates must always be a multiple of 2)
+ * even though the actual update only requires 3 dwords.
+ */
+#define MBOX_UPDATE_DWORDS 4
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
Line 589... Line 603...
  * This acts like a signal in the canonical semaphore.
  */
 static int
 gen6_add_request(struct intel_ring_buffer *ring)
 {
-	u32 mbox1_reg;
-	u32 mbox2_reg;
-	int ret;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *useless;
+	int i, ret;
 
-	ret = intel_ring_begin(ring, 10);
+	ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
+				      MBOX_UPDATE_DWORDS) +
+				      4);
 	if (ret)
 		return ret;
+#undef MBOX_UPDATE_DWORDS
 
-	mbox1_reg = ring->signal_mbox[0];
-	mbox2_reg = ring->signal_mbox[1];
-
-	update_mboxes(ring, mbox1_reg);
-	update_mboxes(ring, mbox2_reg);
+	for_each_ring(useless, dev_priv, i) {
+		u32 mbox_reg = ring->signal_mbox[i];
+		if (mbox_reg != GEN6_NOSYNC)
+			update_mboxes(ring, mbox_reg);
+	}
+
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
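Note: with a fourth ring present, gen6_add_request() can no longer hardcode two mailbox writes (the old intel_ring_begin(ring, 10)). Assuming I915_NUM_RINGS is 4 once VECS exists, the new reservation works out to (I915_NUM_RINGS - 1) * MBOX_UPDATE_DWORDS + 4 = 3 * 4 + 4 = 16 dwords: up to three 4-dword mailbox updates (GEN6_NOSYNC entries are skipped at emit time but space is still reserved) plus the 4-dword MI_STORE_DWORD_INDEX / index / seqno / MI_USER_INTERRUPT request tail.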
Line 679... Line 698...
 } while (0)
 
 static int
 pc_render_add_request(struct intel_ring_buffer *ring)
 {
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
 	int ret;
 
 	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
Line 698... Line 716...
 		return ret;
 
 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 128; /* write to separate cachelines */
Line 717... Line 735...
 
 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
-	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
Line 751... Line 769...
 }
 
 static u32
 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
-	struct pipe_control *pc = ring->private;
-	return pc->cpu_page[0];
+	return ring->scratch.cpu_page[0];
 }
 
 static void
 pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
-	struct pipe_control *pc = ring->private;
-	pc->cpu_page[0] = seqno;
+	ring->scratch.cpu_page[0] = seqno;
 }
 
Line 773... Line 789...
 
 	if (!dev->irq_enabled)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	POSTING_READ(GTIMR);
-	}
+	if (ring->irq_refcount++ == 0)
+		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
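Note: here and in the put/gen6 variants below, the open-coded GTIMR read-modify-write is replaced by shared helpers. Judging purely from the code they replace, the helpers amount to the following sketch; the real definitions live in i915_irq.c and are not part of this diff:

	static void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
	{
		dev_priv->gt_irq_mask &= ~mask;		/* unmask the requested bits */
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);			/* flush the posted write */
	}

	static void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
	{
		dev_priv->gt_irq_mask |= mask;		/* mask the requested bits */
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}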
Line 791... Line 804...
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	POSTING_READ(GTIMR);
-	}
+	if (--ring->irq_refcount == 0)
+		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
Line 879... Line 889...
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	u32 mmio = 0;
 
-    ENTER();
-
 	/* The ring status page addresses are no longer next to the rest of
 	 * the ring registers as of gen7.
 	 */
 	if (IS_GEN7(dev)) {
Line 895... Line 903...
 			mmio = BLT_HWS_PGA_GEN7;
 			break;
 		case VCS:
 			mmio = BSD_HWS_PGA_GEN7;
 			break;
+		case VECS:
+			mmio = VEBOX_HWS_PGA_GEN7;
+			break;
 		}
 	} else if (IS_GEN6(ring->dev)) {
 		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
 	} else {
 		mmio = RING_HWS_PGA(ring->mmio_base);
 	}
 
 	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
 	POSTING_READ(mmio);
 
-    LEAVE();
+	/* Flush the TLB for this page */
+	if (INTEL_INFO(dev)->gen >= 6) {
+		u32 reg = RING_INSTPM(ring->mmio_base);
+		I915_WRITE(reg,
+			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+					      INSTPM_SYNC_FLUSH));
+		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+			     1000))
+			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+				  ring->name);
+	}
 }
 
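Note: the TLB flush added above relies on the driver's masked-bit idiom: for registers like INSTPM, the upper 16 bits of the written value select which of the lower 16 bits the write actually affects. Quoting the i915_reg.h helpers from memory (they are not part of this diff):

	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
	#define _MASKED_BIT_DISABLE(a)	((a) << 16)

After requesting INSTPM_TLB_INVALIDATE | INSTPM_SYNC_FLUSH, the code polls the same register until the INSTPM_SYNC_FLUSH bit self-clears, giving the invalidation up to 1 ms to complete before the new status page address is trusted.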
Line 961... Line 982...
 		gen6_gt_force_wake_get(dev_priv);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
-						GEN6_RENDER_L3_PARITY_ERROR));
+			I915_WRITE_IMR(ring,
+				       ~(ring->irq_enable_mask |
+					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
 		else
 			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
+		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
Line 984... Line 1004...
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+			I915_WRITE_IMR(ring,
+				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 		else
 			I915_WRITE_IMR(ring, ~0);
-		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
+		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 		gen6_gt_force_wake_put(dev_priv);
 }
 
+static bool
+hsw_vebox_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+		snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+hsw_vebox_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		I915_WRITE_IMR(ring, ~0);
+		snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
 static int
 i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
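Note: the new hsw_vebox_get_irq/hsw_vebox_put_irq added above mirror the GT ring versions, but the video-enhancement ring's user interrupt is routed through the PM interrupt registers rather than GTIMR; snb_enable_pm_irq/snb_disable_pm_irq are presumably the GEN6_PMIMR analogues of the ilk_*_gt_irq helpers sketched earlier (their definitions are likewise outside this diff).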
Line 1037... Line 1094...
 		intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
 		intel_ring_emit(ring, offset + len - 8);
 		intel_ring_emit(ring, MI_NOOP);
 		intel_ring_advance(ring);
 	} else {
-		struct drm_i915_gem_object *obj = ring->private;
-		u32 cs_offset = obj->gtt_offset;
+		u32 cs_offset = ring->scratch.gtt_offset;
 
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
Line 1118... Line 1174...
 		goto err;
 	}
 
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-	ret = i915_gem_object_pin(obj, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 	if (ret != 0) {
 		goto err_unref;
 	}
 
-	ring->status_page.gfx_addr = obj->gtt_offset;
+	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
     ring->status_page.page_addr = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096,PG_SW);
 	if (ring->status_page.page_addr == NULL) {
 		ret = -ENOMEM;
 		goto err_unpin;
 	}
 	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
Line 1146... Line 1201...
 	drm_gem_object_unreference(&obj->base);
 err:
 	return ret;
 }
 
-static int init_phys_hws_pga(struct intel_ring_buffer *ring)
+static int init_phys_status_page(struct intel_ring_buffer *ring)
 {
     struct drm_i915_private *dev_priv = ring->dev->dev_private;
-    u32 addr;
 
     if (!dev_priv->status_page_dmah) {
         dev_priv->status_page_dmah =
             drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
         if (!dev_priv->status_page_dmah)
             return -ENOMEM;
     }
 
-    addr = dev_priv->status_page_dmah->busaddr;
-    if (INTEL_INFO(ring->dev)->gen >= 4)
-        addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
-    I915_WRITE(HWS_PGA, addr);
-
     ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
Line 1190... Line 1239...
        ret = init_status_page(ring);
        if (ret)
            return ret;
 	} else {
 		BUG_ON(ring->id != RCS);
-		ret = init_phys_hws_pga(ring);
+		ret = init_phys_status_page(ring);
 		if (ret)
 			return ret;
 	}
 
Line 1208... Line 1257...
 		goto err_hws;
 	}
 
 	ring->obj = obj;
 
-	ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
 	if (ret)
 		goto err_unref;
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
 		goto err_unpin;
 
 	ring->virtual_start =
-		ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
 			   ring->size);
			   ring->size);
Line 1415... Line 1464...
1415
	u32 seqno;
1464
	u32 seqno;
1416
	int ret;
1465
	int ret;
Line 1417... Line 1466...
1417
 
1466
 
1418
	/* We need to add any requests required to flush the objects and ring */
1467
	/* We need to add any requests required to flush the objects and ring */
1419
	if (ring->outstanding_lazy_request) {
1468
	if (ring->outstanding_lazy_request) {
1420
		ret = i915_add_request(ring, NULL, NULL);
1469
		ret = i915_add_request(ring, NULL);
1421
		if (ret)
1470
		if (ret)
1422
			return ret;
1471
			return ret;
Line 1423... Line 1472...
1423
	}
1472
	}
Line 1489... Line 1538...
1489
	BUG_ON(ring->outstanding_lazy_request);
1538
	BUG_ON(ring->outstanding_lazy_request);
Line 1490... Line 1539...
1490
 
1539
 
1491
	if (INTEL_INFO(ring->dev)->gen >= 6) {
1540
	if (INTEL_INFO(ring->dev)->gen >= 6) {
1492
		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
1541
		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
-
 
1542
		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
-
 
1543
		if (HAS_VEBOX(ring->dev))
1493
		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
1544
			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
Line 1494... Line 1545...
1494
	}
1545
	}
-
 
1546
 
1495
 
1547
	ring->set_seqno(ring, seqno);
Line 1496... Line 1548...
1496
	ring->set_seqno(ring, seqno);
1548
	ring->hangcheck.seqno = seqno;
1497
}
1549
}
1498
 
1550
 
Line 1538... Line 1590...
1538
	 */
1590
	 */
1539
       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1591
       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1540
		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1592
		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1541
}
1593
}
Line 1542... Line 1594...
1542
 
1594
 
1543
static int gen6_ring_flush(struct intel_ring_buffer *ring,
1595
static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
1544
			   u32 invalidate, u32 flush)
1596
			   u32 invalidate, u32 flush)
1545
{
1597
{
1546
	uint32_t cmd;
1598
	uint32_t cmd;
Line 1610... Line 1662...
1610
       return 0;
1662
       return 0;
1611
}
1663
}
Line 1612... Line 1664...
1612
 
1664
 
Line 1613... Line 1665...
1613
/* Blitter support (SandyBridge+) */
1665
/* Blitter support (SandyBridge+) */
1614
 
1666
 
1615
static int blt_ring_flush(struct intel_ring_buffer *ring,
1667
static int gen6_ring_flush(struct intel_ring_buffer *ring,
-
 
1668
			  u32 invalidate, u32 flush)
1616
			  u32 invalidate, u32 flush)
1669
{
1617
{
1670
	struct drm_device *dev = ring->dev;
Line 1618... Line 1671...
1618
	uint32_t cmd;
1671
	uint32_t cmd;
1619
	int ret;
1672
	int ret;
Line 1635... Line 1688...
1635
	intel_ring_emit(ring, cmd);
1688
	intel_ring_emit(ring, cmd);
1636
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1689
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1637
	intel_ring_emit(ring, 0);
1690
	intel_ring_emit(ring, 0);
1638
	intel_ring_emit(ring, MI_NOOP);
1691
	intel_ring_emit(ring, MI_NOOP);
1639
	intel_ring_advance(ring);
1692
	intel_ring_advance(ring);
-
 
1693
 
-
 
1694
	if (IS_GEN7(dev) && flush)
-
 
1695
		return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
-
 
1696
 
1640
	return 0;
1697
	return 0;
1641
}
1698
}
Line 1642... Line 1699...
1642
 
1699
 
1643
int intel_init_render_ring_buffer(struct drm_device *dev)
1700
int intel_init_render_ring_buffer(struct drm_device *dev)
Line 1654... Line 1711...
1654
		ring->flush = gen7_render_ring_flush;
1711
		ring->flush = gen7_render_ring_flush;
1655
		if (INTEL_INFO(dev)->gen == 6)
1712
		if (INTEL_INFO(dev)->gen == 6)
1656
		ring->flush = gen6_render_ring_flush;
1713
		ring->flush = gen6_render_ring_flush;
1657
		ring->irq_get = gen6_ring_get_irq;
1714
		ring->irq_get = gen6_ring_get_irq;
1658
		ring->irq_put = gen6_ring_put_irq;
1715
		ring->irq_put = gen6_ring_put_irq;
1659
		ring->irq_enable_mask = GT_USER_INTERRUPT;
1716
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1660
		ring->get_seqno = gen6_ring_get_seqno;
1717
		ring->get_seqno = gen6_ring_get_seqno;
1661
		ring->set_seqno = ring_set_seqno;
1718
		ring->set_seqno = ring_set_seqno;
1662
		ring->sync_to = gen6_ring_sync;
1719
		ring->sync_to = gen6_ring_sync;
1663
		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
1720
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
1664
		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
1721
		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
1665
		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
1722
		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
-
 
1723
		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
-
 
1724
		ring->signal_mbox[RCS] = GEN6_NOSYNC;
1666
		ring->signal_mbox[0] = GEN6_VRSYNC;
1725
		ring->signal_mbox[VCS] = GEN6_VRSYNC;
1667
		ring->signal_mbox[1] = GEN6_BRSYNC;
1726
		ring->signal_mbox[BCS] = GEN6_BRSYNC;
-
 
1727
		ring->signal_mbox[VECS] = GEN6_VERSYNC;
1668
	} else if (IS_GEN5(dev)) {
1728
	} else if (IS_GEN5(dev)) {
1669
       ring->add_request = pc_render_add_request;
1729
       ring->add_request = pc_render_add_request;
1670
		ring->flush = gen4_render_ring_flush;
1730
		ring->flush = gen4_render_ring_flush;
1671
		ring->get_seqno = pc_render_get_seqno;
1731
		ring->get_seqno = pc_render_get_seqno;
1672
		ring->set_seqno = pc_render_set_seqno;
1732
		ring->set_seqno = pc_render_set_seqno;
1673
		ring->irq_get = gen5_ring_get_irq;
1733
		ring->irq_get = gen5_ring_get_irq;
1674
		ring->irq_put = gen5_ring_put_irq;
1734
		ring->irq_put = gen5_ring_put_irq;
1675
		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
1735
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
-
 
1736
					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
1676
	} else {
1737
	} else {
1677
		ring->add_request = i9xx_add_request;
1738
		ring->add_request = i9xx_add_request;
1678
		if (INTEL_INFO(dev)->gen < 4)
1739
		if (INTEL_INFO(dev)->gen < 4)
1679
			ring->flush = gen2_render_ring_flush;
1740
			ring->flush = gen2_render_ring_flush;
1680
		else
1741
		else
Line 1713... Line 1774...
1713
		if (obj == NULL) {
1774
		if (obj == NULL) {
1714
			DRM_ERROR("Failed to allocate batch bo\n");
1775
			DRM_ERROR("Failed to allocate batch bo\n");
1715
			return -ENOMEM;
1776
			return -ENOMEM;
1716
		}
1777
		}
Line 1717... Line 1778...
1717
 
1778
 
1718
		ret = i915_gem_object_pin(obj, 0, true, false);
1779
		ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
1719
		if (ret != 0) {
1780
		if (ret != 0) {
1720
			drm_gem_object_unreference(&obj->base);
1781
			drm_gem_object_unreference(&obj->base);
1721
			DRM_ERROR("Failed to ping batch bo\n");
1782
			DRM_ERROR("Failed to ping batch bo\n");
1722
			return ret;
1783
			return ret;
Line 1723... Line 1784...
1723
		}
1784
		}
-
 
1785
 
1724
 
1786
		ring->scratch.obj = obj;
Line 1725... Line 1787...
1725
		ring->private = obj;
1787
		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
1726
	}
1788
	}
Line 1787... Line 1849...
1787
			  " ring buffer\n");
1849
			  " ring buffer\n");
1788
		return -ENOMEM;
1850
		return -ENOMEM;
1789
	}
1851
	}
Line 1790... Line 1852...
1790
 
1852
 
1791
	if (!I915_NEED_GFX_HWS(dev)) {
1853
	if (!I915_NEED_GFX_HWS(dev)) {
1792
		ret = init_phys_hws_pga(ring);
1854
		ret = init_phys_status_page(ring);
1793
		if (ret)
1855
		if (ret)
1794
			return ret;
1856
			return ret;
Line 1795... Line 1857...
1795
	}
1857
	}
Line 1810... Line 1872...
1810
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
1872
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
1811
		ring->mmio_base = GEN6_BSD_RING_BASE;
1873
		ring->mmio_base = GEN6_BSD_RING_BASE;
1812
		/* gen6 bsd needs a special wa for tail updates */
1874
		/* gen6 bsd needs a special wa for tail updates */
1813
		if (IS_GEN6(dev))
1875
		if (IS_GEN6(dev))
1814
			ring->write_tail = gen6_bsd_ring_write_tail;
1876
			ring->write_tail = gen6_bsd_ring_write_tail;
1815
		ring->flush = gen6_ring_flush;
1877
		ring->flush = gen6_bsd_ring_flush;
1816
		ring->add_request = gen6_add_request;
1878
		ring->add_request = gen6_add_request;
1817
		ring->get_seqno = gen6_ring_get_seqno;
1879
		ring->get_seqno = gen6_ring_get_seqno;
1818
		ring->set_seqno = ring_set_seqno;
1880
		ring->set_seqno = ring_set_seqno;
1819
		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
1881
		ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1820
		ring->irq_get = gen6_ring_get_irq;
1882
		ring->irq_get = gen6_ring_get_irq;
1821
		ring->irq_put = gen6_ring_put_irq;
1883
		ring->irq_put = gen6_ring_put_irq;
1822
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1884
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1823
		ring->sync_to = gen6_ring_sync;
1885
		ring->sync_to = gen6_ring_sync;
1824
		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
1886
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
1825
		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
1887
		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
1826
		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
1888
		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
-
 
1889
		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
1827
		ring->signal_mbox[0] = GEN6_RVSYNC;
1890
		ring->signal_mbox[RCS] = GEN6_RVSYNC;
-
 
1891
		ring->signal_mbox[VCS] = GEN6_NOSYNC;
1828
		ring->signal_mbox[1] = GEN6_BVSYNC;
1892
		ring->signal_mbox[BCS] = GEN6_BVSYNC;
-
 
1893
		ring->signal_mbox[VECS] = GEN6_VEVSYNC;
1829
	} else {
1894
	} else {
1830
		ring->mmio_base = BSD_RING_BASE;
1895
		ring->mmio_base = BSD_RING_BASE;
1831
		ring->flush = bsd_ring_flush;
1896
		ring->flush = bsd_ring_flush;
1832
		ring->add_request = i9xx_add_request;
1897
		ring->add_request = i9xx_add_request;
1833
		ring->get_seqno = ring_get_seqno;
1898
		ring->get_seqno = ring_get_seqno;
1834
		ring->set_seqno = ring_set_seqno;
1899
		ring->set_seqno = ring_set_seqno;
1835
		if (IS_GEN5(dev)) {
1900
		if (IS_GEN5(dev)) {
1836
			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1901
			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
1837
			ring->irq_get = gen5_ring_get_irq;
1902
			ring->irq_get = gen5_ring_get_irq;
1838
			ring->irq_put = gen5_ring_put_irq;
1903
			ring->irq_put = gen5_ring_put_irq;
1839
		} else {
1904
		} else {
1840
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
1905
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
1841
			ring->irq_get = i9xx_ring_get_irq;
1906
			ring->irq_get = i9xx_ring_get_irq;
Line 1856... Line 1921...
1856
	ring->name = "blitter ring";
1921
	ring->name = "blitter ring";
1857
	ring->id = BCS;
1922
	ring->id = BCS;
Line 1858... Line 1923...
1858
 
1923
 
1859
	ring->mmio_base = BLT_RING_BASE;
1924
	ring->mmio_base = BLT_RING_BASE;
1860
	ring->write_tail = ring_write_tail;
1925
	ring->write_tail = ring_write_tail;
1861
	ring->flush = blt_ring_flush;
1926
	ring->flush = gen6_ring_flush;
1862
	ring->add_request = gen6_add_request;
1927
	ring->add_request = gen6_add_request;
1863
	ring->get_seqno = gen6_ring_get_seqno;
1928
	ring->get_seqno = gen6_ring_get_seqno;
1864
	ring->set_seqno = ring_set_seqno;
1929
	ring->set_seqno = ring_set_seqno;
1865
	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
1930
	ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
1866
	ring->irq_get = gen6_ring_get_irq;
1931
	ring->irq_get = gen6_ring_get_irq;
1867
	ring->irq_put = gen6_ring_put_irq;
1932
	ring->irq_put = gen6_ring_put_irq;
1868
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1933
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1869
	ring->sync_to = gen6_ring_sync;
1934
	ring->sync_to = gen6_ring_sync;
1870
	ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
1935
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
1871
	ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
1936
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
-
 
1937
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
1872
	ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
1938
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
1873
	ring->signal_mbox[0] = GEN6_RBSYNC;
1939
	ring->signal_mbox[RCS] = GEN6_RBSYNC;
-
 
1940
	ring->signal_mbox[VCS] = GEN6_VBSYNC;
-
 
1941
	ring->signal_mbox[BCS] = GEN6_NOSYNC;
-
 
1942
	ring->signal_mbox[VECS] = GEN6_VEBSYNC;
-
 
1943
	ring->init = init_ring_common;
-
 
1944
 
-
 
1945
	return intel_init_ring_buffer(dev, ring);
-
 
1946
}
-
 
1947
 
-
 
1948
int intel_init_vebox_ring_buffer(struct drm_device *dev)
-
 
1949
{
-
 
1950
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
1951
	struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
-
 
1952
 
-
 
1953
	ring->name = "video enhancement ring";
-
 
1954
	ring->id = VECS;
-
 
1955
 
-
 
1956
	ring->mmio_base = VEBOX_RING_BASE;
-
 
1957
	ring->write_tail = ring_write_tail;
-
 
1958
	ring->flush = gen6_ring_flush;
-
 
1959
	ring->add_request = gen6_add_request;
-
 
1960
	ring->get_seqno = gen6_ring_get_seqno;
-
 
1961
	ring->set_seqno = ring_set_seqno;
-
 
1962
	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-
 
1963
	ring->irq_get = hsw_vebox_get_irq;
-
 
1964
	ring->irq_put = hsw_vebox_put_irq;
-
 
1965
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-
 
1966
	ring->sync_to = gen6_ring_sync;
-
 
1967
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
-
 
1968
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
-
 
1969
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
-
 
1970
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-
 
1971
	ring->signal_mbox[RCS] = GEN6_RVESYNC;
-
 
1972
	ring->signal_mbox[VCS] = GEN6_VVESYNC;
-
 
1973
	ring->signal_mbox[BCS] = GEN6_BVESYNC;
1874
	ring->signal_mbox[1] = GEN6_VBSYNC;
1974
	ring->signal_mbox[VECS] = GEN6_NOSYNC;
Line 1875... Line 1975...
1875
	ring->init = init_ring_common;
1975
	ring->init = init_ring_common;
1876
 
1976