Subversion Repositories Kolibri OS


Rev 4539 → Rev 4560
Line 60... Line 60...
 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
 };
 
-static const u32 hpd_status_gen4[] = {
+static const u32 hpd_status_g4x[] = {
 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
Line 280... Line 280...
 				      pipe_name(pipe));
-	}
+		}
 	}
 }
+
+static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
+						  enum pipe pipe, bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	if (enable)
+		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
+	else
+		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
+	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+}
 
 /**
  * ibx_display_interrupt_update - update SDEIMR
  * @dev_priv: driver private
  * @interrupt_mask: mask of interrupt bits to update
Line 392... Line 407...
 
 	if (IS_GEN5(dev) || IS_GEN6(dev))
 		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
 	else if (IS_GEN7(dev))
 		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
+	else if (IS_GEN8(dev))
+		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
 
 done:
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
Line 452... Line 469...
 	return ret;
 }
 
 
 void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
 {
 		u32 reg = PIPESTAT(pipe);
Line 469... Line 486...
 	I915_WRITE(reg, pipestat);
 		POSTING_READ(reg);
 }
 
 void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
 {
 		u32 reg = PIPESTAT(pipe);
 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
Line 497... Line 514...
 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
 		return;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
-	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
 		if (INTEL_INFO(dev)->gen >= 4)
-		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
+		i915_enable_pipestat(dev_priv, PIPE_A,
+				     PIPE_LEGACY_BLC_EVENT_ENABLE);
 
Line 529... Line 547...
 	} else {
 		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
 	}
 }
+
+static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+	/* Gen2 doesn't have a hardware frame counter */
+	return 0;
+}
 
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long high_frame;
 	unsigned long low_frame;
-	u32 high1, high2, low;
+	u32 high1, high2, low, pixel, vbl_start;
 
 	if (!i915_pipe_enabled(dev, pipe)) {
 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
 				"pipe %c\n", pipe_name(pipe));
 		return 0;
 	}
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		struct intel_crtc *intel_crtc =
+			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+		const struct drm_display_mode *mode =
+			&intel_crtc->config.adjusted_mode;
+
+		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
+	} else {
+		enum transcoder cpu_transcoder =
+			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+		u32 htotal;
+
+		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
+		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
+
+		vbl_start *= htotal;
+	}
+
 	high_frame = PIPEFRAME(pipe);
 	low_frame = PIPEFRAMEPIXEL(pipe);
 
 	/*
 	 * High & low register fields aren't synchronized, so make sure
 	 * we get a low value that's stable across two reads of the high
 	 * register.
 	 */
 	do {
 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
-		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
+		low   = I915_READ(low_frame);
 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
 	} while (high1 != high2);
 
 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
+	pixel = low & PIPE_PIXEL_MASK;
 	low >>= PIPE_FRAME_LOW_SHIFT;
+
+	/*
+	 * The frame counter increments at beginning of active.
+	 * Cook up a vblank counter by also checking the pixel
+	 * counter against vblank start.
+	 */
Line 578... Line 627...
 	}
 
 	return I915_READ(reg);
+}
+
+/* raw reads, only for fast reads of display block, no need for forcewake etc. */
+#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
+#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
+
+static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t status;
+
+	if (INTEL_INFO(dev)->gen < 7) {
+		status = pipe == PIPE_A ?
+			DE_PIPEA_VBLANK :
+			DE_PIPEB_VBLANK;
+	} else {
+		switch (pipe) {
+		default:
+		case PIPE_A:
+			status = DE_PIPEA_VBLANK_IVB;
+			break;
+		case PIPE_B:
+			status = DE_PIPEB_VBLANK_IVB;
+			break;
+		case PIPE_C:
+			status = DE_PIPEC_VBLANK_IVB;
+			break;
+		}
+	}
+
+	return __raw_i915_read32(dev_priv, DEISR) & status;
 }
 
 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
-			     int *vpos, int *hpos)
+				    unsigned int flags, int *vpos, int *hpos,
+                    void *stime, void *etime)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 vbl = 0, position = 0;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+	int position;
 	int vbl_start, vbl_end, htotal, vtotal;
 	bool in_vbl = true;
 	int ret = 0;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-								      pipe);
+	unsigned long irqflags;
 
-	if (!i915_pipe_enabled(dev, pipe)) {
+	if (!intel_crtc->active) {
 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
 				 "pipe %c\n", pipe_name(pipe));
 		return 0;
 	}
 
-	/* Get vtotal. */
-	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
-
-	if (INTEL_INFO(dev)->gen >= 4) {
+	htotal = mode->crtc_htotal;
+	vtotal = mode->crtc_vtotal;
+	vbl_start = mode->crtc_vblank_start;
+	vbl_end = mode->crtc_vblank_end;
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		vbl_start = DIV_ROUND_UP(vbl_start, 2);
+		vbl_end /= 2;
+		vtotal /= 2;
+	}
+
+	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+	/*
+	 * Lock uncore.lock, as we will do multiple timing critical raw
+	 * register reads, potentially with preemption disabled, so the
+	 * following code must not block on uncore.lock.
+	 */
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+
+	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
 		/* No obvious pixelcount register. Only query vertical
 		 * scanout position from Display scan line register.
 		 */
-		position = I915_READ(PIPEDSL(pipe));
-
-		/* Decode into vertical scanout position. Don't have
-		 * horizontal scanout position.
-		 */
-		*vpos = position & 0x1fff;
-		*hpos = 0;
+		if (IS_GEN2(dev))
+			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+		else
+			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
+
+		if (HAS_PCH_SPLIT(dev)) {
+			/*
+			 * The scanline counter increments at the leading edge
+			 * of hsync, ie. it completely misses the active portion
+			 * of the line. Fix up the counter at both edges of vblank
+			 * to get a more accurate picture whether we're in vblank
+			 * or not.
+			 */
+			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
+			if ((in_vbl && position == vbl_start - 1) ||
+			    (!in_vbl && position == vbl_end - 1))
+				position = (position + 1) % vtotal;
+		} else {
+			/*
+			 * ISR vblank status bits don't work the way we'd want
+			 * them to work on non-PCH platforms (for
+			 * ilk_pipe_in_vblank_locked()), and there doesn't
+			 * appear any other way to determine if we're currently
+			 * in vblank.
+			 *
+			 * Instead let's assume that we're already in vblank if
+			 * we got called from the vblank interrupt and the
+			 * scanline counter value indicates that we're on the
+			 * line just prior to vblank start. This should result
+			 * in the correct answer, unless the vblank interrupt
+			 * delivery really got delayed for almost exactly one
+			 * full frame/field.
+			 */
+			if (flags & DRM_CALLED_FROM_VBLIRQ &&
+			    position == vbl_start - 1) {
+				position = (position + 1) % vtotal;
+
+				/* Signal this correction as "applied". */
+				ret |= 0x8;
+			}
+		}
 	} else {
 		/* Have access to pixelcount since start of frame.
 		 * We can split this into vertical and horizontal
 		 * scanout position.
 		 */
-		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
-
-		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
-		*vpos = position / htotal;
-		*hpos = position - (*vpos * htotal);
-	}
-
-	/* Query vblank area. */
-	vbl = I915_READ(VBLANK(cpu_transcoder));
-
-	/* Test position against vblank region. */
-	vbl_start = vbl & 0x1fff;
-	vbl_end = (vbl >> 16) & 0x1fff;
-
-	if ((*vpos < vbl_start) || (*vpos > vbl_end))
-		in_vbl = false;
+		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+		/* convert to pixel counts */
+		vbl_start *= htotal;
+		vbl_end *= htotal;
+		vtotal *= htotal;
+	}
+
+
+	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+	in_vbl = position >= vbl_start && position < vbl_end;
+
+	/*
+	 * While in vblank, position will be negative
+	 * counting up towards 0 at vbl_end. And outside
+	 * vblank, position will be positive counting
+	 * up since vbl_end.
+	 */
+	if (position >= vbl_start)
+		position -= vbl_end;
+	else
+		position += vtotal - vbl_end;
 
Line 673... Line 819...
 	}
 
 	/* Helper routine in DRM core does all the work: */
 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
 						     vblank_time, flags,
-						     crtc);
+						     crtc,
+						     &to_intel_crtc(crtc)->config.adjusted_mode);
 }
 
-static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
+static bool intel_hpd_irq_event(struct drm_device *dev,
+				struct drm_connector *connector)
 {
 	enum drm_connector_status old_status;
 
 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 	old_status = connector->status;
 
 	connector->status = connector->funcs->detect(connector, false);
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+	if (old_status == connector->status)
+		return false;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
 		      connector->base.id,
 		      drm_get_connector_name(connector),
-		      old_status, connector->status);
-	return (old_status != connector->status);
+		      drm_get_connector_status_name(old_status),
+		      drm_get_connector_status_name(connector->status));
+
+	return true;
Line 812... Line 965...
 			struct intel_ring_buffer *ring)
 {
 	if (ring->obj == NULL)
 		return;
 
-	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
+	trace_i915_gem_request_complete(ring);
 
 	wake_up_all(&ring->irq_queue);
 }
 
 static void gen6_pm_rps_work(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 						    rps.work);
 	u32 pm_iir;
-	u8 new_delay;
+	int new_delay, adj;
 
Line 839... Line 992...
 	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
 		return;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 
+	adj = dev_priv->rps.last_adj;
 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-		new_delay = dev_priv->rps.cur_delay + 1;
+		if (adj > 0)
+			adj *= 2;
+		else
+			adj = 1;
+		new_delay = dev_priv->rps.cur_delay + adj;
 
 		/*
 		 * For better performance, jump directly
 		 * to RPe if we're below it.
 		 */
-		if (IS_VALLEYVIEW(dev_priv->dev) &&
-		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
+		if (new_delay < dev_priv->rps.rpe_delay)
 			new_delay = dev_priv->rps.rpe_delay;
-	} else
-		new_delay = dev_priv->rps.cur_delay - 1;
+	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
+			new_delay = dev_priv->rps.rpe_delay;
+		else
+			new_delay = dev_priv->rps.min_delay;
+		adj = 0;
+	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+		if (adj < 0)
+			adj *= 2;
+		else
+			adj = -1;
+		new_delay = dev_priv->rps.cur_delay + adj;
+	} else { /* unknown event */
+		new_delay = dev_priv->rps.cur_delay;
+	}
 
 	/* sysfs frequency interfaces may have snuck in while servicing the
 	 * interrupt
 	 */
-	if (new_delay >= dev_priv->rps.min_delay &&
-	    new_delay <= dev_priv->rps.max_delay) {
+	new_delay = clamp_t(int, new_delay,
+			    dev_priv->rps.min_delay, dev_priv->rps.max_delay);
+	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
+
 		if (IS_VALLEYVIEW(dev_priv->dev))
 			valleyview_set_rps(dev_priv->dev, new_delay);
-		else
-		gen6_set_rps(dev_priv->dev, new_delay);
-	}
-
-	if (IS_VALLEYVIEW(dev_priv->dev)) {
-		/*
-		 * On VLV, when we enter RC6 we may not be at the minimum
-		 * voltage level, so arm a timer to check.  It should only
-		 * fire when there's activity or once after we've entered
-		 * RC6, and then won't be re-armed until the next RPS interrupt.
-		 */
-//		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
Line 892... Line 1052...
 static void ivybridge_parity_work(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 						    l3_parity.error_work);
 	u32 error_status, row, bank, subbank;
-	char *parity_event[5];
+	char *parity_event[6];
 	uint32_t misccpctl;
 	unsigned long flags;
+	uint8_t slice = 0;
 
 	/* We must turn off DOP level clock gating to access the L3 registers.
 	 * In order to prevent a get/put style interface, acquire struct mutex
 	 * any time we access those registers.
 	 */
 	mutex_lock(&dev_priv->dev->struct_mutex);
 
+	/* If we've screwed up tracking, just let the interrupt fire again */
+	if (WARN_ON(!dev_priv->l3_parity.which_slice))
+		goto out;
+
 	misccpctl = I915_READ(GEN7_MISCCPCTL);
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
 	POSTING_READ(GEN7_MISCCPCTL);
 
-	error_status = I915_READ(GEN7_L3CDERRST1);
+	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
+		u32 reg;
+
+		slice--;
+		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
+			break;
+
+		dev_priv->l3_parity.which_slice &= ~(1<<slice);
+
+		reg = GEN7_L3CDERRST1 + (slice * 0x200);
+
+		error_status = I915_READ(reg);
 	row = GEN7_PARITY_ERROR_ROW(error_status);
 	bank = GEN7_PARITY_ERROR_BANK(error_status);
 	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
 
-	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
-				    GEN7_L3CDERRST1_ENABLE);
-	POSTING_READ(GEN7_L3CDERRST1);
-
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	mutex_unlock(&dev_priv->dev->struct_mutex);
-
-	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
-		  row, bank, subbank);
-
+		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+		POSTING_READ(reg);
+
+		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
+			  slice, row, bank, subbank);
+
+	}
+
+	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+out:
+	WARN_ON(dev_priv->l3_parity.which_slice);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
-static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
+	if (!HAS_L3_DPF(dev))
+		return;
+
+	spin_lock(&dev_priv->irq_lock);
+	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
+	spin_unlock(&dev_priv->irq_lock);
+
Line 973... Line 1158...
 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
 		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
 		i915_handle_error(dev, false);
 	}
 
-	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
-		ivybridge_parity_error_irq_handler(dev);
+	if (gt_iir & GT_PARITY_ERROR(dev))
+		ivybridge_parity_error_irq_handler(dev, gt_iir);
+}
+
+static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
+				       struct drm_i915_private *dev_priv,
+				       u32 master_ctl)
+{
+	u32 rcs, bcs, vcs;
+	uint32_t tmp = 0;
+	irqreturn_t ret = IRQ_NONE;
+
+	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+		tmp = I915_READ(GEN8_GT_IIR(0));
+		if (tmp) {
+			ret = IRQ_HANDLED;
+			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
+			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+			if (rcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[RCS]);
+			if (bcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[BCS]);
+			I915_WRITE(GEN8_GT_IIR(0), tmp);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT0)!\n");
+	}
+
+	if (master_ctl & GEN8_GT_VCS1_IRQ) {
+		tmp = I915_READ(GEN8_GT_IIR(1));
+		if (tmp) {
+			ret = IRQ_HANDLED;
+			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
+			if (vcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[VCS]);
+			I915_WRITE(GEN8_GT_IIR(1), tmp);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT1)!\n");
+	}
+
+	if (master_ctl & GEN8_GT_VECS_IRQ) {
+		tmp = I915_READ(GEN8_GT_IIR(3));
+		if (tmp) {
+			ret = IRQ_HANDLED;
+			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
+			if (vcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[VECS]);
+			I915_WRITE(GEN8_GT_IIR(3), tmp);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT3)!\n");
+	}
+
+	return ret;
 }
 
Line 994... Line 1229...
 		return;
 
 	spin_lock(&dev_priv->irq_lock);
 	for (i = 1; i < HPD_NUM_PINS; i++) {
 
-		WARN(((hpd[i] & hotplug_trigger) &&
-		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
-		     "Received HPD interrupt although disabled\n");
+		WARN_ONCE(hpd[i] & hotplug_trigger &&
+			  dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
+			  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
+			  hotplug_trigger, i, hpd[i]);
 
 
Line 1048... Line 1284...
1048
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
1284
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
Line 1049... Line 1285...
1049
 
1285
 
1050
	wake_up_all(&dev_priv->gmbus_wait_queue);
1286
	wake_up_all(&dev_priv->gmbus_wait_queue);
Line -... Line 1287...
-
 
1287
}
-
 
1288
 
-
 
1289
#if defined(CONFIG_DEBUG_FS)
-
 
1290
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
-
 
1291
					 uint32_t crc0, uint32_t crc1,
-
 
1292
					 uint32_t crc2, uint32_t crc3,
-
 
1293
					 uint32_t crc4)
-
 
1294
{
-
 
1295
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1296
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-
 
1297
	struct intel_pipe_crc_entry *entry;
-
 
1298
	int head, tail;
-
 
1299
 
-
 
1300
	spin_lock(&pipe_crc->lock);
-
 
1301
 
-
 
1302
	if (!pipe_crc->entries) {
-
 
1303
		spin_unlock(&pipe_crc->lock);
-
 
1304
		DRM_ERROR("spurious interrupt\n");
-
 
1305
		return;
-
 
1306
	}
-
 
1307
 
-
 
1308
	head = pipe_crc->head;
-
 
1309
	tail = pipe_crc->tail;
-
 
1310
 
-
 
1311
	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
-
 
1312
		spin_unlock(&pipe_crc->lock);
-
 
1313
		DRM_ERROR("CRC buffer overflowing\n");
-
 
1314
		return;
-
 
1315
	}
-
 
1316
 
-
 
1317
	entry = &pipe_crc->entries[head];
-
 
1318
 
-
 
1319
	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
-
 
1320
	entry->crc[0] = crc0;
-
 
1321
	entry->crc[1] = crc1;
-
 
1322
	entry->crc[2] = crc2;
-
 
1323
	entry->crc[3] = crc3;
-
 
1324
	entry->crc[4] = crc4;
-
 
1325
 
-
 
1326
	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
-
 
1327
	pipe_crc->head = head;
-
 
1328
 
-
 
1329
	spin_unlock(&pipe_crc->lock);
-
 
1330
 
-
 
1331
	wake_up_interruptible(&pipe_crc->wq);
-
 
1332
}
-
 
1333
#else
-
 
1334
static inline void
-
 
1335
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
-
 
1336
			     uint32_t crc0, uint32_t crc1,
-
 
1337
			     uint32_t crc2, uint32_t crc3,
-
 
1338
			     uint32_t crc4) {}
-
 
1339
#endif
-
 
1340
 
-
 
1341
 
-
 
1342
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
-
 
1343
{
-
 
1344
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1345
 
-
 
1346
	display_pipe_crc_irq_handler(dev, pipe,
-
 
1347
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
-
 
1348
				     0, 0, 0, 0);
-
 
1349
}
-
 
1350
 
-
 
1351
static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
-
 
1352
{
-
 
1353
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1354
 
-
 
1355
	display_pipe_crc_irq_handler(dev, pipe,
-
 
1356
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
-
 
1357
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
-
 
1358
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
-
 
1359
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
-
 
1360
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
-
 
1361
}
-
 
1362
 
-
 
1363
static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
-
 
1364
{
-
 
1365
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1366
	uint32_t res1, res2;
-
 
1367
 
-
 
1368
	if (INTEL_INFO(dev)->gen >= 3)
-
 
1369
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
-
 
1370
	else
-
 
1371
		res1 = 0;
-
 
1372
 
-
 
1373
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
-
 
1374
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
-
 
1375
	else
-
 
1376
		res2 = 0;
-
 
1377
 
-
 
1378
	display_pipe_crc_irq_handler(dev, pipe,
-
 
1379
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
-
 
1380
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
-
 
1381
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
-
 
1382
				     res1, res2);
1051
}
1383
}
1052
 
1384
 
1053
/* The RPS events need forcewake, so we add them to a work queue and mask their
1385
/* The RPS events need forcewake, so we add them to a work queue and mask their
1054
 * IMR bits until the work is done. Other interrupts can be processed without
1386
 * IMR bits until the work is done. Other interrupts can be processed without
1055
 * the work queue. */
1387
 * the work queue. */
Line 1114... Line 1446...
1114
				I915_WRITE(reg, pipe_stats[pipe]);
1446
				I915_WRITE(reg, pipe_stats[pipe]);
1115
			}
1447
			}
1116
		}
1448
		}
1117
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1449
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Line 1118... Line -...
1118
 
-
 
1119
#if 0
1450
 
1120
		for_each_pipe(pipe) {
1451
		for_each_pipe(pipe) {
1121
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1452
//			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
Line 1122... Line 1453...
1122
				drm_handle_vblank(dev, pipe);
1453
//				drm_handle_vblank(dev, pipe);
1123
 
1454
 
1124
			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
1455
			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
1125
				intel_prepare_page_flip(dev, pipe);
1456
//				intel_prepare_page_flip(dev, pipe);
-
 
1457
//				intel_finish_page_flip(dev, pipe);
-
 
1458
			}
-
 
1459
 
1126
				intel_finish_page_flip(dev, pipe);
1460
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1127
			}
-
 
Line 1128... Line 1461...
1128
		}
1461
				i9xx_pipe_crc_irq_handler(dev, pipe);
1129
#endif
1462
		}
1130
 
1463
 
1131
		/* Consume port.  Then clear IIR or we'll miss events */
1464
		/* Consume port.  Then clear IIR or we'll miss events */
Line 1136... Line 1469...
1136
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1469
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1137
					 hotplug_status);
1470
					 hotplug_status);
Line 1138... Line 1471...
1138
 
1471
 
Line -... Line 1472...
-
 
1472
			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
-
 
1473
 
-
 
1474
			if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1139
			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
1475
				dp_aux_irq_handler(dev);
1140
 
1476
 
1141
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1477
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
Line 1142... Line 1478...
1142
			I915_READ(PORT_HOTPLUG_STAT);
1478
			I915_READ(PORT_HOTPLUG_STAT);
Line 1212... Line 1548...
1212
 
1548
 
1213
static void ivb_err_int_handler(struct drm_device *dev)
1549
static void ivb_err_int_handler(struct drm_device *dev)
1214
{
1550
{
1215
	struct drm_i915_private *dev_priv = dev->dev_private;
1551
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1552
	u32 err_int = I915_READ(GEN7_ERR_INT);
Line 1216... Line 1553...
1216
	u32 err_int = I915_READ(GEN7_ERR_INT);
1553
	enum pipe pipe;
1217
 
1554
 
Line -... Line 1555...
-
 
1555
	if (err_int & ERR_INT_POISON)
1218
	if (err_int & ERR_INT_POISON)
1556
		DRM_ERROR("Poison interrupt\n");
1219
		DRM_ERROR("Poison interrupt\n");
1557
 
-
 
1558
	for_each_pipe(pipe) {
1220
 
1559
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
-
 
1560
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
-
 
1561
								  false))
1221
	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
1562
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1222
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1563
						 pipe_name(pipe));
-
 
1564
		}
1223
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1565
 
-
 
1566
		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1224
 
1567
			if (IS_IVYBRIDGE(dev))
-
 
1568
				ivb_pipe_crc_irq_handler(dev, pipe);
1225
	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
1569
			else
1226
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
-
 
1227
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
-
 
1228
 
-
 
Line 1229... Line 1570...
1229
	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
1570
				hsw_pipe_crc_irq_handler(dev, pipe);
1230
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
1571
		}
Line 1231... Line 1572...
1231
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
1572
	}
Line 1297... Line 1638...
1297
}
1638
}
Line 1298... Line 1639...
1298
 
1639
 
1299
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1640
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1300
{
1641
{
-
 
1642
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 1301... Line 1643...
1301
	struct drm_i915_private *dev_priv = dev->dev_private;
1643
	enum pipe pipe;
1302
 
1644
 
Line 1303... Line 1645...
1303
	if (de_iir & DE_AUX_CHANNEL_A)
1645
	if (de_iir & DE_AUX_CHANNEL_A)
1304
		dp_aux_irq_handler(dev);
1646
		dp_aux_irq_handler(dev);
Line 1305... Line -...
1305
 
-
 
1306
	if (de_iir & DE_GSE)
-
 
1307
		intel_opregion_asle_intr(dev);
-
 
1308
 
-
 
1309
#if 0
-
 
1310
	if (de_iir & DE_PIPEA_VBLANK)
-
 
1311
		drm_handle_vblank(dev, 0);
-
 
1312
 
-
 
1313
	if (de_iir & DE_PIPEB_VBLANK)
1647
 
1314
		drm_handle_vblank(dev, 1);
1648
	if (de_iir & DE_GSE)
Line 1315... Line 1649...
1315
#endif
1649
		intel_opregion_asle_intr(dev);
1316
 
1650
 
1317
	if (de_iir & DE_POISON)
1651
	if (de_iir & DE_POISON)
1318
		DRM_ERROR("Poison interrupt\n");
1652
		DRM_ERROR("Poison interrupt\n");
1319
 
1653
 
1320
	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
1654
	for_each_pipe(pipe) {
1321
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1655
//		if (de_iir & DE_PIPE_VBLANK(pipe))
1322
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
-
 
1323
 
-
 
1324
	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
1656
//			drm_handle_vblank(dev, pipe);
1325
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
-
 
1326
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
-
 
Line -... Line 1657...
-
 
1657
 
-
 
1658
		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
-
 
1659
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
-
 
1660
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1327
#if 0
1661
						 pipe_name(pipe));
1328
	if (de_iir & DE_PLANEA_FLIP_DONE) {
1662
 
1329
		intel_prepare_page_flip(dev, 0);
1663
		if (de_iir & DE_PIPE_CRC_DONE(pipe))
-
 
1664
			i9xx_pipe_crc_irq_handler(dev, pipe);
1330
		intel_finish_page_flip_plane(dev, 0);
1665
 
1331
	}
-
 
Line 1332... Line 1666...
1332
 
1666
		/* plane/pipes map 1:1 on ilk+ */
1333
	if (de_iir & DE_PLANEB_FLIP_DONE) {
1667
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1334
		intel_prepare_page_flip(dev, 1);
1668
//			intel_prepare_page_flip(dev, pipe);
Line 1354... Line 1688...
1354
}
1688
}
Line 1355... Line 1689...
1355
 
1689
 
1356
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1690
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1357
{
1691
{
1358
	struct drm_i915_private *dev_priv = dev->dev_private;
1692
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 1359... Line 1693...
1359
	int i;
1693
	enum pipe i;
1360
 
1694
 
Line 1361... Line 1695...
1361
	if (de_iir & DE_ERR_INT_IVB)
1695
	if (de_iir & DE_ERR_INT_IVB)
1362
		ivb_err_int_handler(dev);
1696
		ivb_err_int_handler(dev);
Line 1363... Line 1697...
1363
 
1697
 
1364
	if (de_iir & DE_AUX_CHANNEL_A_IVB)
1698
	if (de_iir & DE_AUX_CHANNEL_A_IVB)
1365
		dp_aux_irq_handler(dev);
1699
		dp_aux_irq_handler(dev);
1366
 
1700
 
1367
	if (de_iir & DE_GSE_IVB)
1701
	if (de_iir & DE_GSE_IVB)
1368
		intel_opregion_asle_intr(dev);
1702
		intel_opregion_asle_intr(dev);
-
 
1703
 
-
 
1704
	for_each_pipe(i) {
1369
#if 0
1705
//		if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
1370
	for (i = 0; i < 3; i++) {
1706
//			drm_handle_vblank(dev, i);
1371
		if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
1707
 
1372
			drm_handle_vblank(dev, i);
1708
		/* plane/pipes map 1:1 on ilk+ */
1373
		if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
1709
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
1374
			intel_prepare_page_flip(dev, i);
-
 
Line 1375... Line 1710...
1375
			intel_finish_page_flip_plane(dev, i);
1710
//			intel_prepare_page_flip(dev, i);
1376
		}
1711
//			intel_finish_page_flip_plane(dev, i);
1377
	}
1712
		}
Line 1392... Line 1727...
1392
{
1727
{
1393
	struct drm_device *dev = (struct drm_device *) arg;
1728
	struct drm_device *dev = (struct drm_device *) arg;
1394
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1729
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1395
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1730
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1396
	irqreturn_t ret = IRQ_NONE;
1731
	irqreturn_t ret = IRQ_NONE;
1397
	bool err_int_reenable = false;
-
 
Line 1398... Line 1732...
1398
 
1732
 
Line 1399... Line 1733...
1399
	atomic_inc(&dev_priv->irq_received);
1733
	atomic_inc(&dev_priv->irq_received);
1400
 
1734
 
Line 1416... Line 1750...
1416
		sde_ier = I915_READ(SDEIER);
1750
		sde_ier = I915_READ(SDEIER);
1417
		I915_WRITE(SDEIER, 0);
1751
		I915_WRITE(SDEIER, 0);
1418
		POSTING_READ(SDEIER);
1752
		POSTING_READ(SDEIER);
1419
	}
1753
	}
Line 1420... Line -...
1420
 
-
 
1421
	/* On Haswell, also mask ERR_INT because we don't want to risk
-
 
1422
	 * generating "unclaimed register" interrupts from inside the interrupt
-
 
1423
	 * handler. */
-
 
1424
	if (IS_HASWELL(dev)) {
-
 
1425
		spin_lock(&dev_priv->irq_lock);
-
 
1426
		err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
-
 
1427
		if (err_int_reenable)
-
 
1428
			ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
-
 
1429
		spin_unlock(&dev_priv->irq_lock);
-
 
1430
	}
-
 
1431
 
1754
 
1432
	gt_iir = I915_READ(GTIIR);
1755
	gt_iir = I915_READ(GTIIR);
1433
	if (gt_iir) {
1756
	if (gt_iir) {
1434
		if (INTEL_INFO(dev)->gen >= 6)
1757
		if (INTEL_INFO(dev)->gen >= 6)
1435
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
1758
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
Line 1456... Line 1779...
1456
			I915_WRITE(GEN6_PMIIR, pm_iir);
1779
			I915_WRITE(GEN6_PMIIR, pm_iir);
1457
			ret = IRQ_HANDLED;
1780
			ret = IRQ_HANDLED;
1458
	}
1781
		}
1459
	}
1782
	}
Line 1460... Line -...
1460
 
-
 
1461
	if (err_int_reenable) {
-
 
1462
		spin_lock(&dev_priv->irq_lock);
-
 
1463
		if (ivb_can_enable_err_int(dev))
-
 
1464
			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
-
 
1465
		spin_unlock(&dev_priv->irq_lock);
-
 
1466
	}
-
 
1467
 
1783
 
1468
	I915_WRITE(DEIER, de_ier);
1784
	I915_WRITE(DEIER, de_ier);
1469
	POSTING_READ(DEIER);
1785
	POSTING_READ(DEIER);
1470
	if (!HAS_PCH_NOP(dev)) {
1786
	if (!HAS_PCH_NOP(dev)) {
1471
		I915_WRITE(SDEIER, sde_ier);
1787
		I915_WRITE(SDEIER, sde_ier);
1472
		POSTING_READ(SDEIER);
1788
		POSTING_READ(SDEIER);
Line 1473... Line 1789...
1473
	}
1789
	}
1474
 
1790
 
Line -... Line 1791...
-
 
1791
	return ret;
-
 
1792
}
-
 
1793
 
-
 
1794
static irqreturn_t gen8_irq_handler(int irq, void *arg)
-
 
1795
{
-
 
1796
	struct drm_device *dev = arg;
-
 
1797
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1798
	u32 master_ctl;
-
 
1799
	irqreturn_t ret = IRQ_NONE;
-
 
1800
	uint32_t tmp = 0;
-
 
1801
	enum pipe pipe;
-
 
1802
 
-
 
1803
	atomic_inc(&dev_priv->irq_received);
-
 
1804
 
-
 
1805
	master_ctl = I915_READ(GEN8_MASTER_IRQ);
-
 
1806
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
-
 
1807
	if (!master_ctl)
-
 
1808
		return IRQ_NONE;
-
 
1809
 
-
 
1810
	I915_WRITE(GEN8_MASTER_IRQ, 0);
-
 
1811
	POSTING_READ(GEN8_MASTER_IRQ);
-
 
1812
 
-
 
1813
	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
-
 
1814
 
-
 
1815
	if (master_ctl & GEN8_DE_MISC_IRQ) {
-
 
1816
		tmp = I915_READ(GEN8_DE_MISC_IIR);
-
 
1817
		if (tmp & GEN8_DE_MISC_GSE)
-
 
1818
			intel_opregion_asle_intr(dev);
-
 
1819
		else if (tmp)
-
 
1820
			DRM_ERROR("Unexpected DE Misc interrupt\n");
-
 
1821
		else
-
 
1822
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
-
 
1823
 
-
 
1824
		if (tmp) {
-
 
1825
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
-
 
1826
			ret = IRQ_HANDLED;
-
 
1827
		}
-
 
1828
	}
-
 
1829
 
-
 
1830
	if (master_ctl & GEN8_DE_PORT_IRQ) {
-
 
1831
		tmp = I915_READ(GEN8_DE_PORT_IIR);
-
 
1832
		if (tmp & GEN8_AUX_CHANNEL_A)
-
 
1833
			dp_aux_irq_handler(dev);
-
 
1834
		else if (tmp)
-
 
1835
			DRM_ERROR("Unexpected DE Port interrupt\n");
-
 
1836
		else
-
 
1837
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
-
 
1838
 
-
 
1839
		if (tmp) {
-
 
1840
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
-
 
1841
			ret = IRQ_HANDLED;
-
 
1842
		}
-
 
1843
	}
-
 
1844
 
-
 
1845
	for_each_pipe(pipe) {
-
 
1846
		uint32_t pipe_iir;
-
 
1847
 
-
 
1848
		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
-
 
1849
			continue;
-
 
1850
 
-
 
1851
		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
-
 
1852
//		if (pipe_iir & GEN8_PIPE_VBLANK)
-
 
1853
//			drm_handle_vblank(dev, pipe);
-
 
1854
 
-
 
1855
		if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
-
 
1856
//			intel_prepare_page_flip(dev, pipe);
-
 
1857
//			intel_finish_page_flip_plane(dev, pipe);
-
 
1858
		}
-
 
1859
 
-
 
1860
		if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
-
 
1861
			hsw_pipe_crc_irq_handler(dev, pipe);
-
 
1862
 
-
 
1863
		if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
-
 
1864
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
-
 
1865
								  false))
-
 
1866
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
-
 
1867
						 pipe_name(pipe));
-
 
1868
		}
-
 
1869
 
-
 
1870
		if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
-
 
1871
			DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
-
 
1872
				  pipe_name(pipe),
-
 
1873
				  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
-
 
1874
		}
-
 
1875
 
-
 
1876
		if (pipe_iir) {
-
 
1877
			ret = IRQ_HANDLED;
-
 
1878
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
-
 
1879
		} else
-
 
1880
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
-
 
1881
	}
-
 
1882
 
-
 
1883
	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
-
 
1884
		/*
-
 
1885
		 * FIXME(BDW): Assume for now that the new interrupt handling
-
 
1886
		 * scheme also closed the SDE interrupt handling race we've seen
-
 
1887
		 * on older pch-split platforms. But this needs testing.
-
 
1888
		 */
-
 
1889
		u32 pch_iir = I915_READ(SDEIIR);
-
 
1890
 
-
 
1891
		cpt_irq_handler(dev, pch_iir);
-
 
1892
 
-
 
1893
		if (pch_iir) {
-
 
1894
			I915_WRITE(SDEIIR, pch_iir);
-
 
1895
			ret = IRQ_HANDLED;
-
 
1896
		}
-
 
1897
	}
-
 
1898
 
-
 
1899
	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
-
 
1900
	POSTING_READ(GEN8_MASTER_IRQ);
-
 
1901
 
1475
	return ret;
1902
	return ret;
1476
}
1903
}
1477
 
1904
 
1478
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
1905
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
1479
			       bool reset_completed)
1906
			       bool reset_completed)
Line 1555... Line 1982...
 			 * the counter increment.
 			 */
 			atomic_inc(&dev_priv->gpu_error.reset_counter);
 
 		} else {
-			atomic_set(&error->reset_counter, I915_WEDGED);
+			atomic_set_mask(I915_WEDGED, &error->reset_counter);
 	}
 
 		/*
Line 1785... Line 2212...
 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
-						     DE_PIPE_VBLANK_ILK(pipe);
+						     DE_PIPE_VBLANK(pipe);
 
 	if (!i915_pipe_enabled(dev, pipe))
 		return -EINVAL;
Line 1808... Line 2235...
 	if (!i915_pipe_enabled(dev, pipe))
 		return -EINVAL;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	imr = I915_READ(VLV_IMR);
-	if (pipe == 0)
+	if (pipe == PIPE_A)
 		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
 	else
 		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 	I915_WRITE(VLV_IMR, imr);
Line 1820... Line 2247...
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	return 0;
+}
+
+static int gen8_enable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
+	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	return 0;
 }
 
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
Line 1843... Line 2286...
 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
-						     DE_PIPE_VBLANK_ILK(pipe);
+						     DE_PIPE_VBLANK(pipe);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	ironlake_disable_display_irq(dev_priv, bit);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Line 1860... Line 2303...
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	i915_disable_pipestat(dev_priv, pipe,
 			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
 	imr = I915_READ(VLV_IMR);
-	if (pipe == 0)
+	if (pipe == PIPE_A)
 		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
 	else
 		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 	I915_WRITE(VLV_IMR, imr);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static void gen8_disable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
+	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 static u32
 ring_last_seqno(struct intel_ring_buffer *ring)
 {
Line 2046... Line 2504...
 				 */
 				ring->hangcheck.action = ring_stuck(ring,
 								    acthd);
 
 				switch (ring->hangcheck.action) {
+				case HANGCHECK_IDLE:
 				case HANGCHECK_WAIT:
 					break;
 				case HANGCHECK_ACTIVE:
 					ring->hangcheck.score += BUSY;
Line 2061... Line 2520...
 					stuck[i] = true;
 					break;
 				}
 			}
 		} else {
+			ring->hangcheck.action = HANGCHECK_ACTIVE;
+
 			/* Gradually reduce the count so that we catch DoS
 			 * attempts across multiple batches.
 			 */
 			if (ring->hangcheck.score > 0)
 				ring->hangcheck.score--;
Line 2173... Line 2634...
2173
	I915_WRITE(VLV_IMR, 0xffffffff);
2634
	I915_WRITE(VLV_IMR, 0xffffffff);
2174
	I915_WRITE(VLV_IER, 0x0);
2635
	I915_WRITE(VLV_IER, 0x0);
2175
	POSTING_READ(VLV_IER);
2636
	POSTING_READ(VLV_IER);
2176
}
2637
}
Line -... Line 2638...
-
 
2638
 
-
 
2639
static void gen8_irq_preinstall(struct drm_device *dev)
-
 
2640
{
-
 
2641
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2642
	int pipe;
-
 
2643
 
-
 
2644
	atomic_set(&dev_priv->irq_received, 0);
-
 
2645
 
-
 
2646
	I915_WRITE(GEN8_MASTER_IRQ, 0);
-
 
2647
	POSTING_READ(GEN8_MASTER_IRQ);
-
 
2648
 
-
 
2649
	/* IIR can theoretically queue up two events. Be paranoid */
-
 
2650
#define GEN8_IRQ_INIT_NDX(type, which) do { \
-
 
2651
		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
-
 
2652
		POSTING_READ(GEN8_##type##_IMR(which)); \
-
 
2653
		I915_WRITE(GEN8_##type##_IER(which), 0); \
-
 
2654
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
-
 
2655
		POSTING_READ(GEN8_##type##_IIR(which)); \
-
 
2656
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
-
 
2657
	} while (0)
-
 
2658
 
-
 
2659
#define GEN8_IRQ_INIT(type) do { \
-
 
2660
		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
-
 
2661
		POSTING_READ(GEN8_##type##_IMR); \
-
 
2662
		I915_WRITE(GEN8_##type##_IER, 0); \
-
 
2663
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
-
 
2664
		POSTING_READ(GEN8_##type##_IIR); \
-
 
2665
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
-
 
2666
	} while (0)
-
 
2667
 
-
 
2668
	GEN8_IRQ_INIT_NDX(GT, 0);
-
 
2669
	GEN8_IRQ_INIT_NDX(GT, 1);
-
 
2670
	GEN8_IRQ_INIT_NDX(GT, 2);
-
 
2671
	GEN8_IRQ_INIT_NDX(GT, 3);
-
 
2672
 
-
 
2673
	for_each_pipe(pipe) {
-
 
2674
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
-
 
2675
	}
-
 
2676
 
-
 
2677
	GEN8_IRQ_INIT(DE_PORT);
-
 
2678
	GEN8_IRQ_INIT(DE_MISC);
-
 
2679
	GEN8_IRQ_INIT(PCU);
-
 
2680
#undef GEN8_IRQ_INIT
-
 
2681
#undef GEN8_IRQ_INIT_NDX
-
 
2682
 
-
 
2683
	POSTING_READ(GEN8_PCU_IIR);
-
 
2684
 
-
 
2685
	ibx_irq_preinstall(dev);
-
 
2686
}
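
The preinstall hunk above leans on C token pasting: GEN8_##type##_IMR(which) becomes GEN8_GT_IMR(0), GEN8_DE_PIPE_IMR(pipe), and so on, and each register block is masked, disabled, and has its IIR cleared twice because "IIR can theoretically queue up two events". The minimal stand-alone sketch below shows the same pasting pattern; REG_WRITE and IRQ_RESET_NDX are invented names for illustration and only mimic, rather than reuse, the driver's I915_WRITE sequence.

/* Hypothetical stand-alone sketch of the "##" token-pasting pattern used by
 * GEN8_IRQ_INIT_NDX above.  REG_WRITE and IRQ_RESET_NDX are invented names;
 * the real driver writes hardware registers via I915_WRITE instead. */
#include <stdio.h>

/* Pretend register write: just log the synthesized register name and value. */
#define REG_WRITE(reg, val) printf("%-12s <= 0x%08x\n", #reg, (unsigned)(val))

/* type##_IMR(which) pastes into e.g. GT_IMR(0), mirroring GEN8_##type##_IMR(which). */
#define IRQ_RESET_NDX(type, which) do { \
	REG_WRITE(type##_IMR(which), 0xffffffff); /* mask every source      */ \
	REG_WRITE(type##_IER(which), 0);          /* disable every source   */ \
	REG_WRITE(type##_IIR(which), 0xffffffff); /* clear pending twice:   */ \
	REG_WRITE(type##_IIR(which), 0xffffffff); /* IIR can queue 2 events */ \
} while (0)

int main(void)
{
	IRQ_RESET_NDX(GT, 0);	/* expands to GT_IMR(0), GT_IER(0), GT_IIR(0) */
	IRQ_RESET_NDX(GT, 1);
	return 0;
}

Running it just prints the synthesized register names, which makes the pasting visible without any hardware access.
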
2177
 
2687
 
2178
static void ibx_hpd_irq_setup(struct drm_device *dev)
2688
static void ibx_hpd_irq_setup(struct drm_device *dev)
2179
{
2689
{
2180
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2690
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2181
	struct drm_mode_config *mode_config = &dev->mode_config;
2691
	struct drm_mode_config *mode_config = &dev->mode_config;
Line 2237... Line 2747...
2237
	u32 pm_irqs, gt_irqs;
2747
	u32 pm_irqs, gt_irqs;
Line 2238... Line 2748...
2238
 
2748
 
Line 2239... Line 2749...
2239
	pm_irqs = gt_irqs = 0;
2749
	pm_irqs = gt_irqs = 0;
2240
 
2750
 
2241
	dev_priv->gt_irq_mask = ~0;
2751
	dev_priv->gt_irq_mask = ~0;
2242
	if (HAS_L3_GPU_CACHE(dev)) {
2752
	if (HAS_L3_DPF(dev)) {
2243
		/* L3 parity interrupt is always unmasked. */
2753
		/* L3 parity interrupt is always unmasked. */
2244
		dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2754
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
Line 2245... Line 2755...
2245
		gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2755
		gt_irqs |= GT_PARITY_ERROR(dev);
2246
	}
2756
	}
2247
 
2757
 
Line 2289... Line 2799...
2289
 
2799
 
2290
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2800
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2291
	} else {
2801
	} else {
2292
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2802
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
-
 
2803
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2293
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2804
				DE_AUX_CHANNEL_A |
2294
				DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2805
				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
-
 
2806
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
2295
				DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
2807
				DE_POISON);
2296
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
2808
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
Line 2297... Line 2809...
2297
	}
2809
	}
Line 2324... Line 2836...
2324
 
2836
 
2325
static int valleyview_irq_postinstall(struct drm_device *dev)
2837
static int valleyview_irq_postinstall(struct drm_device *dev)
2326
{
2838
{
2327
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2839
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2328
	u32 enable_mask;
2840
	u32 enable_mask;
-
 
2841
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
2329
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2842
		PIPE_CRC_DONE_ENABLE;
Line 2330... Line 2843...
2330
	unsigned long irqflags;
2843
	unsigned long irqflags;
2331
 
2844
 
2332
	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2845
	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
Line 2354... Line 2867...
2354
	POSTING_READ(VLV_IER);
2867
	POSTING_READ(VLV_IER);
Line 2355... Line 2868...
2355
 
2868
 
2356
	/* Interrupt setup is already guaranteed to be single-threaded, this is
2869
	/* Interrupt setup is already guaranteed to be single-threaded, this is
2357
	 * just to make the assert_spin_locked check happy. */
2870
	 * just to make the assert_spin_locked check happy. */
2358
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2871
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2359
	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2872
	i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
2360
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2873
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
2361
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2874
	i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
Line 2362... Line 2875...
2362
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2875
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2363
 
2876
 
Line 2375... Line 2888...
2375
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2888
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
Line 2376... Line 2889...
2376
 
2889
 
2377
	return 0;
2890
	return 0;
Line -... Line 2891...
-
 
2891
}
-
 
2892
 
-
 
2893
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
-
 
2894
{
-
 
2895
	int i;
-
 
2896
 
-
 
2897
	/* These are interrupts we'll toggle with the ring mask register */
-
 
2898
	uint32_t gt_interrupts[] = {
-
 
2899
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
-
 
2900
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
-
 
2901
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
-
 
2902
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
-
 
2903
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
-
 
2904
		0,
-
 
2905
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
-
 
2906
		};
-
 
2907
 
-
 
2908
	for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
-
 
2909
		u32 tmp = I915_READ(GEN8_GT_IIR(i));
-
 
2910
		if (tmp)
-
 
2911
			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
-
 
2912
				  i, tmp);
-
 
2913
		I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
-
 
2914
		I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
-
 
2915
	}
-
 
2916
	POSTING_READ(GEN8_GT_IER(0));
-
 
2917
}
-
 
2918
 
-
 
2919
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
-
 
2920
{
-
 
2921
	struct drm_device *dev = dev_priv->dev;
-
 
2922
	uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
-
 
2923
		GEN8_PIPE_CDCLK_CRC_DONE |
-
 
2924
		GEN8_PIPE_FIFO_UNDERRUN |
-
 
2925
		GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
-
 
2926
	uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
-
 
2927
	int pipe;
-
 
2928
	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
-
 
2929
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
-
 
2930
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
-
 
2931
 
-
 
2932
	for_each_pipe(pipe) {
-
 
2933
		u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
-
 
2934
		if (tmp)
-
 
2935
			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
-
 
2936
				  pipe, tmp);
-
 
2937
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
-
 
2938
		I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
-
 
2939
	}
-
 
2940
	POSTING_READ(GEN8_DE_PIPE_ISR(0));
-
 
2941
 
-
 
2942
	I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
-
 
2943
	I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
-
 
2944
	POSTING_READ(GEN8_DE_PORT_IER);
-
 
2945
}
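
In the hunk above each pipe's IER is loaded with de_pipe_enables while de_irq_mask[] (and therefore IMR) keeps GEN8_PIPE_VBLANK set, so vblank interrupts appear to be armed but stay silent until the gen8 vblank enable path seen at the start of this diff clears that mask bit and rewrites IMR. The stand-alone sketch below models that IER/IMR gating under that reading; the register struct, bit values, and helper names are invented for the example.

/* Minimal model (assumption, not driver code) of IER/IMR gating: an event is
 * reported only if it is latched in IIR, enabled in IER and unmasked in IMR.
 * Bit positions here are stand-ins, not the real GEN8 register layout. */
#include <stdint.h>
#include <stdio.h>

#define PIPE_VBLANK      (1u << 0)   /* stand-in for GEN8_PIPE_VBLANK */
#define PIPE_FLIP_DONE   (1u << 4)   /* stand-in for GEN8_PIPE_FLIP_DONE */

struct de_pipe_regs { uint32_t imr, ier, iir; };

static int reported(const struct de_pipe_regs *r, uint32_t bit)
{
	return (r->iir & bit) && (r->ier & bit) && !(r->imr & bit);
}

int main(void)
{
	struct de_pipe_regs pipe = {
		.imr = PIPE_VBLANK,                  /* vblank masked after postinstall */
		.ier = PIPE_VBLANK | PIPE_FLIP_DONE, /* but enabled, like de_pipe_enables */
		.iir = PIPE_VBLANK,                  /* a vblank event has latched */
	};

	printf("vblank reported while masked: %d\n", reported(&pipe, PIPE_VBLANK)); /* 0 */
	pipe.imr &= ~PIPE_VBLANK;	/* what the vblank-enable path does to IMR */
	printf("vblank reported after unmask: %d\n", reported(&pipe, PIPE_VBLANK)); /* 1 */
	return 0;
}
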
-
 
2946
 
-
 
2947
static int gen8_irq_postinstall(struct drm_device *dev)
-
 
2948
{
-
 
2949
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2950
 
-
 
2951
	gen8_gt_irq_postinstall(dev_priv);
-
 
2952
	gen8_de_irq_postinstall(dev_priv);
-
 
2953
 
-
 
2954
	ibx_irq_postinstall(dev);
-
 
2955
 
-
 
2956
	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
-
 
2957
	POSTING_READ(GEN8_MASTER_IRQ);
-
 
2958
 
-
 
2959
	return 0;
-
 
2960
}
-
 
2961
 
-
 
2962
static void gen8_irq_uninstall(struct drm_device *dev)
-
 
2963
{
-
 
2964
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2965
	int pipe;
-
 
2966
 
-
 
2967
	if (!dev_priv)
-
 
2968
		return;
-
 
2969
 
-
 
2970
	atomic_set(&dev_priv->irq_received, 0);
-
 
2971
 
-
 
2972
	I915_WRITE(GEN8_MASTER_IRQ, 0);
-
 
2973
 
-
 
2974
#define GEN8_IRQ_FINI_NDX(type, which) do { \
-
 
2975
		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
-
 
2976
		I915_WRITE(GEN8_##type##_IER(which), 0); \
-
 
2977
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
-
 
2978
	} while (0)
-
 
2979
 
-
 
2980
#define GEN8_IRQ_FINI(type) do { \
-
 
2981
		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
-
 
2982
		I915_WRITE(GEN8_##type##_IER, 0); \
-
 
2983
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
-
 
2984
	} while (0)
-
 
2985
 
-
 
2986
	GEN8_IRQ_FINI_NDX(GT, 0);
-
 
2987
	GEN8_IRQ_FINI_NDX(GT, 1);
-
 
2988
	GEN8_IRQ_FINI_NDX(GT, 2);
-
 
2989
	GEN8_IRQ_FINI_NDX(GT, 3);
-
 
2990
 
-
 
2991
	for_each_pipe(pipe) {
-
 
2992
		GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
-
 
2993
	}
-
 
2994
 
-
 
2995
	GEN8_IRQ_FINI(DE_PORT);
-
 
2996
	GEN8_IRQ_FINI(DE_MISC);
-
 
2997
	GEN8_IRQ_FINI(PCU);
-
 
2998
#undef GEN8_IRQ_FINI
-
 
2999
#undef GEN8_IRQ_FINI_NDX
-
 
3000
 
-
 
3001
	POSTING_READ(GEN8_PCU_IIR);
2378
}
3002
}
2379
 
3003
 
2380
static void valleyview_irq_uninstall(struct drm_device *dev)
3004
static void valleyview_irq_uninstall(struct drm_device *dev)
2381
{
3005
{
Line 2449... Line 3073...
2449
}
3073
}
Line 2450... Line 3074...
2450
 
3074
 
2451
static int i8xx_irq_postinstall(struct drm_device *dev)
3075
static int i8xx_irq_postinstall(struct drm_device *dev)
2452
{
3076
{
-
 
3077
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Line 2453... Line 3078...
2453
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3078
	unsigned long irqflags;
2454
 
3079
 
Line 2455... Line 3080...
2455
	I915_WRITE16(EMR,
3080
	I915_WRITE16(EMR,
Line 2469... Line 3094...
2469
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3094
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2470
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3095
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2471
		     I915_USER_INTERRUPT);
3096
		     I915_USER_INTERRUPT);
2472
	POSTING_READ16(IER);
3097
	POSTING_READ16(IER);
Line -... Line 3098...
-
 
3098
 
-
 
3099
	/* Interrupt setup is already guaranteed to be single-threaded, this is
-
 
3100
	 * just to make the assert_spin_locked check happy. */
-
 
3101
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
3102
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
-
 
3103
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
-
 
3104
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2473
 
3105
 
2474
	return 0;
3106
	return 0;
Line 2475... Line 3107...
2475
}
3107
}
2476
 
3108
 
2477
/*
3109
/*
2478
 * Returns true when a page flip has completed.
3110
 * Returns true when a page flip has completed.
2479
 */
3111
 */
2480
static bool i8xx_handle_vblank(struct drm_device *dev,
3112
static bool i8xx_handle_vblank(struct drm_device *dev,
2481
			       int pipe, u16 iir)
3113
			       int plane, int pipe, u32 iir)
2482
{
3114
{
Line 2483... Line 3115...
2483
	drm_i915_private_t *dev_priv = dev->dev_private;
3115
	drm_i915_private_t *dev_priv = dev->dev_private;
2484
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
3116
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
Line 2485... Line 3117...
2485
 
3117
 
Line 2555... Line 3187...
2555
		i915_update_dri1_breadcrumb(dev);
3187
		i915_update_dri1_breadcrumb(dev);
Line 2556... Line 3188...
2556
 
3188
 
2557
		if (iir & I915_USER_INTERRUPT)
3189
		if (iir & I915_USER_INTERRUPT)
Line -... Line 3190...
-
 
3190
			notify_ring(dev, &dev_priv->ring[RCS]);
-
 
3191
 
-
 
3192
		for_each_pipe(pipe) {
-
 
3193
			int plane = pipe;
-
 
3194
			if (HAS_FBC(dev))
2558
			notify_ring(dev, &dev_priv->ring[RCS]);
3195
				plane = !plane;
2559
 
3196
 
2560
		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
3197
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2561
		    i8xx_handle_vblank(dev, 0, iir))
3198
			    i8xx_handle_vblank(dev, plane, pipe, iir))
2562
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
3199
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
2563
 
3200
 
2564
		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
-
 
-
 
3201
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Line 2565... Line 3202...
2565
		    i8xx_handle_vblank(dev, 1, iir))
3202
				i9xx_pipe_crc_irq_handler(dev, pipe);
2566
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
3203
		}
Line 2567... Line 3204...
2567
 
3204
 
Line 2610... Line 3247...
2610
 
3247
 
2611
static int i915_irq_postinstall(struct drm_device *dev)
3248
static int i915_irq_postinstall(struct drm_device *dev)
2612
{
3249
{
2613
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3250
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
3251
	u32 enable_mask;
Line 2614... Line 3252...
2614
	u32 enable_mask;
3252
	unsigned long irqflags;
Line 2615... Line 3253...
2615
 
3253
 
2616
	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3254
	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
Line 2645... Line 3283...
2645
	I915_WRITE(IER, enable_mask);
3283
	I915_WRITE(IER, enable_mask);
2646
	POSTING_READ(IER);
3284
	POSTING_READ(IER);
Line 2647... Line 3285...
2647
 
3285
 
Line -... Line 3286...
-
 
3286
	i915_enable_asle_pipestat(dev);
-
 
3287
 
-
 
3288
	/* Interrupt setup is already guaranteed to be single-threaded, this is
-
 
3289
	 * just to make the assert_spin_locked check happy. */
-
 
3290
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
3291
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
-
 
3292
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
2648
	i915_enable_asle_pipestat(dev);
3293
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2649
 
3294
 
Line 2650... Line 3295...
2650
	return 0;
3295
	return 0;
2651
}
3296
}
Line 2747... Line 3392...
2747
		if (iir & I915_USER_INTERRUPT)
3392
		if (iir & I915_USER_INTERRUPT)
2748
			notify_ring(dev, &dev_priv->ring[RCS]);
3393
			notify_ring(dev, &dev_priv->ring[RCS]);
Line 2749... Line 3394...
2749
 
3394
 
2750
		for_each_pipe(pipe) {
3395
		for_each_pipe(pipe) {
2751
			int plane = pipe;
3396
			int plane = pipe;
2752
			if (IS_MOBILE(dev))
3397
			if (HAS_FBC(dev))
Line 2753... Line 3398...
2753
				plane = !plane;
3398
				plane = !plane;
2754
 
3399
 
2755
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3400
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
Line 2756... Line 3401...
2756
			    i915_handle_vblank(dev, plane, pipe, iir))
3401
			    i915_handle_vblank(dev, plane, pipe, iir))
2757
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3402
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
-
 
3403
 
-
 
3404
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
-
 
3405
				blc_event = true;
2758
 
3406
 
Line 2759... Line 3407...
2759
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3407
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2760
				blc_event = true;
3408
				i9xx_pipe_crc_irq_handler(dev, pipe);
Line 2854... Line 3502...
2854
		enable_mask |= I915_BSD_USER_INTERRUPT;
3502
		enable_mask |= I915_BSD_USER_INTERRUPT;
Line 2855... Line 3503...
2855
 
3503
 
2856
	/* Interrupt setup is already guaranteed to be single-threaded, this is
3504
	/* Interrupt setup is already guaranteed to be single-threaded, this is
2857
	 * just to make the assert_spin_locked check happy. */
3505
	 * just to make the assert_spin_locked check happy. */
2858
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3506
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
3507
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
-
 
3508
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
2859
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
3509
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
Line 2860... Line 3510...
2860
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3510
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2861
 
3511
 
2862
	/*
3512
	/*
Line 2979... Line 3629...
2979
 
3629
 
2980
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3630
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
Line 2981... Line 3631...
2981
				  hotplug_status);
3631
				  hotplug_status);
2982
 
3632
 
-
 
3633
			intel_hpd_irq_handler(dev, hotplug_trigger,
-
 
3634
					      IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
-
 
3635
 
-
 
3636
			if (IS_G4X(dev) &&
Line 2983... Line 3637...
2983
			intel_hpd_irq_handler(dev, hotplug_trigger,
3637
			    (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
2984
					      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
3638
				dp_aux_irq_handler(dev);
2985
 
3639
 
Line 3000... Line 3654...
3000
			    i915_handle_vblank(dev, pipe, pipe, iir))
3654
			    i915_handle_vblank(dev, pipe, pipe, iir))
3001
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
3655
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
Line 3002... Line 3656...
3002
 
3656
 
3003
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3657
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
-
 
3658
				blc_event = true;
-
 
3659
 
-
 
3660
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3004
				blc_event = true;
3661
				i9xx_pipe_crc_irq_handler(dev, pipe);
Line 3005... Line 3662...
3005
		}
3662
		}
3006
 
3663
 
Line 3104... Line 3761...
3104
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3761
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
Line 3105... Line 3762...
3105
 
3762
 
3106
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3763
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
Line -... Line 3764...
-
 
3764
		    (unsigned long) dev_priv);
-
 
3765
 
3107
		    (unsigned long) dev_priv);
3766
 
3108
 
3767
	if (IS_GEN2(dev)) {
3109
	dev->driver->get_vblank_counter = i915_get_vblank_counter;
3768
		dev->max_vblank_count = 0;
3110
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3769
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
3111
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3770
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
-
 
3771
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
-
 
3772
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
-
 
3773
	} else {
3112
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3774
	dev->driver->get_vblank_counter = i915_get_vblank_counter;
Line 3113... Line 3775...
3113
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3775
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3114
	}
3776
	}
3115
 
-
 
3116
	if (drm_core_check_feature(dev, DRIVER_MODESET))
-
 
3117
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3777
 
-
 
3778
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
Line 3118... Line 3779...
3118
	else
3779
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3119
		dev->driver->get_vblank_timestamp = NULL;
3780
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3120
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3781
	}
3121
 
3782
 
3122
	if (IS_VALLEYVIEW(dev)) {
3783
	if (IS_VALLEYVIEW(dev)) {
3123
		dev->driver->irq_handler = valleyview_irq_handler;
3784
		dev->driver->irq_handler = valleyview_irq_handler;
3124
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
3785
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
3125
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
3786
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
-
 
3787
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
-
 
3788
		dev->driver->enable_vblank = valleyview_enable_vblank;
-
 
3789
		dev->driver->disable_vblank = valleyview_disable_vblank;
-
 
3790
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
-
 
3791
	} else if (IS_GEN8(dev)) {
-
 
3792
		dev->driver->irq_handler = gen8_irq_handler;
-
 
3793
		dev->driver->irq_preinstall = gen8_irq_preinstall;
-
 
3794
		dev->driver->irq_postinstall = gen8_irq_postinstall;
3126
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
3795
		dev->driver->irq_uninstall = gen8_irq_uninstall;
3127
		dev->driver->enable_vblank = valleyview_enable_vblank;
3796
		dev->driver->enable_vblank = gen8_enable_vblank;
3128
		dev->driver->disable_vblank = valleyview_disable_vblank;
3797
		dev->driver->disable_vblank = gen8_disable_vblank;
3129
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3798
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3130
	} else if (HAS_PCH_SPLIT(dev)) {
3799
	} else if (HAS_PCH_SPLIT(dev)) {
Line 3194... Line 3863...
3194
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
3863
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
3195
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
3864
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
3196
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
3865
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
3197
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
3866
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
Line 3198... Line 3867...
3198
 
3867
 
3199
	ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
3868
	ironlake_disable_display_irq(dev_priv, 0xffffffff);
3200
	ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
3869
	ibx_disable_display_interrupt(dev_priv, 0xffffffff);
3201
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
3870
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
Line 3202... Line 3871...
3202
	snb_disable_pm_irq(dev_priv, 0xffffffff);
3871
	snb_disable_pm_irq(dev_priv, 0xffffffff);
Line 3209... Line 3878...
3209
/* Restore interrupts so we can recover from Package C8+. */
3878
/* Restore interrupts so we can recover from Package C8+. */
3210
void hsw_pc8_restore_interrupts(struct drm_device *dev)
3879
void hsw_pc8_restore_interrupts(struct drm_device *dev)
3211
{
3880
{
3212
	struct drm_i915_private *dev_priv = dev->dev_private;
3881
	struct drm_i915_private *dev_priv = dev->dev_private;
3213
	unsigned long irqflags;
3882
	unsigned long irqflags;
3214
	uint32_t val, expected;
3883
	uint32_t val;
Line 3215... Line 3884...
3215
 
3884
 
Line 3216... Line 3885...
3216
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3885
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3217
 
-
 
3218
	val = I915_READ(DEIMR);
3886
 
Line 3219... Line 3887...
3219
	expected = ~DE_PCH_EVENT_IVB;
3887
	val = I915_READ(DEIMR);
3220
	WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
-
 
3221
 
3888
	WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);
3222
	val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
-
 
Line 3223... Line 3889...
3223
	expected = ~SDE_HOTPLUG_MASK_CPT;
3889
 
3224
	WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
-
 
3225
	     val, expected);
3890
	val = I915_READ(SDEIMR);
Line 3226... Line 3891...
3226
 
3891
	WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);
3227
	val = I915_READ(GTIMR);
-
 
3228
	expected = 0xffffffff;
3892
 
3229
	WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
-
 
Line 3230... Line 3893...
3230
 
3893
	val = I915_READ(GTIMR);
Line 3231... Line 3894...
3231
	val = I915_READ(GEN6_PMIMR);
3894
	WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);
3232
	expected = 0xffffffff;
3895
 
3233
	WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
-
 
3234
	     expected);
-
 
3235
 
3896
	val = I915_READ(GEN6_PMIMR);
3236
	dev_priv->pc8.irqs_disabled = false;
3897
	WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);
3237
 
3898
 
Line 3238... Line 3899...
3238
	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
3899
	dev_priv->pc8.irqs_disabled = false;