Subversion Repositories Kolibri OS

Rev 4104 → Rev 4126

The side-by-side diff below is rendered in unified form: lines prefixed with '-' exist only in Rev 4104, lines prefixed with '+' exist only in Rev 4126, and lines prefixed with a space are unchanged context. Each "Line N... Line M..." row marks where a hunk starts in the old (N) and new (M) revision.
Line 484... Line 484...
 	pipestat &= ~mask;
 	I915_WRITE(reg, pipestat);
 		POSTING_READ(reg);
 }
 
-#if 0
 /**
  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
  */
 static void i915_enable_asle_pipestat(struct drm_device *dev)
Line 504... Line 503...
 		if (INTEL_INFO(dev)->gen >= 4)
 	i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
 
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
-#endif
 
 /**
  * i915_pipe_enabled - check if a pipe is enabled
Line 747... Line 745...
 	 /* if there were no outputs to poll, poll was disabled,
 	  * therefore make sure it's enabled when disabling HPD on
 	  * some connectors */
 	if (hpd_disabled) {
 		drm_kms_helper_poll_enable(dev);
-//       mod_timer(&dev_priv->hotplug_reenable_timer,
-//             jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
+		mod_timer(&dev_priv->hotplug_reenable_timer,
+			  GetTimerTicks() + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
 	}
 
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Line 819... Line 817...
 	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
 
 	wake_up_all(&ring->irq_queue);
Line 822... Line -...
 }
 
-#if 0
 static void gen6_pm_rps_work(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
Line 873... Line 870...
 		 * On VLV, when we enter RC6 we may not be at the minimum
 		 * voltage level, so arm a timer to check.  It should only
 		 * fire when there's activity or once after we've entered
 		 * RC6, and then won't be re-armed until the next RPS interrupt.
 		 */
-		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
-				 msecs_to_jiffies(100));
+//		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
+//				 msecs_to_jiffies(100));
 	}
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
Line 926... Line 923...
 	ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
Line 929... Line -...
 	mutex_unlock(&dev_priv->dev->struct_mutex);
-
-	parity_event[0] = I915_L3_PARITY_UEVENT "=1";
-	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
-	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
-	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
-	parity_event[4] = NULL;
-
-	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
-			   KOBJ_CHANGE, parity_event);
 
 	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
 		  row, bank, subbank);
 
-	kfree(parity_event[3]);
-	kfree(parity_event[2]);
-	kfree(parity_event[1]);
 }
Line 957... Line 942...
 	spin_unlock(&dev_priv->irq_lock);
 
 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
 
-#endif
-
 static void ilk_gt_irq_handler(struct drm_device *dev,
 			       struct drm_i915_private *dev_priv,
 			       u32 gt_iir)
Line 987... Line 970...
 
 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
 		      GT_BSD_CS_ERROR_INTERRUPT |
 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
 		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
-//       i915_handle_error(dev, false);
+		i915_handle_error(dev, false);
 	}
 
-//	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
-//		ivybridge_handle_parity_error(dev);
+	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+		ivybridge_parity_error_irq_handler(dev);
 }
Line 1020... Line 1003...
 		if (!(hpd[i] & hotplug_trigger) ||
 		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
 			continue;
 
 		dev_priv->hpd_event_bits |= (1 << i);
-//        if (!time_in_range(GetTimerTicks(), dev_priv->hpd_stats[i].hpd_last_jiffies,
-//                  dev_priv->hpd_stats[i].hpd_last_jiffies
-//                  + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
-//            dev_priv->hpd_stats[i].hpd_last_jiffies = GetTimerTicks;
-//           dev_priv->hpd_stats[i].hpd_cnt = 0;
-//       } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
-//           dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
-//           DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
-//           ret = true;
-//       } else {
-			dev_priv->hpd_stats[i].hpd_cnt++;
-//       }
+		if (!time_in_range(GetTimerTicks(), dev_priv->hpd_stats[i].hpd_last_jiffies,
+                  dev_priv->hpd_stats[i].hpd_last_jiffies
+                  + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
+			dev_priv->hpd_stats[i].hpd_last_jiffies = GetTimerTicks();
+           dev_priv->hpd_stats[i].hpd_cnt = 0;
+			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
+       } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
+           dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
+			dev_priv->hpd_event_bits &= ~(1 << i);
+           DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
+			storm_detected = true;
+		} else {
+			dev_priv->hpd_stats[i].hpd_cnt++;
+			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
+				      dev_priv->hpd_stats[i].hpd_cnt);
+		}
Line 1037... Line 1024...
 	}
 
 	if (storm_detected)
 		dev_priv->display.hpd_irq_setup(dev);
 	spin_unlock(&dev_priv->irq_lock);
-
-
+
+	/*
+	 * Our hotplug handler can grab modeset locks (by calling down into the
+	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
+	 * queue for otherwise the flush_work in the pageflip code will
+	 * deadlock.
+	 */
+	schedule_work(&dev_priv->hotplug_work);
 }
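Aside on the hunk above: Rev 4126 turns on the HPD interrupt-storm detection that Rev 4104 still carried as commented-out code. The scheme visible in the diff is a per-pin counter over a fixed window: interrupts arriving inside HPD_STORM_DETECT_PERIOD increment hpd_cnt, a count above HPD_STORM_THRESHOLD marks the pin HPD_MARK_DISABLED, and the hotplug work is scheduled outside irq_lock. The standalone sketch below models only that counting logic; every name in it is illustrative rather than the driver's own, and the real handler additionally updates hpd_event_bits and reprograms the hardware through hpd_irq_setup.

/* Illustrative, self-contained model of the storm-detection bookkeeping
 * shown in the hunk above.  Hypothetical names throughout; the driver
 * keeps the equivalent state in dev_priv->hpd_stats[] under irq_lock. */
#include <stdbool.h>
#include <stdio.h>

#define STORM_DETECT_PERIOD_TICKS 100	/* length of the observation window */
#define STORM_THRESHOLD           5	/* interrupts tolerated per window */

struct hpd_pin_stats {
	unsigned long window_start;	/* tick when the current window began */
	unsigned int  count;		/* interrupts seen inside the window */
	bool          disabled;		/* pin masked after a detected storm */
};

/* Returns true when this interrupt is classified as part of a storm. */
static bool hpd_storm_detect(struct hpd_pin_stats *pin, unsigned long now)
{
	if (now - pin->window_start > STORM_DETECT_PERIOD_TICKS) {
		/* Window expired: restart it and reset the counter. */
		pin->window_start = now;
		pin->count = 0;
	} else if (pin->count > STORM_THRESHOLD) {
		/* Too many interrupts inside one window: disable the pin. */
		pin->disabled = true;
		return true;
	} else {
		pin->count++;
	}
	return false;
}

int main(void)
{
	struct hpd_pin_stats pin = { 0 };
	unsigned long tick;

	/* An interrupt on every tick trips the detector quickly. */
	for (tick = 1; tick <= 20; tick++) {
		if (hpd_storm_detect(&pin, tick)) {
			printf("storm detected at tick %lu\n", tick);
			break;
		}
	}
	return 0;
}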
Line 1075... Line 1068...
 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
 			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
 
 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
 			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
-//           i915_handle_error(dev_priv->dev, false);
+			i915_handle_error(dev_priv->dev, false);
 		}
 	}
 }
Line 1150... Line 1143...
 		}
 
 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
 			gmbus_irq_handler(dev);
 
-//        if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
-//            gen6_queue_rps_work(dev_priv, pm_iir);
+		if (pm_iir)
+			gen6_rps_irq_handler(dev_priv, pm_iir);
 
 		I915_WRITE(GTIIR, gt_iir);
Line 1308... Line 1301...
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (de_iir & DE_AUX_CHANNEL_A)
 		dp_aux_irq_handler(dev);
 
-#if 0
 	if (de_iir & DE_GSE)
 		intel_opregion_asle_intr(dev);
 
+#if 0
 	if (de_iir & DE_PIPEA_VBLANK)
 		drm_handle_vblank(dev, 0);
 
 	if (de_iir & DE_PIPEB_VBLANK)
 		drm_handle_vblank(dev, 1);
+#endif
 
 	if (de_iir & DE_POISON)
Line 1363... Line 1356...
 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-//	if (de_iir & DE_ERR_INT_IVB)
-//		ivb_err_int_handler(dev);
+	if (de_iir & DE_ERR_INT_IVB)
+		ivb_err_int_handler(dev);
 
 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
Line 1506... Line 1499...
 	 */
 	if (reset_completed)
 		wake_up_all(&dev_priv->gpu_error.reset_queue);
 }
 
-#if 0
 /**
  * i915_error_work_func - do process context error handling work
  * @work: work struct
  *
Line 1526... Line 1518...
 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
 	int ret;
-
-	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
 	/*
 	 * Note that there's only one work item which does gpu resets, so we
 	 * need not worry about concurrent gpu resets potentially incrementing
 	 * error->reset_counter twice. We only need to take care of another
Line 1540... Line 1530...
 	 * the reset in-progress bit is only ever set by code outside of this
 	 * work we don't need to worry about any other races.
 	 */
 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
 		DRM_DEBUG_DRIVER("resetting chip\n");
-		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
-				   reset_event);
 
 		/*
 		 * All state reset _must_ be completed before we update the
 		 * reset counter, for otherwise waiters might miss the reset
 		 * pending state and not properly drop locks, resulting in
 		 * deadlocks with the reset work.
 		 */
-		ret = i915_reset(dev);
+//       ret = i915_reset(dev);
 
-		intel_display_handle_reset(dev);
+//       intel_display_handle_reset(dev);
 
 		if (ret == 0) {
Line 1564... Line 1552...
 			 * Since unlock operations are a one-sided barrier only,
 			 * we need to insert a barrier here to order any seqno
 			 * updates before
 			 * the counter increment.
 			 */
-			smp_mb__before_atomic_inc();
 			atomic_inc(&dev_priv->gpu_error.reset_counter);
 
-			kobject_uevent_env(&dev->primary->kdev.kobj,
-					   KOBJ_CHANGE, reset_done_event);
 		} else {
 			atomic_set(&error->reset_counter, I915_WEDGED);
 	}
Line 1687... Line 1672...
  */
 void i915_handle_error(struct drm_device *dev, bool wedged)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	i915_capture_error_state(dev);
+//   i915_capture_error_state(dev);
 	i915_report_and_clear_eir(dev);
 
 	if (wedged) {
Line 1719... Line 1704...
 	 * code will deadlock.
 	 */
 	schedule_work(&dev_priv->gpu_error.work);
 }
 
+#if 0
 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
Line 2038... Line 2024...
 //               if (waitqueue_active(&ring->irq_queue)) {
 					/* Issue a wake-up to catch stuck h/w. */
 //                   DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
 //                         ring->name);
 //                   wake_up_all(&ring->irq_queue);
-//                   ring->hangcheck.score += HUNG;
 //               } else
 					busy = false;
 			} else {
 				/* We always increment the hangcheck score
 				 * if the ring is busy and still processing
Line 2539... Line 2524...
 		 * have been cleared after the pipestat interrupt was received.
 		 * It doesn't set the bit in iir again, but it still produces
 		 * interrupts (for non-MSI).
 		 */
 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-//       if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-//           i915_handle_error(dev, false);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			i915_handle_error(dev, false);
 
 		for_each_pipe(pipe) {
 			int reg = PIPESTAT(pipe);
Line 2654... Line 2639...
 
 	I915_WRITE(IMR, dev_priv->irq_mask);
 	I915_WRITE(IER, enable_mask);
 	POSTING_READ(IER);
 
-//	intel_opregion_enable_asle(dev);
+	i915_enable_asle_pipestat(dev);
 
Line 2714... Line 2699...
 		 * have been cleared after the pipestat interrupt was received.
 		 * It doesn't set the bit in iir again, but it still produces
 		 * interrupts (for non-MSI).
 		 */
 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-//       if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-//           i915_handle_error(dev, false);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			i915_handle_error(dev, false);
 
 		for_each_pipe(pipe) {
 			int reg = PIPESTAT(pipe);
Line 2769... Line 2754...
 
 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 				blc_event = true;
 		}
 
-//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
-//			intel_opregion_asle_intr(dev);
+		if (blc_event || (iir & I915_ASLE_INTERRUPT))
+			intel_opregion_asle_intr(dev);
 
 		/* With MSI, interrupts are only generated when iir
Line 2888... Line 2873...
 	POSTING_READ(IER);
 
 	I915_WRITE(PORT_HOTPLUG_EN, 0);
 	POSTING_READ(PORT_HOTPLUG_EN);
 
-//	intel_opregion_enable_asle(dev);
+	i915_enable_asle_pipestat(dev);
 
Line 2952... Line 2937...
 		 * have been cleared after the pipestat interrupt was received.
 		 * It doesn't set the bit in iir again, but it still produces
 		 * interrupts (for non-MSI).
 		 */
 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-//       if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-//           i915_handle_error(dev, false);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			i915_handle_error(dev, false);
 
 		for_each_pipe(pipe) {
 			int reg = PIPESTAT(pipe);
Line 3012... Line 2997...
 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 				blc_event = true;
 		}
 
 
-//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
-//			intel_opregion_asle_intr(dev);
+		if (blc_event || (iir & I915_ASLE_INTERRUPT))
+			intel_opregion_asle_intr(dev);
 
Line 3064... Line 3049...
 		I915_WRITE(PIPESTAT(pipe),
 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
 	I915_WRITE(IIR, I915_READ(IIR));
 }
Line -... Line 3053...
+
+static void i915_reenable_hotplug_timer_func(unsigned long data)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	unsigned long irqflags;
+	int i;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
+		struct drm_connector *connector;
+
+		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
+			continue;
+
+		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
+
+		list_for_each_entry(connector, &mode_config->connector_list, head) {
+			struct intel_connector *intel_connector = to_intel_connector(connector);
+
+			if (intel_connector->encoder->hpd_pin == i) {
+				if (connector->polled != intel_connector->polled)
+					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
+							 drm_get_connector_name(connector));
+				connector->polled = intel_connector->polled;
+				if (!connector->polled)
+					connector->polled = DRM_CONNECTOR_POLL_HPD;
+			}
+		}
+	}
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
 
 void intel_irq_init(struct drm_device *dev)
 {
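Before the final intel_irq_init hunk, a note on the function added above: it is the second half of the storm handling. An earlier hunk arms dev_priv->hotplug_reenable_timer for GetTimerTicks() + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY), and this callback later walks the pins, moves entries parked in HPD_DISABLED back to HPD_ENABLED, restores connector->polled and reprograms hotplug via hpd_irq_setup. The sketch below models only that arm-and-expire cycle with a polled deadline; the names and the tick loop are stand-ins, not KolibriOS or Linux APIs.

/* Illustrative, self-contained model of the delayed re-enable pattern:
 * disabling a pin arms a one-shot deadline, and a later "timer callback"
 * returns the pin to the enabled state.  All names are hypothetical; the
 * driver uses setup_timer()/mod_timer(), GetTimerTicks() and hpd_stats[]. */
#include <stdbool.h>
#include <stdio.h>

#define REENABLE_DELAY_TICKS 120	/* stand-in for msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY) */

struct hpd_pin {
	bool enabled;
};

struct oneshot_timer {
	bool          armed;
	unsigned long expires;
};

/* Storm handled: mask the pin and arm the re-enable deadline. */
static void disable_pin(struct hpd_pin *pin, struct oneshot_timer *t, unsigned long now)
{
	pin->enabled = false;
	t->armed = true;
	t->expires = now + REENABLE_DELAY_TICKS;
}

/* Equivalent of the timer callback: runs once the deadline has passed. */
static void reenable_timer_func(struct hpd_pin *pin, struct oneshot_timer *t)
{
	t->armed = false;
	pin->enabled = true;
}

int main(void)
{
	struct hpd_pin pin = { .enabled = true };
	struct oneshot_timer timer = { false, 0 };
	unsigned long tick;

	disable_pin(&pin, &timer, 10);	/* storm handled at tick 10 */

	for (tick = 11; tick <= 200; tick++)
		if (timer.armed && tick >= timer.expires)
			reenable_timer_func(&pin, &timer);

	printf("pin enabled again: %s\n", pin.enabled ? "yes" : "no");
	return 0;
}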
Line 3071... Line 3091...
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
+	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
+	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
+	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,