Subversion Repositories Kolibri OS


Diff: Rev 5367 → Rev 6084
Line 25... Line 25...
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
Line 29... Line 29...
 
-//#include <...>
+#include <...>
 #include <...>
 #include <...>
 #include "i915_drv.h"
 #include "i915_trace.h"
Line 35... Line 35...
 #include "intel_drv.h"
 
 #include <...>
 #include <...>
+#include <...>
 #include <...>
 #include <...>
 
 #include <...>
 
 #include <...>
Line 322... Line 322...
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
 };
Line 325... Line 325...
 
 static const struct intel_device_info intel_cherryview_info = {
-	.is_preliminary = 1,
 	.gen = 8, .num_pipes = 3,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.is_valleyview = 1,
 	.display_mmio_offset = VLV_DISPLAY_BASE,
 	GEN_CHV_PIPEOFFSETS,
 	CURSOR_OFFSETS,
Line 335... Line 334...
 };
 
 static const struct intel_device_info intel_skylake_info = {
-	.is_preliminary = 1,
 	.is_skylake = 1,
 	.gen = 9, .num_pipes = 3,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fpga_dbg = 1,
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
 };
 
+static const struct intel_device_info intel_skylake_gt3_info = {
+	.is_skylake = 1,
+	.gen = 9, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
+	.has_fpga_dbg = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_broxton_info = {
+	.is_preliminary = 1,
+	.gen = 9,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.num_pipes = 3,
+	.has_ddi = 1,
+	.has_fpga_dbg = 1,
+	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
Line 378... Line 403...
 	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),	\
 	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),	\
 	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),	\
 	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
 	INTEL_CHV_IDS(&intel_cherryview_info),	\
-	INTEL_SKL_IDS(&intel_skylake_info)
+	INTEL_SKL_GT1_IDS(&intel_skylake_info),	\
+	INTEL_SKL_GT2_IDS(&intel_skylake_info),	\
+	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),	\
+	INTEL_BXT_IDS(&intel_broxton_info)
Line 384... Line 412...
 
-static const struct pci_device_id pciidlist[] = {       /* aka */
+static const struct pci_device_id pciidlist[] = {		/* aka */
 	INTEL_PCI_IDS,
-    {0, 0, 0}
+	{0, 0, 0}
Line 390... Line 418...
 #define INTEL_PCH_DEVICE_ID_MASK        0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
 #define INTEL_PCH_PPT_DEVICE_ID_TYPE    0x1e00
 #define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
+static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
+{
+	enum intel_pch ret = PCH_NOP;
+
+	/*
+	 * In a virtualized passthrough environment we can be in a
+	 * setup where the ISA bridge is not able to be passed through.
+	 * In this case, a south bridge can be emulated and we have to
+	 * make an educated guess as to which PCH is really there.
+	 */
+
+	if (IS_GEN5(dev)) {
+		ret = PCH_IBX;
+		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
+	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+		ret = PCH_CPT;
+		DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
+	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+		ret = PCH_LPT;
+		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
+	} else if (IS_SKYLAKE(dev)) {
+		ret = PCH_SPT;
+		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
+	}
+
+	return ret;
+}
Line 395... Line 450...
 
 void intel_detect_pch(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
Line 436... Line 491...
 				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
 				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
 			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
-				WARN_ON(!IS_HASWELL(dev));
-				WARN_ON(IS_HSW_ULT(dev));
-			} else if (IS_BROADWELL(dev)) {
-				dev_priv->pch_type = PCH_LPT;
-				dev_priv->pch_id =
-					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
-				DRM_DEBUG_KMS("This is Broadwell, assuming "
-					      "LynxPoint LP PCH\n");
+				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
 			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
-				WARN_ON(!IS_HASWELL(dev));
-				WARN_ON(!IS_HSW_ULT(dev));
+				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
 			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_SPT;
 				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
 				WARN_ON(!IS_SKYLAKE(dev));
 			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_SPT;
 				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
 				WARN_ON(!IS_SKYLAKE(dev));
+			} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = intel_virt_detect_pch(dev);
 			} else
 				continue;
Line 464... Line 515...
 
 			break;
Line 495... Line 546...
 
 	return true;
 }
 
 #if 0
+void i915_firmware_load_error_print(const char *fw_path, int err)
+{
+	DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
+
+	/*
+	 * If the reason is not known assume -ENOENT since that's the most
+	 * usual failure mode.
+	 */
+	if (!err)
+		err = -ENOENT;
+
+	if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
+		return;
+
+	DRM_ERROR(
+	  "The driver is built-in, so to load the firmware you need to\n"
+	  "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
+	  "in your initrd/initramfs image.\n");
+}
+
 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
 {
Line 513... Line 584...
 }
Line 514... Line 585...
 
 static int intel_suspend_complete(struct drm_i915_private *dev_priv);
 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
 			      bool rpm_resume);
+static int skl_resume_prepare(struct drm_i915_private *dev_priv);
+static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
+
 
 static int i915_drm_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
Line 522... Line 596...
-	struct drm_crtc *crtc;
 	pci_power_t opregion_target_state;
+	int error;
 
 	/* ignore lid events during suspend */
Line 533... Line 607...
 
Line 534... Line 608...
 	drm_kms_helper_poll_disable(dev);
Line 535... Line -...
 
 	pci_save_state(dev->pdev);
 
-	/* If KMS is active, we do the leavevt stuff here */
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		int error;
-
-		error = i915_gem_suspend(dev);
-		if (error) {
-			dev_err(&dev->pdev->dev,
-				"GEM idle failed, resume might fail\n");
-			return error;
-		}
-
-		intel_suspend_gt_powersave(dev);
-
-		/*
-		 * Disable CRTCs directly since we want to preserve sw state
-		 * for _thaw. Also, power gate the CRTC power wells.
-		 */
-		drm_modeset_lock_all(dev);
-		for_each_crtc(dev, crtc)
-			intel_crtc_control(crtc, false);
-		drm_modeset_unlock_all(dev);
-
-		intel_dp_mst_suspend(dev);
+	error = i915_gem_suspend(dev);
+	if (error) {
+		dev_err(&dev->pdev->dev,
+			"GEM idle failed, resume might fail\n");
+		return error;
+	}
+
+	intel_guc_suspend(dev);
+
+	intel_suspend_gt_powersave(dev);
+
+	/*
+	 * Disable CRTCs directly since we want to preserve sw state
+	 * for _thaw. Also, power gate the CRTC power wells.
+	 */
+	drm_modeset_lock_all(dev);
+	intel_display_suspend(dev);
+	drm_modeset_unlock_all(dev);
+
+	intel_dp_mst_suspend(dev);
Line 588... Line 658...
 	intel_display_set_init_power(dev_priv, false);
 
 	return 0;
 }
 
-static int i915_drm_suspend_late(struct drm_device *drm_dev)
+static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 {
 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
Line 602... Line 672...
 
 		return ret;
 	}
 
 	pci_disable_device(drm_dev->pdev);
-	pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+	/*
+	 * During hibernation on some platforms the BIOS may try to access
+	 * the device even though it's already in D3 and hang the machine. So
+	 * leave the device in D0 on those platforms and hope the BIOS will
+	 * power down the device properly. The issue was seen on multiple old
+	 * GENs with different BIOS vendors, so having an explicit blacklist
+	 * is inpractical; apply the workaround on everything pre GEN6. The
+	 * platforms where the issue was seen:
+	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
+	 * Fujitsu FSC S7110
+	 * Acer Aspire 1830T
+	 */
+	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
+		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
 
 	return 0;
 }
Line 611... Line 694...
 
-int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
+int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
Line 628... Line 711...
 
 	error = i915_drm_suspend(dev);
 	if (error)
 		return error;
 
-	return i915_drm_suspend_late(dev);
+	return i915_drm_suspend_late(dev, false);
 }
 
 static int i915_drm_resume(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		mutex_lock(&dev->struct_mutex);
-		i915_gem_restore_gtt_mappings(dev);
-		mutex_unlock(&dev->struct_mutex);
-	}
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_restore_gtt_mappings(dev);
+	mutex_unlock(&dev->struct_mutex);
 
 	i915_restore_state(dev);
 	intel_opregion_setup(dev);
 
-	/* KMS EnterVT equivalent */
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		intel_init_pch_refclk(dev);
-		drm_mode_config_reset(dev);
-
-		mutex_lock(&dev->struct_mutex);
-		if (i915_gem_init_hw(dev)) {
-			DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-			atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
-		}
-		mutex_unlock(&dev->struct_mutex);
-
-		/* We need working interrupts for modeset enabling ... */
-		intel_runtime_pm_enable_interrupts(dev_priv);
-
-		intel_modeset_init_hw(dev);
+	intel_init_pch_refclk(dev);
+	drm_mode_config_reset(dev);
+
+	/*
+	 * Interrupts have to be enabled before any batches are run. If not the
+	 * GPU will hang. i915_gem_init_hw() will initiate batches to
+	 * update/restore the context.
+	 *
+	 * Modeset enabling in intel_modeset_init_hw() also needs working
+	 * interrupts.
+	 */
+	intel_runtime_pm_enable_interrupts(dev_priv);
+
+	mutex_lock(&dev->struct_mutex);
+	if (i915_gem_init_hw(dev)) {
+		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	intel_guc_resume(dev);
+
+	intel_modeset_init_hw(dev);
Line 681... Line 769...
-		 * notifications.
-		 * */
-		intel_hpd_init(dev_priv);
-		/* Config may have changed between suspend and resume */
-		drm_helper_hpd_irq_event(dev);
-	}
+	 * notifications.
+	 * */
+	intel_hpd_init(dev_priv);
+	/* Config may have changed between suspend and resume */
+	drm_helper_hpd_irq_event(dev);
 
 	intel_opregion_init(dev);
Line 720... Line 807...
 	pci_set_master(dev->pdev);
 
 	if (IS_VALLEYVIEW(dev_priv))
 		ret = vlv_resume_prepare(dev_priv, false);
 	if (ret)
-		DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
+		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
+			  ret);
 
 	intel_uncore_early_sanitize(dev, true);
 
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (IS_BROXTON(dev))
+		ret = bxt_resume_prepare(dev_priv);
+	else if (IS_SKYLAKE(dev_priv))
+		ret = skl_resume_prepare(dev_priv);
+	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		hsw_disable_pc8(dev_priv);
 
 	intel_uncore_sanitize(dev);
 	intel_power_domains_init_hw(dev_priv);
 
 	return ret;
 }
828
}
Line 768... Line 860...
768
{
860
{
769
	struct drm_i915_private *dev_priv = dev->dev_private;
861
	struct drm_i915_private *dev_priv = dev->dev_private;
770
	bool simulated;
862
	bool simulated;
771
	int ret;
863
	int ret;
Line 772... Line 864...
772
 
864
 
773
	if (!i915.reset)
-
 
Line 774... Line 865...
774
		return 0;
865
	intel_reset_gt_powersave(dev);
Line 775... Line 866...
775
 
866
 
Line 813... Line 904...
813
	 *
904
	 *
814
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
905
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
815
	 * was running at the time of the reset (i.e. we weren't VT
906
	 * was running at the time of the reset (i.e. we weren't VT
816
	 * switched away).
907
	 * switched away).
817
	 */
908
	 */
818
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-
 
-
 
909
 
819
		/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
910
	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
820
		dev_priv->gpu_error.reload_in_reset = true;
911
	dev_priv->gpu_error.reload_in_reset = true;
Line 821... Line 912...
821
 
912
 
Line 828... Line 919...
828
			DRM_ERROR("Failed hw init on reset %d\n", ret);
919
		DRM_ERROR("Failed hw init on reset %d\n", ret);
829
			return ret;
920
		return ret;
830
		}
921
	}
Line 831... Line 922...
831
 
922
 
832
		/*
-
 
833
		 * FIXME: This races pretty badly against concurrent holders of
-
 
834
		 * ring interrupts. This is possible since we've started to drop
-
 
835
		 * dev->struct_mutex in select places when waiting for the gpu.
-
 
836
		 */
-
 
837
 
-
 
838
		/*
923
	/*
839
		 * rps/rc6 re-init is necessary to restore state lost after the
924
	 * rps/rc6 re-init is necessary to restore state lost after the
840
		 * reset and the re-install of gt irqs. Skip for ironlake per
925
	 * reset and the re-install of gt irqs. Skip for ironlake per
841
		 * previous concerns that it doesn't respond well to some forms
926
	 * previous concerns that it doesn't respond well to some forms
842
		 * of re-init after reset.
927
	 * of re-init after reset.
843
		 */
928
	 */
844
		if (INTEL_INFO(dev)->gen > 5)
929
	if (INTEL_INFO(dev)->gen > 5)
845
			intel_enable_gt_powersave(dev);
-
 
846
	} else {
-
 
847
		mutex_unlock(&dev->struct_mutex);
-
 
Line 848... Line 930...
848
	}
930
		intel_enable_gt_powersave(dev);
849
 
931
 
Line 850... Line 932...
850
	return 0;
932
	return 0;
Line 867... Line 949...
867
	 * functions have the same PCI-ID!
949
	 * functions have the same PCI-ID!
868
	 */
950
	 */
869
	if (PCI_FUNC(pdev->devfn))
951
	if (PCI_FUNC(pdev->devfn))
870
		return -ENODEV;
952
		return -ENODEV;
Line 871... Line -...
871
 
-
 
872
	driver.driver_features &= ~(DRIVER_USE_AGP);
-
 
873
 
953
 
874
	return drm_get_pci_dev(pdev, ent, &driver);
954
	return drm_get_pci_dev(pdev, ent, &driver);
Line 875... Line 955...
875
}
955
}
876
 
956
 
Line 898... Line 978...
898
	return i915_drm_suspend(drm_dev);
978
	return i915_drm_suspend(drm_dev);
899
}
979
}
Line 900... Line 980...
900
 
980
 
901
static int i915_pm_suspend_late(struct device *dev)
981
static int i915_pm_suspend_late(struct device *dev)
902
{
-
 
903
	struct pci_dev *pdev = to_pci_dev(dev);
982
{
Line 904... Line 983...
904
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
983
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
905
 
984
 
906
	/*
985
	/*
907
	 * We have a suspedn ordering issue with the snd-hda driver also
986
	 * We have a suspend ordering issue with the snd-hda driver also
908
	 * requiring our device to be power up. Due to the lack of a
987
	 * requiring our device to be power up. Due to the lack of a
909
	 * parent/child relationship we currently solve this with an late
988
	 * parent/child relationship we currently solve this with an late
910
	 * suspend hook.
989
	 * suspend hook.
911
	 *
990
	 *
912
	 * FIXME: This should be solved with a special hdmi sink device or
991
	 * FIXME: This should be solved with a special hdmi sink device or
913
	 * similar so that power domains can be employed.
992
	 * similar so that power domains can be employed.
914
	 */
993
	 */
Line -... Line 994...
-
 
994
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
-
 
995
		return 0;
-
 
996
 
-
 
997
	return i915_drm_suspend_late(drm_dev, false);
-
 
998
}
-
 
999
 
-
 
1000
static int i915_pm_poweroff_late(struct device *dev)
-
 
1001
{
-
 
1002
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
-
 
1003
 
915
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1004
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
916
		return 0;
1005
		return 0;
Line 917... Line 1006...
917
 
1006
 
918
	return i915_drm_suspend_late(drm_dev);
1007
	return i915_drm_suspend_late(drm_dev, true);
919
}
-
 
920
 
1008
}
Line 921... Line 1009...
921
static int i915_pm_resume_early(struct device *dev)
1009
 
922
{
1010
static int i915_pm_resume_early(struct device *dev)
Line 923... Line 1011...
923
	struct pci_dev *pdev = to_pci_dev(dev);
1011
{
924
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
1012
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
Line 925... Line 1013...
925
 
1013
 
926
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1014
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
927
		return 0;
-
 
928
 
1015
		return 0;
Line 929... Line 1016...
929
	return i915_drm_resume_early(drm_dev);
1016
 
930
}
1017
	return i915_drm_resume_early(drm_dev);
Line 931... Line 1018...
931
 
1018
}
932
static int i915_pm_resume(struct device *dev)
1019
 
Line -... Line 1020...
-
 
1020
static int i915_pm_resume(struct device *dev)
-
 
1021
{
-
 
1022
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
-
 
1023
 
-
 
1024
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
-
 
1025
		return 0;
-
 
1026
 
-
 
1027
	return i915_drm_resume(drm_dev);
-
 
1028
}
933
{
1029
 
934
	struct pci_dev *pdev = to_pci_dev(dev);
1030
static int skl_suspend_complete(struct drm_i915_private *dev_priv)
935
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
1031
{
Line 936... Line 1032...
936
 
1032
	/* Enabling DC6 is not a hard requirement to enter runtime D3 */
937
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1033
 
Line -... Line 1034...
-
 
1034
	skl_uninit_cdclk(dev_priv);
-
 
1035
 
-
 
1036
	return 0;
-
 
1037
}
-
 
1038
 
-
 
1039
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
-
 
1040
{
-
 
1041
	hsw_enable_pc8(dev_priv);
-
 
1042
 
-
 
1043
	return 0;
-
 
1044
}
-
 
1045
 
-
 
1046
static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
-
 
1047
{
-
 
1048
	struct drm_device *dev = dev_priv->dev;
-
 
1049
 
-
 
1050
	/* TODO: when DC5 support is added disable DC5 here. */
-
 
1051
 
-
 
1052
	broxton_ddi_phy_uninit(dev);
-
 
1053
	broxton_uninit_cdclk(dev);
-
 
1054
	bxt_enable_dc9(dev_priv);
-
 
1055
 
-
 
1056
	return 0;
-
 
1057
}
-
 
1058
 
-
 
1059
static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
-
 
1060
{
-
 
1061
	struct drm_device *dev = dev_priv->dev;
-
 
1062
 
-
 
1063
	/* TODO: when CSR FW support is added make sure the FW is loaded */
-
 
1064
 
-
 
1065
	bxt_disable_dc9(dev_priv);
-
 
1066
 
-
 
1067
	/*
-
 
1068
	 * TODO: when DC5 support is added enable DC5 here if the CSR FW
-
 
1069
	 * is available.
-
 
1070
	 */
-
 
1071
	broxton_init_cdclk(dev);
-
 
1072
	broxton_ddi_phy_init(dev);
-
 
1073
	intel_prepare_ddi(dev);
-
 
1074
 
-
 
1075
	return 0;
938
		return 0;
1076
}
939
 
1077
 
940
	return i915_drm_resume(drm_dev);
1078
static int skl_resume_prepare(struct drm_i915_private *dev_priv)
941
}
1079
{
942
 
1080
	struct drm_device *dev = dev_priv->dev;
Line 984... Line 1122...
984
	s->arb_mode		= I915_READ(ARB_MODE);
1122
	s->arb_mode		= I915_READ(ARB_MODE);
985
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
1123
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
986
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);
1124
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);
Line 987... Line 1125...
987
 
1125
 
988
	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1126
	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
Line 989... Line 1127...
989
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
1127
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
990
 
1128
 
Line 991... Line 1129...
991
	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
1129
	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
992
	s->gfx_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
1130
	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);
993
 
1131
 
994
	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
1132
	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
Line 1028... Line 1166...
1028
	s->gt_ier		= I915_READ(GTIER);
1166
	s->gt_ier		= I915_READ(GTIER);
1029
	s->pm_imr		= I915_READ(GEN6_PMIMR);
1167
	s->pm_imr		= I915_READ(GEN6_PMIMR);
1030
	s->pm_ier		= I915_READ(GEN6_PMIER);
1168
	s->pm_ier		= I915_READ(GEN6_PMIER);
Line 1031... Line 1169...
1031
 
1169
 
1032
	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1170
	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
Line 1033... Line 1171...
1033
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);
1171
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
1034
 
1172
 
1035
	/* GT SA CZ domain, 0x100000-0x138124 */
1173
	/* GT SA CZ domain, 0x100000-0x138124 */
1036
	s->tilectl		= I915_READ(TILECTL);
1174
	s->tilectl		= I915_READ(TILECTL);
Line 1040... Line 1178...
1040
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);
1178
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);
Line 1041... Line 1179...
1041
 
1179
 
1042
	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
1180
	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
1043
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
1181
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
-
 
1182
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
1044
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
1183
	s->pcbr			= I915_READ(VLV_PCBR);
Line 1045... Line 1184...
1045
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);
1184
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);
1046
 
1185
 
1047
	/*
1186
	/*
Line 1065... Line 1204...
1065
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
1204
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
1066
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
1205
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
1067
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);
1206
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);
Line 1068... Line 1207...
1068
 
1207
 
1069
	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1208
	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
Line 1070... Line 1209...
1070
		I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
1209
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
1071
 
1210
 
Line 1072... Line 1211...
1072
	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
1211
	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
1073
	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);
1212
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
1074
 
1213
 
1075
	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
1214
	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
Line 1109... Line 1248...
1109
	I915_WRITE(GTIER,		s->gt_ier);
1248
	I915_WRITE(GTIER,		s->gt_ier);
1110
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
1249
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
1111
	I915_WRITE(GEN6_PMIER,		s->pm_ier);
1250
	I915_WRITE(GEN6_PMIER,		s->pm_ier);
Line 1112... Line 1251...
1112
 
1251
 
1113
	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1252
	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
Line 1114... Line 1253...
1114
		I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);
1253
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
1115
 
1254
 
1116
	/* GT SA CZ domain, 0x100000-0x138124 */
1255
	/* GT SA CZ domain, 0x100000-0x138124 */
1117
	I915_WRITE(TILECTL,			s->tilectl);
1256
	I915_WRITE(TILECTL,			s->tilectl);
Line 1134... Line 1273...
1134
	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);
1273
	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);
Line 1135... Line 1274...
1135
 
1274
 
1136
	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
1275
	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
1137
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
1276
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
-
 
1277
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
1138
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
1278
	I915_WRITE(VLV_PCBR,			s->pcbr);
1139
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
1279
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
1140
}
1280
}
Line 1141... Line 1281...
1141
#endif
1281
#endif
1142
 
1282
 
1143
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1283
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1144
{
1284
{
Line 1145... Line -...
1145
	u32 val;
-
 
1146
	int err;
-
 
1147
 
-
 
1148
	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1285
	u32 val;
1149
	WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
-
 
1150
 
-
 
1151
#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
-
 
1152
	/* Wait for a previous force-off to settle */
-
 
1153
	if (force_on) {
-
 
1154
		err = wait_for(!COND, 20);
-
 
1155
		if (err) {
-
 
1156
			DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
-
 
1157
				  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
-
 
Line 1158... Line 1286...
1158
			return err;
1286
	int err;
1159
		}
1287
 
1160
	}
1288
#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
1161
 
1289
 
Line 1258... Line 1386...
1258
		goto err1;
1386
		goto err1;
Line 1259... Line 1387...
1259
 
1387
 
1260
	err = vlv_allow_gt_wake(dev_priv, false);
1388
	err = vlv_allow_gt_wake(dev_priv, false);
1261
	if (err)
1389
	if (err)
-
 
1390
		goto err2;
-
 
1391
 
1262
		goto err2;
1392
	if (!IS_CHERRYVIEW(dev_priv->dev))
Line 1263... Line 1393...
1263
	vlv_save_gunit_s0ix_state(dev_priv);
1393
		vlv_save_gunit_s0ix_state(dev_priv);
1264
 
1394
 
1265
	err = vlv_force_gfx_clock(dev_priv, false);
1395
	err = vlv_force_gfx_clock(dev_priv, false);
Line 1289... Line 1419...
1289
	 * can do at this point. Return the first error code (which will also
1419
	 * can do at this point. Return the first error code (which will also
1290
	 * leave RPM permanently disabled).
1420
	 * leave RPM permanently disabled).
1291
	 */
1421
	 */
1292
	ret = vlv_force_gfx_clock(dev_priv, true);
1422
	ret = vlv_force_gfx_clock(dev_priv, true);
Line -... Line 1423...
-
 
1423
 
1293
 
1424
	if (!IS_CHERRYVIEW(dev_priv->dev))
Line 1294... Line 1425...
1294
	vlv_restore_gunit_s0ix_state(dev_priv);
1425
		vlv_restore_gunit_s0ix_state(dev_priv);
1295
 
1426
 
1296
	err = vlv_allow_gt_wake(dev_priv, true);
1427
	err = vlv_allow_gt_wake(dev_priv, true);
Line 1322... Line 1453...
1322
		return -ENODEV;
1453
		return -ENODEV;
Line 1323... Line 1454...
1323
 
1454
 
1324
	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1455
	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
Line 1325... Line -...
1325
		return -ENODEV;
-
 
1326
 
-
 
1327
	assert_force_wake_inactive(dev_priv);
1456
		return -ENODEV;
Line 1328... Line 1457...
1328
 
1457
 
1329
	DRM_DEBUG_KMS("Suspending device\n");
1458
	DRM_DEBUG_KMS("Suspending device\n");
1330
 
1459
 
Line 1350... Line 1479...
1350
	 * an RPM reference.
1479
	 * an RPM reference.
1351
	 */
1480
	 */
1352
	i915_gem_release_all_mmaps(dev_priv);
1481
	i915_gem_release_all_mmaps(dev_priv);
1353
	mutex_unlock(&dev->struct_mutex);
1482
	mutex_unlock(&dev->struct_mutex);
Line -... Line 1483...
-
 
1483
 
-
 
1484
	intel_guc_suspend(dev);
1354
 
1485
 
1355
	intel_suspend_gt_powersave(dev);
1486
	intel_suspend_gt_powersave(dev);
Line 1356... Line 1487...
1356
	intel_runtime_pm_disable_interrupts(dev_priv);
1487
	intel_runtime_pm_disable_interrupts(dev_priv);
1357
 
1488
 
Line 1361... Line 1492...
1361
		intel_runtime_pm_enable_interrupts(dev_priv);
1492
		intel_runtime_pm_enable_interrupts(dev_priv);
Line 1362... Line 1493...
1362
 
1493
 
1363
		return ret;
1494
		return ret;
Line 1364... Line 1495...
1364
	}
1495
	}
-
 
1496
 
1365
 
1497
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
Line 1366... Line 1498...
1366
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
1498
	intel_uncore_forcewake_reset(dev, false);
1367
	dev_priv->pm.suspended = true;
1499
	dev_priv->pm.suspended = true;
1368
 
1500
 
1369
	/*
1501
	/*
1370
	 * FIXME: We really should find a document that references the arguments
1502
	 * FIXME: We really should find a document that references the arguments
-
 
1503
	 * used below!
-
 
1504
	 */
-
 
1505
	if (IS_BROADWELL(dev)) {
-
 
1506
		/*
-
 
1507
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
-
 
1508
		 * being detected, and the call we do at intel_runtime_resume()
-
 
1509
		 * won't be able to restore them. Since PCI_D3hot matches the
-
 
1510
		 * actual specification and appears to be working, use it.
1371
	 * used below!
1511
		 */
1372
	 */
1512
		intel_opregion_notify_adapter(dev, PCI_D3hot);
1373
	if (IS_HASWELL(dev)) {
1513
	} else {
1374
		/*
1514
		/*
1375
	 * current versions of firmware which depend on this opregion
1515
		 * current versions of firmware which depend on this opregion
1376
	 * notification have repurposed the D1 definition to mean
1516
		 * notification have repurposed the D1 definition to mean
1377
	 * "runtime suspended" vs. what you would normally expect (D3)
1517
		 * "runtime suspended" vs. what you would normally expect (D3)
1378
		 * to distinguish it from notifications that might be sent via
1518
		 * to distinguish it from notifications that might be sent via
1379
		 * the suspend path.
-
 
1380
	 */
-
 
1381
	intel_opregion_notify_adapter(dev, PCI_D1);
-
 
1382
	} else {
-
 
1383
		/*
-
 
1384
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
-
 
1385
		 * being detected, and the call we do at intel_runtime_resume()
-
 
1386
		 * won't be able to restore them. Since PCI_D3hot matches the
-
 
1387
		 * actual specification and appears to be working, use it. Let's
-
 
1388
		 * assume the other non-Haswell platforms will stay the same as
-
 
1389
		 * Broadwell.
1519
		 * the suspend path.
Line -... Line 1520...
-
 
1520
		 */
-
 
1521
		intel_opregion_notify_adapter(dev, PCI_D1);
1390
		 */
1522
	}
1391
		intel_opregion_notify_adapter(dev, PCI_D3hot);
1523
 
1392
	}
1524
	assert_forcewakes_inactive(dev_priv);
Line 1393... Line 1525...
1393
 
1525
 
Line 1408... Line 1540...
1408
	DRM_DEBUG_KMS("Resuming device\n");
1540
	DRM_DEBUG_KMS("Resuming device\n");
Line 1409... Line 1541...
1409
 
1541
 
1410
	intel_opregion_notify_adapter(dev, PCI_D0);
1542
	intel_opregion_notify_adapter(dev, PCI_D0);
Line -... Line 1543...
-
 
1543
	dev_priv->pm.suspended = false;
-
 
1544
 
1411
	dev_priv->pm.suspended = false;
1545
	intel_guc_resume(dev);
1412
 
1546
 
-
 
1547
	if (IS_GEN6(dev_priv))
-
 
1548
		intel_init_pch_refclk(dev);
-
 
1549
 
-
 
1550
	if (IS_BROXTON(dev))
-
 
1551
		ret = bxt_resume_prepare(dev_priv);
1413
	if (IS_GEN6(dev_priv))
1552
	else if (IS_SKYLAKE(dev))
1414
		intel_init_pch_refclk(dev);
1553
		ret = skl_resume_prepare(dev_priv);
1415
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1554
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1416
		hsw_disable_pc8(dev_priv);
1555
		hsw_disable_pc8(dev_priv);
Line 1423... Line 1562...
1423
	 */
1562
	 */
1424
	i915_gem_init_swizzling(dev);
1563
	i915_gem_init_swizzling(dev);
1425
	gen6_update_ring_freq(dev);
1564
	gen6_update_ring_freq(dev);
Line 1426... Line 1565...
1426
 
1565
 
-
 
1566
	intel_runtime_pm_enable_interrupts(dev_priv);
-
 
1567
 
-
 
1568
	/*
-
 
1569
	 * On VLV/CHV display interrupts are part of the display
-
 
1570
	 * power well, so hpd is reinitialized from there. For
-
 
1571
	 * everyone else do it here.
-
 
1572
	 */
-
 
1573
	if (!IS_VALLEYVIEW(dev_priv))
-
 
1574
		intel_hpd_init(dev_priv);
1427
	intel_runtime_pm_enable_interrupts(dev_priv);
1575
 
Line 1428... Line 1576...
1428
	intel_enable_gt_powersave(dev);
1576
	intel_enable_gt_powersave(dev);
1429
 
1577
 
1430
	if (ret)
1578
	if (ret)
Line 1439... Line 1587...
1439
 * This function implements common functionality of runtime and system
1587
 * This function implements common functionality of runtime and system
1440
 * suspend sequence.
1588
 * suspend sequence.
1441
 */
1589
 */
1442
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
1590
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
1443
{
1591
{
1444
	struct drm_device *dev = dev_priv->dev;
-
 
1445
	int ret;
1592
	int ret;
Line -... Line 1593...
-
 
1593
 
-
 
1594
	if (IS_BROXTON(dev_priv))
-
 
1595
		ret = bxt_suspend_complete(dev_priv);
-
 
1596
	else if (IS_SKYLAKE(dev_priv))
1446
 
1597
		ret = skl_suspend_complete(dev_priv);
1447
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1598
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1448
		ret = hsw_suspend_complete(dev_priv);
1599
		ret = hsw_suspend_complete(dev_priv);
1449
	else if (IS_VALLEYVIEW(dev))
1600
	else if (IS_VALLEYVIEW(dev_priv))
1450
		ret = vlv_suspend_complete(dev_priv);
1601
		ret = vlv_suspend_complete(dev_priv);
1451
	else
1602
	else
Line 1452... Line 1603...
1452
		ret = 0;
1603
		ret = 0;
Line 1482... Line 1633...
1482
	.freeze = i915_pm_suspend,
1633
	.freeze = i915_pm_suspend,
1483
	.freeze_late = i915_pm_suspend_late,
1634
	.freeze_late = i915_pm_suspend_late,
1484
	.thaw_early = i915_pm_resume_early,
1635
	.thaw_early = i915_pm_resume_early,
1485
	.thaw = i915_pm_resume,
1636
	.thaw = i915_pm_resume,
1486
	.poweroff = i915_pm_suspend,
1637
	.poweroff = i915_pm_suspend,
1487
	.poweroff_late = i915_pm_suspend_late,
1638
	.poweroff_late = i915_pm_poweroff_late,
1488
	.restore_early = i915_pm_resume_early,
1639
	.restore_early = i915_pm_resume_early,
1489
	.restore = i915_pm_resume,
1640
	.restore = i915_pm_resume,
Line 1490... Line 1641...
1490
 
1641
 
1491
	/* S0ix (via runtime suspend) event handlers */
1642
	/* S0ix (via runtime suspend) event handlers */
Line 1517... Line 1668...
1517
static struct drm_driver driver = {
1668
static struct drm_driver driver = {
1518
    /* Don't use MTRRs here; the Xserver or userspace app should
1669
	/* Don't use MTRRs here; the Xserver or userspace app should
1519
     * deal with them for Intel hardware.
1670
	 * deal with them for Intel hardware.
1520
     */
1671
	 */
1521
    .driver_features =
1672
	.driver_features =
1522
	    DRIVER_USE_AGP |
-
 
1523
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
1673
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
1524
	    DRIVER_RENDER,
1674
	    DRIVER_RENDER | DRIVER_MODESET,
1525
    .load = i915_driver_load,
1675
	.load = i915_driver_load,
1526
//    .unload = i915_driver_unload,
1676
//    .unload = i915_driver_unload,
1527
      .open = i915_driver_open,
1677
      .open = i915_driver_open,
1528
//    .lastclose = i915_driver_lastclose,
1678
//    .lastclose = i915_driver_lastclose,
1529
//    .preclose = i915_driver_preclose,
1679
//    .preclose = i915_driver_preclose,
1530
//    .postclose = i915_driver_postclose,
1680
//    .postclose = i915_driver_postclose,
-
 
1681
//	.set_busid = drm_pci_set_busid,
Line 1531... Line -...
1531
 
-
 
1532
    /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
-
 
1533
//    .suspend = i915_suspend,
-
 
1534
//    .resume = i915_resume,
-
 
1535
 
-
 
1536
//    .device_is_agp = i915_driver_device_is_agp,
1682
 
1537
#if defined(CONFIG_DEBUG_FS)
1683
#if defined(CONFIG_DEBUG_FS)
1538
	.debugfs_init = i915_debugfs_init,
1684
	.debugfs_init = i915_debugfs_init,
1539
	.debugfs_cleanup = i915_debugfs_cleanup,
1685
	.debugfs_cleanup = i915_debugfs_cleanup,
1540
#endif
1686
#endif
Line 1585... Line 1731...
1585
 
1731
 
1586
    return err;
1732
    return err;
Line -... Line 1733...
-
 
1733
}
-
 
1734
 
Line -... Line 1735...
-
 
1735
 
-
 
1736
MODULE_AUTHOR("Tungsten Graphics, Inc.");