KolibriOS Subversion repository: i915_drv.c, changes between Rev 5060 and Rev 5354.
(Note: the page viewer stripped the angle-bracketed header names from the #include lines below; they are not recoverable here and are left bare.)
--- i915_drv.c	(Rev 5060)
+++ i915_drv.c	(Rev 5354)
@@ -34,15 +34,13 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
 #include 
-#include 
 #include 
 #include 
 #include 
 
 #include 
 
 #include 
 
-#define __read_mostly
-
+#
@@ -334,9 +332,22 @@
 	.display_mmio_offset = VLV_DISPLAY_BASE,
 	GEN_CHV_PIPEOFFSETS,
 	CURSOR_OFFSETS,
 };
+
+static const struct intel_device_info intel_skylake_info = {
+	.is_preliminary = 1,
+	.is_skylake = 1,
+	.gen = 9, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
 
 /*
  * Make sure any device matches here are from most specific to most
  * general.  For example, since the Quanta match is based on the subsystem
  * and subvendor IDs, we need it to come before the more general IVB
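The hunk above adds a descriptor for Skylake to i915's per-platform table of feature flags; the rest of the driver keys off fields like gen and has_fbc rather than raw PCI IDs. Below is a minimal standalone sketch of that pattern; the struct and its fields are illustrative stand-ins, not the driver's real intel_device_info definition.

#include <stdio.h>

/* Illustrative model of a per-platform capability struct. */
struct device_info {
	const char *name;
	int gen;              /* hardware generation, e.g. 9 for Skylake */
	unsigned has_llc:1;   /* last-level cache shared with the CPU    */
	unsigned has_ddi:1;   /* digital display interface               */
	unsigned has_fbc:1;   /* framebuffer compression                 */
};

static const struct device_info skylake_info = {
	.name = "Skylake", .gen = 9,
	.has_llc = 1, .has_ddi = 1, .has_fbc = 1,
};

/* Code paths test flags, not device IDs, so enabling a new platform
 * mostly means adding one descriptor entry. */
static void enable_features(const struct device_info *info)
{
	if (info->has_fbc)
		printf("%s (gen%d): enabling framebuffer compression\n",
		       info->name, info->gen);
}

int main(void)
{
	enable_features(&skylake_info);
	return 0;
}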
@@ -366,10 +377,11 @@
 	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
 	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),	\
 	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),	\
 	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),	\
 	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
-	INTEL_CHV_IDS(&intel_cherryview_info)
+	INTEL_CHV_IDS(&intel_cherryview_info),	\
+	INTEL_SKL_IDS(&intel_skylake_info)
 
 static const struct pci_device_id pciidlist[] = {       /* aka */
 	INTEL_PCI_IDS,
     {0, 0, 0}
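INTEL_SKL_IDS() appends the new Skylake PCI IDs to the macro that fills pciidlist, and the comment earlier in the file explains why ordering matters: the table is scanned front to back and the first match wins, so specific entries must precede general ones. A small self-contained model of that rule, with made-up vendor/subsystem values:

#include <stdio.h>

/* Toy first-match device table: like pciidlist, the first entry that
 * matches wins. Values here are invented for the example. */
struct match { unsigned vendor, subvendor; const char *driver_data; };

#define ANY 0xffffu

static const struct match table[] = {
	{ 0x8086, 0x152d, "quanta-quirk" },  /* specific: subsystem match */
	{ 0x8086, ANY,    "generic-ivb"  },  /* general fallback          */
	{ 0, 0, NULL },                      /* terminator, like {0, 0, 0} */
};

static const char *lookup(unsigned vendor, unsigned subvendor)
{
	for (const struct match *m = table; m->driver_data; m++)
		if (m->vendor == vendor &&
		    (m->subvendor == ANY || m->subvendor == subvendor))
			return m->driver_data;
	return "no match";
}

int main(void)
{
	printf("%s\n", lookup(0x8086, 0x152d)); /* quanta-quirk */
	printf("%s\n", lookup(0x8086, 0x1043)); /* generic-ivb  */
	return 0;
}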
@@ -425,21 +437,29 @@
 				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
 			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
 				WARN_ON(!IS_HASWELL(dev));
-				WARN_ON(IS_ULT(dev));
+				WARN_ON(IS_HSW_ULT(dev));
 			} else if (IS_BROADWELL(dev)) {
 				dev_priv->pch_type = PCH_LPT;
 				dev_priv->pch_id =
 					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
 				DRM_DEBUG_KMS("This is Broadwell, assuming "
 					      "LynxPoint LP PCH\n");
 			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
 				WARN_ON(!IS_HASWELL(dev));
-				WARN_ON(!IS_ULT(dev));
+				WARN_ON(!IS_HSW_ULT(dev));
+			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_SPT;
+				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+				WARN_ON(!IS_SKYLAKE(dev));
+			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_SPT;
+				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+				WARN_ON(!IS_SKYLAKE(dev));
 			} else
 				continue;
 
 			break;
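This hunk teaches the PCH probe to recognize SunrisePoint, the PCH that pairs with Skylake, by its ISA-bridge device ID. The sketch below models the shape of that probe loop: skip unknown IDs with continue and stop at the first recognized bridge. The IDs are placeholders, not the driver's INTEL_PCH_*_DEVICE_ID_TYPE values.

#include <stdio.h>

/* Toy model of the PCH detection loop above. Ids are invented. */
enum pch_type { PCH_NONE, PCH_LPT, PCH_SPT };

static enum pch_type classify(unsigned short id)
{
	switch (id) {
	case 0x1111: return PCH_LPT;	/* placeholder LynxPoint id    */
	case 0x2222: return PCH_SPT;	/* placeholder SunrisePoint id */
	default:     return PCH_NONE;
	}
}

int main(void)
{
	const unsigned short bridges[] = { 0x0042, 0x2222, 0x1111 };

	for (int i = 0; i < 3; i++) {
		enum pch_type t = classify(bridges[i]);

		if (t == PCH_NONE)
			continue;	/* like the "} else continue;" above */
		printf("found PCH type %d (bridge id 0x%04x)\n", t, bridges[i]);
		break;		/* first recognized bridge wins */
	}
	return 0;
}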
@@ -457,7 +477,11 @@
 		return false;
 
 	if (i915.semaphores >= 0)
 		return i915.semaphores;
 
+	/* TODO: make semaphores and Execlists play nicely together */
+	if (i915.enable_execlists)
+		return false;
+
 	/* Until we get further testing... */
 	if (IS_GEN8(dev))
@@ -486,9 +510,13 @@
 			intel_encoder->suspend(intel_encoder);
 	}
 	drm_modeset_unlock_all(dev);
 }
 
-static int i915_drm_freeze(struct drm_device *dev)
+static int intel_suspend_complete(struct drm_i915_private *dev_priv);
+static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
+			      bool rpm_resume);
+
+static int i915_drm_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
@@ -516,9 +544,11 @@
 			dev_err(&dev->pdev->dev,
 				"GEM idle failed, resume might fail\n");
 			return error;
 		}
+
+		intel_suspend_gt_powersave(dev);
 
 		/*
 		 * Disable CRTCs directly since we want to preserve sw state
 		 * for _thaw. Also, power gate the CRTC power wells.
 		 */
@@ -527,12 +557,10 @@
 			intel_crtc_control(crtc, false);
 		drm_modeset_unlock_all(dev);
 
 		intel_dp_mst_suspend(dev);
 
-		flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-		intel_runtime_pm_disable_interrupts(dev);
-		intel_suspend_encoders(dev_priv);
+		intel_runtime_pm_disable_interrupts(dev_priv);
+		intel_hpd_cancel_work(dev_priv);
 
-		intel_suspend_gt_powersave(dev);
+		intel_suspend_encoders(dev_priv);
 
@@ -551,15 +579,13 @@
 	intel_opregion_notify_adapter(dev, opregion_target_state);
 
 	intel_uncore_forcewake_reset(dev, false);
 	intel_opregion_fini(dev);
 
-	console_lock();
-	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
-	console_unlock();
+	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
 
 	dev_priv->suspend_count++;
 
 	intel_display_set_init_power(dev_priv, false);
 
 	return 0;
 }
@@ -566,49 +592,35 @@
 
-int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_suspend_late(struct drm_device *drm_dev)
 {
-	int error;
+	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+	int ret;
 
-	if (!dev || !dev->dev_private) {
-		DRM_ERROR("dev: %p\n", dev);
-		DRM_ERROR("DRM not initialized, aborting suspend.\n");
-		return -ENODEV;
-	}
+	ret = intel_suspend_complete(dev_priv);
 
-	if (state.event == PM_EVENT_PRETHAW)
-		return 0;
+	if (ret) {
+		DRM_ERROR("Suspend complete failed: %d\n", ret);
 
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
-		return 0;
+		return ret;
+	}
 
-	error = i915_drm_freeze(dev);
-	if (error)
-		return error;
-
-	if (state.event == PM_EVENT_SUSPEND) {
-		/* Shut down the device */
-		pci_disable_device(dev->pdev);
-		pci_set_power_state(dev->pdev, PCI_D3hot);
-	}
+	pci_disable_device(drm_dev->pdev);
+	pci_set_power_state(drm_dev->pdev, PCI_D3hot);
 
 	return 0;
 }
 
-void intel_console_resume(struct work_struct *work)
+int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
 {
-	struct drm_i915_private *dev_priv =
-		container_of(work, struct drm_i915_private,
-			     console_resume_work);
-	struct drm_device *dev = dev_priv->dev;
+	int error;
 
-	console_lock();
-	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
-	console_unlock();
-}
+	if (!dev || !dev->dev_private) {
+		DRM_ERROR("dev: %p\n", dev);
+		DRM_ERROR("DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
 
-static int i915_drm_thaw_early(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
+			 state.event != PM_EVENT_FREEZE))
+		return -EINVAL;
 
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-		hsw_disable_pc8(dev_priv);
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
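The rewrite above splits the old single-shot i915_suspend() into phases: i915_drm_suspend() quiesces display and GEM, and i915_drm_suspend_late() runs the platform power-down (via intel_suspend_complete()) and only then cuts PCI power. A compilable toy model of that two-phase ordering, with hypothetical stand-in functions:

#include <stdio.h>

/* Stand-ins for the two suspend phases and the final PCI power-off. */
static int drm_suspend(void)      { printf("suspend: stop display, idle GEM\n"); return 0; }
static int suspend_complete(void) { printf("suspend_late: platform power-down\n"); return 0; }
static void pci_power_off(void)   { printf("suspend_late: PCI -> D3hot\n"); }

static int full_suspend(void)
{
	int ret = drm_suspend();	/* i915_drm_suspend() analogue */
	if (ret)
		return ret;
	ret = suspend_complete();	/* i915_drm_suspend_late() analogue */
	if (ret)
		return ret;		/* abort before touching PCI power */
	pci_power_off();
	return 0;
}

int main(void) { return full_suspend(); }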
@@ -644,60 +656,43 @@
 			DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
 			atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
 		}
 		mutex_unlock(&dev->struct_mutex);
 
-		intel_runtime_pm_restore_interrupts(dev);
+		/* We need working interrupts for modeset enabling ... */
+		intel_runtime_pm_enable_interrupts(dev_priv);
 
 		intel_modeset_init_hw(dev);
 
-		{
-			unsigned long irqflags;
-			spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-			if (dev_priv->display.hpd_irq_setup)
-				dev_priv->display.hpd_irq_setup(dev);
-			spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-		}
+		spin_lock_irq(&dev_priv->irq_lock);
+		if (dev_priv->display.hpd_irq_setup)
+			dev_priv->display.hpd_irq_setup(dev);
+		spin_unlock_irq(&dev_priv->irq_lock);
 
-		intel_dp_mst_resume(dev);
 		drm_modeset_lock_all(dev);
 		intel_modeset_setup_hw_state(dev, true);
 		drm_modeset_unlock_all(dev);
 
+		intel_dp_mst_resume(dev);
+
 		/*
 		 * ... but also need to make sure that hotplug processing
 		 * doesn't cause havoc. Like in the driver load code we don't
 		 * bother with the tiny race here where we might loose hotplug
 		 * notifications.
 		 * */
-		intel_hpd_init(dev);
+		intel_hpd_init(dev_priv);
 		/* Config may have changed between suspend and resume */
 		drm_helper_hpd_irq_event(dev);
 	}
 
 	intel_opregion_init(dev);
 
-	/*
-	 * The console lock can be pretty contented on resume due
-	 * to all the printk activity.  Try to keep it out of the hot
-	 * path of resume if possible.
-	 */
-	if (console_trylock()) {
-		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
-		console_unlock();
-	} else {
-		schedule_work(&dev_priv->console_resume_work);
-	}
+	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
 
 	mutex_lock(&dev_priv->modeset_restore_lock);
 	dev_priv->modeset_restore = MODESET_DONE;
 	mutex_unlock(&dev_priv->modeset_restore_lock);
 
 	intel_opregion_notify_adapter(dev, PCI_D0);
 
-	return 0;
-}
-
-static int i915_drm_thaw(struct drm_device *dev)
-{
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_check_and_clear_faults(dev);
+	drm_kms_helper_poll_enable(dev);
@@ -722,29 +717,30 @@
 	if (pci_enable_device(dev->pdev))
 		return -EIO;
 
 	pci_set_master(dev->pdev);
 
-	return i915_drm_thaw_early(dev);
-}
+	if (IS_VALLEYVIEW(dev_priv))
+		ret = vlv_resume_prepare(dev_priv, false);
+	if (ret)
+		DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
 
-int i915_resume(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
+	intel_uncore_early_sanitize(dev, true);
 
-	/*
-	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
-	 * earlier) need to restore the GTT mappings since the BIOS might clear
-	 * all our scratch PTEs.
-	 */
-	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
-	if (ret)
-		return ret;
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		hsw_disable_pc8(dev_priv);
 
-	drm_kms_helper_poll_enable(dev);
-	return 0;
+	intel_uncore_sanitize(dev);
+	intel_power_domains_init_hw(dev_priv);
+
+	return ret;
 }
 
-static int i915_resume_legacy(struct drm_device *dev)
+int i915_resume_legacy(struct drm_device *dev)
 {
-	i915_resume_early(dev);
+	int ret;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	ret = i915_drm_resume_early(dev);
+	if (ret)
@@ -794,9 +790,12 @@
 					  "error for simulated gpu hangs\n");
 				ret = 0;
 			}
 	}
 
+	if (i915_stop_ring_allow_warn(dev_priv))
+		pr_notice("drm/i915: Resetting chip after gpu hang\n");
+
 	if (ret) {
 		DRM_ERROR("Failed to reset chip: %i\n", ret);
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
@@ -814,14 +813,17 @@
 	 *
 	 * Ring buffer needs to be re-initialized in the KMS case, or if X
 	 * was running at the time of the reset (i.e. we weren't VT
 	 * switched away).
 	 */
-	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
-			!dev_priv->ums.mm_suspended) {
-		dev_priv->ums.mm_suspended = 0;
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
+		dev_priv->gpu_error.reload_in_reset = true;
 
 		ret = i915_gem_init_hw(dev);
+
+		dev_priv->gpu_error.reload_in_reset = false;
+
 		mutex_unlock(&dev->struct_mutex);
 		if (ret) {
 			DRM_ERROR("Failed hw init on reset %d\n", ret);
 			return ret;
@@ -839,11 +841,9 @@
 		 * previous concerns that it doesn't respond well to some forms
 		 * of re-init after reset.
 		 */
 		if (INTEL_INFO(dev)->gen > 5)
 			intel_reset_gt_powersave(dev);
-
-		intel_hpd_init(dev);
 	} else {
 		mutex_unlock(&dev->struct_mutex);
 	}
 
@@ -893,14 +893,13 @@
 	}
 
 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_freeze(drm_dev);
+	return i915_drm_suspend(drm_dev);
 }
 
 static int i915_pm_suspend_late(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	struct drm_i915_private *dev_priv = drm_dev->dev_private;
 
@@ -914,16 +913,10 @@
 	 * similar so that power domains can be employed.
 	 */
 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
-		hsw_enable_pc8(dev_priv);
-
-	pci_disable_device(pdev);
-	pci_set_power_state(pdev, PCI_D3hot);
-
-	return 0;
+	return i915_drm_suspend_late(drm_dev);
 }
 
 static int i915_pm_resume_early(struct device *dev)
 {
@@ -930,65 +923,18 @@
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
-	return i915_resume_early(drm_dev);
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	return i915_drm_resume_early(drm_dev);
 }
 
 static int i915_pm_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
-	return i915_resume(drm_dev);
-}
-
-static int i915_pm_freeze(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-
-	if (!drm_dev || !drm_dev->dev_private) {
-		dev_err(dev, "DRM not initialized, aborting suspend.\n");
-		return -ENODEV;
-	}
-
-	return i915_drm_freeze(drm_dev);
-}
-
-static int i915_pm_thaw_early(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-
-	return i915_drm_thaw_early(drm_dev);
-}
-
-static int i915_pm_thaw(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-
-	return i915_drm_thaw(drm_dev);
-}
-
-static int i915_pm_poweroff(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-
-	return i915_drm_freeze(drm_dev);
-}
-
-static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
-{
-	hsw_enable_pc8(dev_priv);
-
-	return 0;
-}
-
-static int snb_runtime_resume(struct drm_i915_private *dev_priv)
-{
-	struct drm_device *dev = dev_priv->dev;
-
-	intel_init_pch_refclk(dev);
-
-	return 0;
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	return i915_drm_resume(drm_dev);
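After this hunk every PM entry point funnels into the same small set of i915_drm_* helpers, with the DRM_SWITCH_POWER_OFF check repeated at each entry. The sketch below models the callback-table shape the PM core uses to dispatch these phases; all names are illustrative, not the kernel's dev_pm_ops.

#include <stdio.h>

/* Toy dev_pm_ops-style callback table. */
struct pm_ops {
	int (*suspend)(void);
	int (*suspend_late)(void);
	int (*resume_early)(void);
	int (*resume)(void);
};

static int drv_suspend(void)      { printf("suspend\n");      return 0; }
static int drv_suspend_late(void) { printf("suspend_late\n"); return 0; }
static int drv_resume_early(void) { printf("resume_early\n"); return 0; }
static int drv_resume(void)       { printf("resume\n");       return 0; }

static const struct pm_ops ops = {
	.suspend      = drv_suspend,
	.suspend_late = drv_suspend_late,
	.resume_early = drv_resume_early,
	.resume       = drv_resume,
};

int main(void)
{
	/* The PM core invokes the phases in a fixed order. */
	ops.suspend();
	ops.suspend_late();
	ops.resume_early();
	ops.resume();
	return 0;
}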
@@ -1289,8 +1235,8 @@
 
 	DRM_ERROR("GT register access while GT waking disabled\n");
 	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
 }
 
-static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
+static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
 {
 	u32 mask;
	u32 mask;
Line 1329... Line 1275...
1329
	vlv_force_gfx_clock(dev_priv, false);
1275
	vlv_force_gfx_clock(dev_priv, false);
Line 1330... Line 1276...
1330
 
1276
 
1331
	return err;
1277
	return err;
Line 1332... Line 1278...
1332
}
1278
}
-
 
1279
 
1333
 
1280
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1334
static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
1281
				bool rpm_resume)
1335
{
1282
{
1336
	struct drm_device *dev = dev_priv->dev;
1283
	struct drm_device *dev = dev_priv->dev;
Line 1354... Line 1301...
1354
	if (!ret)
1301
	if (!ret)
1355
		ret = err;
1302
		ret = err;
Line 1356... Line 1303...
1356
 
1303
 
Line -... Line 1304...
-
 
1304
	vlv_check_no_gt_access(dev_priv);
1357
	vlv_check_no_gt_access(dev_priv);
1305
 
1358
 
1306
	if (rpm_resume) {
-
 
1307
	intel_init_clock_gating(dev);
Line 1359... Line 1308...
1359
	intel_init_clock_gating(dev);
1308
	i915_gem_restore_fences(dev);
1360
	i915_gem_restore_fences(dev);
1309
	}
Line 1361... Line 1310...
1361
 
1310
 
Line 1370... Line 1319...
1370
	int ret;
1319
	int ret;
Line 1371... Line 1320...
1371
 
1320
 
1372
	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
1321
	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
Line 1373... Line 1322...
1373
		return -ENODEV;
1322
		return -ENODEV;
-
 
1323
 
-
 
1324
	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1374
 
1325
		return -ENODEV;
Line 1375... Line 1326...
1375
	WARN_ON(!HAS_RUNTIME_PM(dev));
1326
 
Line 1376... Line 1327...
1376
	assert_force_wake_inactive(dev_priv);
1327
	assert_force_wake_inactive(dev_priv);
Line 1399... Line 1350...
1399
	 * an RPM reference.
1350
	 * an RPM reference.
1400
	 */
1351
	 */
1401
	i915_gem_release_all_mmaps(dev_priv);
1352
	i915_gem_release_all_mmaps(dev_priv);
1402
	mutex_unlock(&dev->struct_mutex);
1353
	mutex_unlock(&dev->struct_mutex);
Line 1403... Line -...
1403
 
-
 
1404
	/*
-
 
1405
	 * rps.work can't be rearmed here, since we get here only after making
-
 
1406
	 * sure the GPU is idle and the RPS freq is set to the minimum. See
-
 
1407
	 * intel_mark_idle().
-
 
1408
	 */
1354
 
1409
	cancel_work_sync(&dev_priv->rps.work);
1355
	intel_suspend_gt_powersave(dev);
1410
	intel_runtime_pm_disable_interrupts(dev);
-
 
1411
 
-
 
1412
	if (IS_GEN6(dev)) {
-
 
1413
		ret = 0;
-
 
1414
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
-
 
1415
		ret = hsw_runtime_suspend(dev_priv);
-
 
1416
	} else if (IS_VALLEYVIEW(dev)) {
-
 
1417
		ret = vlv_runtime_suspend(dev_priv);
-
 
1418
	} else {
-
 
1419
		ret = -ENODEV;
-
 
1420
		WARN_ON(1);
-
 
Line -... Line 1356...
-
 
1356
	intel_runtime_pm_disable_interrupts(dev_priv);
1421
	}
1357
 
1422
 
1358
	ret = intel_suspend_complete(dev_priv);
1423
	if (ret) {
1359
	if (ret) {
Line 1424... Line 1360...
1424
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
1360
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
1425
		intel_runtime_pm_restore_interrupts(dev);
1361
		intel_runtime_pm_enable_interrupts(dev_priv);
Line 1426... Line 1362...
1426
 
1362
 
1427
		return ret;
1363
		return ret;
Line 1428... Line 1364...
1428
	}
1364
	}
-
 
1365
 
-
 
1366
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
-
 
1367
	dev_priv->pm.suspended = true;
-
 
1368
 
-
 
1369
	/*
1429
 
1370
	 * FIXME: We really should find a document that references the arguments
1430
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
1371
	 * used below!
1431
	dev_priv->pm.suspended = true;
1372
	 */
1432
 
1373
	if (IS_HASWELL(dev)) {
1433
	/*
1374
		/*
1434
	 * current versions of firmware which depend on this opregion
1375
	 * current versions of firmware which depend on this opregion
1435
	 * notification have repurposed the D1 definition to mean
1376
	 * notification have repurposed the D1 definition to mean
-
 
1377
	 * "runtime suspended" vs. what you would normally expect (D3)
-
 
1378
		 * to distinguish it from notifications that might be sent via
-
 
1379
		 * the suspend path.
-
 
1380
	 */
-
 
1381
	intel_opregion_notify_adapter(dev, PCI_D1);
-
 
1382
	} else {
-
 
1383
		/*
-
 
1384
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
-
 
1385
		 * being detected, and the call we do at intel_runtime_resume()
-
 
1386
		 * won't be able to restore them. Since PCI_D3hot matches the
-
 
1387
		 * actual specification and appears to be working, use it. Let's
Line 1436... Line 1388...
1436
	 * "runtime suspended" vs. what you would normally expect (D3)
1388
		 * assume the other non-Haswell platforms will stay the same as
1437
	 * to distinguish it from notifications that might be sent
1389
		 * Broadwell.
1438
	 * via the suspend path.
1390
		 */
Line 1439... Line 1391...
1439
	 */
1391
		intel_opregion_notify_adapter(dev, PCI_D3hot);
1440
	intel_opregion_notify_adapter(dev, PCI_D1);
1392
	}
1441
 
1393
 
1442
	DRM_DEBUG_KMS("Device suspended\n");
1394
	DRM_DEBUG_KMS("Device suspended\n");
1443
	return 0;
1395
	return 0;
1444
}
1396
}
@@ -1445,43 +1397,83 @@
 
 static int intel_runtime_resume(struct device *device)
 {
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
+	int ret = 0;
 
-	WARN_ON(!HAS_RUNTIME_PM(dev));
+	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+		return -ENODEV;
 
 	DRM_DEBUG_KMS("Resuming device\n");
 
 	intel_opregion_notify_adapter(dev, PCI_D0);
 	dev_priv->pm.suspended = false;
 
-	if (IS_GEN6(dev)) {
-		ret = snb_runtime_resume(dev_priv);
-	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
-		ret = hsw_runtime_resume(dev_priv);
-	} else if (IS_VALLEYVIEW(dev)) {
-		ret = vlv_runtime_resume(dev_priv);
-	} else {
-		WARN_ON(1);
-		ret = -ENODEV;
-	}
+	if (IS_GEN6(dev_priv))
+		intel_init_pch_refclk(dev);
+	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		hsw_disable_pc8(dev_priv);
+	else if (IS_VALLEYVIEW(dev_priv))
+		ret = vlv_resume_prepare(dev_priv, true);
 
 	/*
 	 * No point of rolling back things in case of an error, as the best
 	 * we can do is to hope that things will still work (and disable RPM).
 	 */
 	i915_gem_init_swizzling(dev);
 	gen6_update_ring_freq(dev);
 
-	intel_runtime_pm_restore_interrupts(dev);
-	intel_reset_gt_powersave(dev);
+	intel_runtime_pm_enable_interrupts(dev_priv);
+	intel_enable_gt_powersave(dev);
 
 	if (ret)
 		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
 	else
 		DRM_DEBUG_KMS("Device resumed\n");
 
 	return ret;
 }
 
+/*
+ * This function implements common functionality of runtime and system
+ * suspend sequence.
+ */
+static int intel_suspend_complete(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int ret;
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		ret = hsw_suspend_complete(dev_priv);
+	else if (IS_VALLEYVIEW(dev))
+		ret = vlv_suspend_complete(dev_priv);
+	else
+		ret = 0;
+
+	return ret;
+}
+
+static const struct dev_pm_ops i915_pm_ops = {
+	/*
+	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
+	 * PMSG_RESUME]
+	 */
+	.suspend = i915_pm_suspend,
+	.suspend_late = i915_pm_suspend_late,
+	.resume_early = i915_pm_resume_early,
+	.resume = i915_pm_resume,
+
+	/*
+	 * S4 event handlers
+	 * @freeze, @freeze_late    : called (1) before creating the
+	 *                            hibernation image [PMSG_FREEZE] and
+	 *                            (2) after rebooting, before restoring
+	 *                            the image [PMSG_QUIESCE]
+	 * @thaw, @thaw_early       : called (1) after creating the hibernation
+	 *                            image, before writing it [PMSG_THAW]
+	 *                            and (2) after failing to create or
+	 *                            restore the image [PMSG_RECOVER]
+	 * @poweroff, @poweroff_late: called after writing the hibernation
+	 *                            image, before rebooting [PMSG_HIBERNATE]
+	 * @restore, @restore_early : called after rebooting and restoring the
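The new intel_suspend_complete() above is the consolidation point: both i915_drm_suspend_late() and intel_runtime_suspend() now call it instead of each keeping its own per-platform ladder. A minimal model of one shared dispatch helper serving two callers; platform tags and names are invented for the example:

#include <stdio.h>

/* Toy per-platform dispatch shared by the system and runtime paths. */
enum platform { PLAT_HSW, PLAT_VLV, PLAT_OTHER };

static int suspend_complete(enum platform p)
{
	switch (p) {
	case PLAT_HSW:  printf("hsw_suspend_complete\n"); return 0;
	case PLAT_VLV:  printf("vlv_suspend_complete\n"); return 0;
	default:        return 0;	/* nothing platform-specific */
	}
}

int main(void)
{
	suspend_complete(PLAT_HSW);	/* e.g. from the system suspend path  */
	suspend_complete(PLAT_VLV);	/* e.g. from the runtime suspend path */
	return 0;
}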
@@ -1540,11 +1532,9 @@
     /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
 //    .suspend = i915_suspend,
 //    .resume = i915_resume,
 
 //    .device_is_agp = i915_driver_device_is_agp,
-//    .master_create = i915_master_create,
-//    .master_destroy = i915_master_destroy,
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = i915_debugfs_init,
 	.debugfs_cleanup = i915_debugfs_cleanup,
 #endif