Subversion Repositories Kolibri OS — i915_drv.c

Rev Author Line No. Line
2325 Serge 1
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2
 */
3
/*
4
 *
5
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6
 * All Rights Reserved.
7
 *
8
 * Permission is hereby granted, free of charge, to any person obtaining a
9
 * copy of this software and associated documentation files (the
10
 * "Software"), to deal in the Software without restriction, including
11
 * without limitation the rights to use, copy, modify, merge, publish,
12
 * distribute, sub license, and/or sell copies of the Software, and to
13
 * permit persons to whom the Software is furnished to do so, subject to
14
 * the following conditions:
15
 *
16
 * The above copyright notice and this permission notice (including the
17
 * next paragraph) shall be included in all copies or substantial portions
18
 * of the Software.
19
 *
20
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27
 *
28
 */
29
 
6084 serge 30
#include <linux/device.h>
6660 serge 31
#include <linux/acpi.h>
3031 serge 32
#include <drm/drmP.h>
33
#include <drm/i915_drm.h>
2330 Serge 34
#include "i915_drv.h"
4126 Serge 35
#include "i915_trace.h"
2330 Serge 36
#include "intel_drv.h"
2325 Serge 37
 
38
#include <linux/console.h>
6084 serge 39
#include <linux/pm_runtime.h>
3031 serge 40
#include <drm/drm_crtc_helper.h>
41
 
2325 Serge 42
#include <syscall.h>
43
 
6937 serge 44
int init_display_kms(struct drm_device *dev);
45
 
46
extern int intel_agp_enabled;
47
 
5060 serge 48
static struct drm_driver driver;
2330 Serge 49
 
5060 serge 50
#define GEN_DEFAULT_PIPEOFFSETS \
51
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
52
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
53
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
54
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
55
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
3031 serge 56
 
5060 serge 57
#define GEN_CHV_PIPEOFFSETS \
58
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
59
			  CHV_PIPE_C_OFFSET }, \
60
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
61
			   CHV_TRANSCODER_C_OFFSET, }, \
62
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
63
			     CHV_PALETTE_C_OFFSET }
3031 serge 64
 
5060 serge 65
#define CURSOR_OFFSETS \
66
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
2330 Serge 67
 
5060 serge 68
#define IVB_CURSOR_OFFSETS \
69
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
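/*
 * The *_OFFSETS macros above expand into designated initializers for the
 * per-pipe MMIO offset tables in struct intel_device_info, so the
 * device-info definitions below can share one offset list per display
 * layout instead of repeating it for every platform.
 */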
2330 Serge 70
 
71
 
3031 serge 72
 
4104 Serge 73
 
2339 Serge 74
static const struct intel_device_info intel_i915g_info = {
3746 Serge 75
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
2339 Serge 76
	.has_overlay = 1, .overlay_needs_physical = 1,
4560 Serge 77
	.ring_mask = RENDER_RING,
5060 serge 78
	GEN_DEFAULT_PIPEOFFSETS,
79
	CURSOR_OFFSETS,
2339 Serge 80
};
81
static const struct intel_device_info intel_i915gm_info = {
3746 Serge 82
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
2339 Serge 83
	.cursor_needs_physical = 1,
84
	.has_overlay = 1, .overlay_needs_physical = 1,
85
	.supports_tv = 1,
4560 Serge 86
	.has_fbc = 1,
87
	.ring_mask = RENDER_RING,
5060 serge 88
	GEN_DEFAULT_PIPEOFFSETS,
89
	CURSOR_OFFSETS,
2339 Serge 90
};
91
static const struct intel_device_info intel_i945g_info = {
3746 Serge 92
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
2339 Serge 93
	.has_overlay = 1, .overlay_needs_physical = 1,
4560 Serge 94
	.ring_mask = RENDER_RING,
5060 serge 95
	GEN_DEFAULT_PIPEOFFSETS,
96
	CURSOR_OFFSETS,
2339 Serge 97
};
98
static const struct intel_device_info intel_i945gm_info = {
3746 Serge 99
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
2339 Serge 100
	.has_hotplug = 1, .cursor_needs_physical = 1,
101
	.has_overlay = 1, .overlay_needs_physical = 1,
102
	.supports_tv = 1,
4560 Serge 103
	.has_fbc = 1,
104
	.ring_mask = RENDER_RING,
5060 serge 105
	GEN_DEFAULT_PIPEOFFSETS,
106
	CURSOR_OFFSETS,
2339 Serge 107
};
108
 
109
static const struct intel_device_info intel_i965g_info = {
3746 Serge 110
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
2339 Serge 111
	.has_hotplug = 1,
112
	.has_overlay = 1,
4560 Serge 113
	.ring_mask = RENDER_RING,
5060 serge 114
	GEN_DEFAULT_PIPEOFFSETS,
115
	CURSOR_OFFSETS,
2339 Serge 116
};
117
 
118
static const struct intel_device_info intel_i965gm_info = {
3746 Serge 119
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
2339 Serge 120
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
121
	.has_overlay = 1,
122
	.supports_tv = 1,
4560 Serge 123
	.ring_mask = RENDER_RING,
5060 serge 124
	GEN_DEFAULT_PIPEOFFSETS,
125
	CURSOR_OFFSETS,
2339 Serge 126
};
127
 
128
static const struct intel_device_info intel_g33_info = {
3746 Serge 129
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
2339 Serge 130
	.need_gfx_hws = 1, .has_hotplug = 1,
131
	.has_overlay = 1,
4560 Serge 132
	.ring_mask = RENDER_RING,
5060 serge 133
	GEN_DEFAULT_PIPEOFFSETS,
134
	CURSOR_OFFSETS,
2339 Serge 135
};
136
 
137
static const struct intel_device_info intel_g45_info = {
3746 Serge 138
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
2339 Serge 139
	.has_pipe_cxsr = 1, .has_hotplug = 1,
4560 Serge 140
	.ring_mask = RENDER_RING | BSD_RING,
5060 serge 141
	GEN_DEFAULT_PIPEOFFSETS,
142
	CURSOR_OFFSETS,
2339 Serge 143
};
144
 
145
static const struct intel_device_info intel_gm45_info = {
3746 Serge 146
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
2339 Serge 147
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
148
	.has_pipe_cxsr = 1, .has_hotplug = 1,
149
	.supports_tv = 1,
4560 Serge 150
	.ring_mask = RENDER_RING | BSD_RING,
5060 serge 151
	GEN_DEFAULT_PIPEOFFSETS,
152
	CURSOR_OFFSETS,
2339 Serge 153
};
154
 
155
static const struct intel_device_info intel_pineview_info = {
3746 Serge 156
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
2339 Serge 157
	.need_gfx_hws = 1, .has_hotplug = 1,
158
	.has_overlay = 1,
5060 serge 159
	GEN_DEFAULT_PIPEOFFSETS,
160
	CURSOR_OFFSETS,
2339 Serge 161
};
162
 
163
static const struct intel_device_info intel_ironlake_d_info = {
3746 Serge 164
	.gen = 5, .num_pipes = 2,
3031 serge 165
	.need_gfx_hws = 1, .has_hotplug = 1,
4560 Serge 166
	.ring_mask = RENDER_RING | BSD_RING,
5060 serge 167
	GEN_DEFAULT_PIPEOFFSETS,
168
	CURSOR_OFFSETS,
2339 Serge 169
};
170
 
171
static const struct intel_device_info intel_ironlake_m_info = {
3746 Serge 172
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
2339 Serge 173
	.need_gfx_hws = 1, .has_hotplug = 1,
174
	.has_fbc = 1,
4560 Serge 175
	.ring_mask = RENDER_RING | BSD_RING,
5060 serge 176
	GEN_DEFAULT_PIPEOFFSETS,
177
	CURSOR_OFFSETS,
2339 Serge 178
};
179
 
2325 Serge 180
static const struct intel_device_info intel_sandybridge_d_info = {
3746 Serge 181
	.gen = 6, .num_pipes = 2,
2330 Serge 182
	.need_gfx_hws = 1, .has_hotplug = 1,
4560 Serge 183
	.has_fbc = 1,
184
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
3031 serge 185
	.has_llc = 1,
5060 serge 186
	GEN_DEFAULT_PIPEOFFSETS,
187
	CURSOR_OFFSETS,
2325 Serge 188
};
189
 
190
static const struct intel_device_info intel_sandybridge_m_info = {
3746 Serge 191
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
2330 Serge 192
	.need_gfx_hws = 1, .has_hotplug = 1,
6084 serge 193
	.has_fbc = 1,
4560 Serge 194
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
3031 serge 195
	.has_llc = 1,
5060 serge 196
	GEN_DEFAULT_PIPEOFFSETS,
197
	CURSOR_OFFSETS,
2325 Serge 198
};
199
 
3746 Serge 200
#define GEN7_FEATURES  \
201
	.gen = 7, .num_pipes = 3, \
202
	.need_gfx_hws = 1, .has_hotplug = 1, \
4560 Serge 203
	.has_fbc = 1, \
204
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
6937 serge 205
	.has_llc = 1, \
206
	GEN_DEFAULT_PIPEOFFSETS, \
207
	IVB_CURSOR_OFFSETS
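/*
 * GEN7_FEATURES is meant to be used as the first entry of an initializer
 * list; a platform definition can override any of its fields afterwards,
 * since a later designated initializer legitimately overrides an earlier
 * one (see the "last one wins" note at intel_ivybridge_q_info below).
 */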
3746 Serge 208
 
2339 Serge 209
static const struct intel_device_info intel_ivybridge_d_info = {
3746 Serge 210
	GEN7_FEATURES,
211
	.is_ivybridge = 1,
2339 Serge 212
};
2325 Serge 213
 
2339 Serge 214
static const struct intel_device_info intel_ivybridge_m_info = {
3746 Serge 215
	GEN7_FEATURES,
216
	.is_ivybridge = 1,
217
	.is_mobile = 1,
2339 Serge 218
};
219
 
3746 Serge 220
static const struct intel_device_info intel_ivybridge_q_info = {
221
	GEN7_FEATURES,
222
	.is_ivybridge = 1,
223
	.num_pipes = 0, /* legal, last one wins */
224
};
225
 
6937 serge 226
#define VLV_FEATURES  \
227
	.gen = 7, .num_pipes = 2, \
228
	.need_gfx_hws = 1, .has_hotplug = 1, \
229
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
230
	.display_mmio_offset = VLV_DISPLAY_BASE, \
231
	GEN_DEFAULT_PIPEOFFSETS, \
232
	CURSOR_OFFSETS
233
 
3031 serge 234
static const struct intel_device_info intel_valleyview_m_info = {
6937 serge 235
	VLV_FEATURES,
236
	.is_valleyview = 1,
3746 Serge 237
	.is_mobile = 1,
3031 serge 238
};
239
 
240
static const struct intel_device_info intel_valleyview_d_info = {
6937 serge 241
	VLV_FEATURES,
3031 serge 242
	.is_valleyview = 1,
243
};
244
 
6937 serge 245
#define HSW_FEATURES  \
246
	GEN7_FEATURES, \
247
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
248
	.has_ddi = 1, \
249
	.has_fpga_dbg = 1
250
 
3031 serge 251
static const struct intel_device_info intel_haswell_d_info = {
6937 serge 252
	HSW_FEATURES,
3746 Serge 253
	.is_haswell = 1,
3031 serge 254
};
255
 
256
static const struct intel_device_info intel_haswell_m_info = {
6937 serge 257
	HSW_FEATURES,
3746 Serge 258
	.is_haswell = 1,
259
	.is_mobile = 1,
3031 serge 260
};
261
 
4560 Serge 262
static const struct intel_device_info intel_broadwell_d_info = {
6937 serge 263
	HSW_FEATURES,
264
	.gen = 8,
4560 Serge 265
};
266
 
267
static const struct intel_device_info intel_broadwell_m_info = {
6937 serge 268
	HSW_FEATURES,
269
	.gen = 8, .is_mobile = 1,
4560 Serge 270
};
271
 
5060 serge 272
static const struct intel_device_info intel_broadwell_gt3d_info = {
6937 serge 273
	HSW_FEATURES,
274
	.gen = 8,
5060 serge 275
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
276
};
277
 
278
static const struct intel_device_info intel_broadwell_gt3m_info = {
6937 serge 279
	HSW_FEATURES,
280
	.gen = 8, .is_mobile = 1,
5060 serge 281
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
282
};
283
 
284
static const struct intel_device_info intel_cherryview_info = {
285
	.gen = 8, .num_pipes = 3,
286
	.need_gfx_hws = 1, .has_hotplug = 1,
287
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
6937 serge 288
	.is_cherryview = 1,
5060 serge 289
	.display_mmio_offset = VLV_DISPLAY_BASE,
290
	GEN_CHV_PIPEOFFSETS,
291
	CURSOR_OFFSETS,
292
};
293
 
5354 serge 294
static const struct intel_device_info intel_skylake_info = {
6937 serge 295
	HSW_FEATURES,
5354 serge 296
	.is_skylake = 1,
6937 serge 297
	.gen = 9,
5354 serge 298
};
299
 
6084 serge 300
static const struct intel_device_info intel_skylake_gt3_info = {
6937 serge 301
	HSW_FEATURES,
6084 serge 302
	.is_skylake = 1,
6937 serge 303
	.gen = 9,
6084 serge 304
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
305
};
306
 
307
static const struct intel_device_info intel_broxton_info = {
308
	.is_preliminary = 1,
6937 serge 309
	.is_broxton = 1,
6084 serge 310
	.gen = 9,
311
	.need_gfx_hws = 1, .has_hotplug = 1,
312
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
313
	.num_pipes = 3,
314
	.has_ddi = 1,
315
	.has_fpga_dbg = 1,
316
	.has_fbc = 1,
317
	GEN_DEFAULT_PIPEOFFSETS,
318
	IVB_CURSOR_OFFSETS,
319
};
320
 
6937 serge 321
static const struct intel_device_info intel_kabylake_info = {
322
	HSW_FEATURES,
323
	.is_preliminary = 1,
324
	.is_kabylake = 1,
325
	.gen = 9,
326
};
327
 
328
static const struct intel_device_info intel_kabylake_gt3_info = {
329
	HSW_FEATURES,
330
	.is_preliminary = 1,
331
	.is_kabylake = 1,
332
	.gen = 9,
333
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
334
};
335
 
4104 Serge 336
/*
337
 * Make sure any device matches here are from most specific to most
338
 * general.  For example, since the Quanta match is based on the subsystem
339
 * and subvendor IDs, we need it to come before the more general IVB
340
 * PCI ID matches, otherwise we'll use the wrong info struct above.
341
 */
6937 serge 342
static const struct pci_device_id pciidlist[] = {
343
	INTEL_I915G_IDS(&intel_i915g_info),
344
	INTEL_I915GM_IDS(&intel_i915gm_info),
345
	INTEL_I945G_IDS(&intel_i945g_info),
346
	INTEL_I945GM_IDS(&intel_i945gm_info),
347
	INTEL_I965G_IDS(&intel_i965g_info),
348
	INTEL_G33_IDS(&intel_g33_info),
349
	INTEL_I965GM_IDS(&intel_i965gm_info),
350
	INTEL_GM45_IDS(&intel_gm45_info),
351
	INTEL_G45_IDS(&intel_g45_info),
352
	INTEL_PINEVIEW_IDS(&intel_pineview_info),
353
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
354
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
355
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
356
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
357
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
358
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
359
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
360
	INTEL_HSW_D_IDS(&intel_haswell_d_info),
361
	INTEL_HSW_M_IDS(&intel_haswell_m_info),
362
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),
363
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),
364
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
365
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
366
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
367
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
368
	INTEL_CHV_IDS(&intel_cherryview_info),
369
	INTEL_SKL_GT1_IDS(&intel_skylake_info),
370
	INTEL_SKL_GT2_IDS(&intel_skylake_info),
371
	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
372
	INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
373
	INTEL_BXT_IDS(&intel_broxton_info),
374
	INTEL_KBL_GT1_IDS(&intel_kabylake_info),
375
	INTEL_KBL_GT2_IDS(&intel_kabylake_info),
376
	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
377
	INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
6084 serge 378
	{0, 0, 0}
2325 Serge 379
};
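/*
 * Each INTEL_*_IDS() macro expands to one or more struct pci_device_id
 * entries whose ->driver_data points at the matching intel_device_info
 * above; i915_pci_probe() casts that pointer back when binding a device.
 */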
380
 
6937 serge 381
 
382
 
6084 serge 383
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
384
{
385
	enum intel_pch ret = PCH_NOP;
2325 Serge 386
 
6084 serge 387
	/*
388
	 * In a virtualized passthrough environment we can be in a
389
	 * setup where the ISA bridge is not able to be passed through.
390
	 * In this case, a south bridge can be emulated and we have to
391
	 * make an educated guess as to which PCH is really there.
392
	 */
393
 
394
	if (IS_GEN5(dev)) {
395
		ret = PCH_IBX;
396
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
397
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
398
		ret = PCH_CPT;
399
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
400
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
401
		ret = PCH_LPT;
402
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
6937 serge 403
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
6084 serge 404
		ret = PCH_SPT;
405
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
406
	}
407
 
408
	return ret;
409
}
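/*
 * Anything not matched above keeps the PCH_NOP default, i.e. no south
 * display engine is assumed for an unrecognized virtualized setup.
 */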
410
 
2342 Serge 411
void intel_detect_pch(struct drm_device *dev)
2326 Serge 412
{
6084 serge 413
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 414
	struct pci_dev *pch = NULL;
2326 Serge 415
 
3746 Serge 416
	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
417
	 * (which really amounts to a PCH but no South Display).
418
	 */
419
	if (INTEL_INFO(dev)->num_pipes == 0) {
420
		dev_priv->pch_type = PCH_NOP;
421
		return;
422
	}
423
 
6084 serge 424
	/*
425
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
426
	 * make graphics device passthrough work easily for a VMM, which only
427
	 * needs to expose the ISA bridge to let the driver know the real
428
	 * hardware underneath. This is a requirement from the virtualization team.
4104 Serge 429
	 *
430
	 * In some virtualized environments (e.g. XEN), there is an irrelevant
431
	 * ISA bridge in the system. To work reliably, we should scan through
432
	 * all the ISA bridge devices and check for the first match, instead
433
	 * of only checking the first one.
6084 serge 434
	 */
5060 serge 435
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
6084 serge 436
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
5060 serge 437
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
3243 Serge 438
			dev_priv->pch_id = id;
2326 Serge 439
 
6084 serge 440
			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
441
				dev_priv->pch_type = PCH_IBX;
442
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
3243 Serge 443
				WARN_ON(!IS_GEN5(dev));
6084 serge 444
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
445
				dev_priv->pch_type = PCH_CPT;
446
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
3243 Serge 447
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
6084 serge 448
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
449
				/* PantherPoint is CPT compatible */
450
				dev_priv->pch_type = PCH_CPT;
4560 Serge 451
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
3243 Serge 452
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
3031 serge 453
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
454
				dev_priv->pch_type = PCH_LPT;
455
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
6084 serge 456
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
457
				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
3243 Serge 458
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
459
				dev_priv->pch_type = PCH_LPT;
460
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
6084 serge 461
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
462
				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
5354 serge 463
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
464
				dev_priv->pch_type = PCH_SPT;
465
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
6937 serge 466
				WARN_ON(!IS_SKYLAKE(dev) &&
467
					!IS_KABYLAKE(dev));
5354 serge 468
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
469
				dev_priv->pch_type = PCH_SPT;
470
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
6937 serge 471
				WARN_ON(!IS_SKYLAKE(dev) &&
472
					!IS_KABYLAKE(dev));
6320 serge 473
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
474
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
475
				    pch->subsystem_vendor == 0x1af4 &&
476
				    pch->subsystem_device == 0x1100)) {
6084 serge 477
				dev_priv->pch_type = intel_virt_detect_pch(dev);
5060 serge 478
			} else
479
				continue;
480
 
4104 Serge 481
			break;
6084 serge 482
		}
483
	}
4104 Serge 484
	if (!pch)
5060 serge 485
		DRM_DEBUG_KMS("No PCH found.\n");
486
 
487
//	pci_dev_put(pch);
2326 Serge 488
}
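/*
 * The detected dev_priv->pch_type and pch_id are consulted by the display
 * code elsewhere in the driver (reference clocks, hotplug, etc.), which is
 * why unknown bridges are skipped above rather than treated as errors.
 */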
489
 
3031 serge 490
bool i915_semaphore_is_enabled(struct drm_device *dev)
2326 Serge 491
{
3031 serge 492
	if (INTEL_INFO(dev)->gen < 6)
4560 Serge 493
		return false;
2326 Serge 494
 
5060 serge 495
	if (i915.semaphores >= 0)
496
		return i915.semaphores;
497
 
5354 serge 498
	/* TODO: make semaphores and Execlists play nicely together */
499
	if (i915.enable_execlists)
500
		return false;
501
 
4560 Serge 502
	/* Until we get further testing... */
5060 serge 503
	if (IS_GEN8(dev))
4560 Serge 504
		return false;
505
 
3031 serge 506
#ifdef CONFIG_INTEL_IOMMU
507
	/* Enable semaphores on SNB when IO remapping is off */
508
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
509
		return false;
510
#endif
2326 Serge 511
 
4560 Serge 512
	return true;
2326 Serge 513
}
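/*
 * Precedence above: pre-gen6 hardware never uses semaphores; beyond that
 * an explicit i915.semaphores modparam wins, and only then do the
 * execlists and per-platform exceptions apply.
 */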
514
 
4104 Serge 515
#if 0
6084 serge 516
 
5060 serge 517
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
518
{
519
	struct drm_device *dev = dev_priv->dev;
6937 serge 520
	struct intel_encoder *encoder;
5060 serge 521
 
522
	drm_modeset_lock_all(dev);
6937 serge 523
	for_each_intel_encoder(dev, encoder)
524
		if (encoder->suspend)
525
			encoder->suspend(encoder);
5060 serge 526
	drm_modeset_unlock_all(dev);
527
}
528
 
5354 serge 529
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
530
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
531
			      bool rpm_resume);
6084 serge 532
static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
5354 serge 533
 
6937 serge 534
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
535
{
536
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
537
	if (acpi_target_system_state() < ACPI_STATE_S3)
538
		return true;
539
#endif
540
	return false;
541
}
6084 serge 542
 
5354 serge 543
static int i915_drm_suspend(struct drm_device *dev)
4104 Serge 544
{
545
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 546
	pci_power_t opregion_target_state;
6084 serge 547
	int error;
2342 Serge 548
 
4104 Serge 549
	/* ignore lid events during suspend */
550
	mutex_lock(&dev_priv->modeset_restore_lock);
551
	dev_priv->modeset_restore = MODESET_SUSPENDED;
552
	mutex_unlock(&dev_priv->modeset_restore_lock);
2342 Serge 553
 
6937 serge 554
	disable_rpm_wakeref_asserts(dev_priv);
555
 
4104 Serge 556
	/* We do a lot of poking in a lot of registers; make sure they work
557
	 * properly. */
5060 serge 558
	intel_display_set_init_power(dev_priv, true);
2342 Serge 559
 
4104 Serge 560
	drm_kms_helper_poll_disable(dev);
2342 Serge 561
 
4104 Serge 562
	pci_save_state(dev->pdev);
2325 Serge 563
 
6084 serge 564
	error = i915_gem_suspend(dev);
565
	if (error) {
566
		dev_err(&dev->pdev->dev,
567
			"GEM idle failed, resume might fail\n");
6937 serge 568
		goto out;
6084 serge 569
	}
4104 Serge 570
 
6084 serge 571
	intel_guc_suspend(dev);
4104 Serge 572
 
6084 serge 573
	intel_suspend_gt_powersave(dev);
5354 serge 574
 
6084 serge 575
	/*
576
	 * Disable CRTCs directly since we want to preserve sw state
577
	 * for _thaw. Also, power gate the CRTC power wells.
578
	 */
579
	drm_modeset_lock_all(dev);
580
	intel_display_suspend(dev);
581
	drm_modeset_unlock_all(dev);
4104 Serge 582
 
6084 serge 583
	intel_dp_mst_suspend(dev);
5060 serge 584
 
6084 serge 585
	intel_runtime_pm_disable_interrupts(dev_priv);
586
	intel_hpd_cancel_work(dev_priv);
5060 serge 587
 
6084 serge 588
	intel_suspend_encoders(dev_priv);
5060 serge 589
 
6084 serge 590
	intel_suspend_hw(dev);
4104 Serge 591
 
4560 Serge 592
	i915_gem_suspend_gtt_mappings(dev);
593
 
4104 Serge 594
	i915_save_state(dev);
595
 
6937 serge 596
	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
5060 serge 597
	intel_opregion_notify_adapter(dev, opregion_target_state);
598
 
599
	intel_uncore_forcewake_reset(dev, false);
4104 Serge 600
	intel_opregion_fini(dev);
601
 
5354 serge 602
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
4104 Serge 603
 
5060 serge 604
	dev_priv->suspend_count++;
605
 
606
	intel_display_set_init_power(dev_priv, false);
607
 
6937 serge 608
	if (HAS_CSR(dev_priv))
609
		flush_work(&dev_priv->csr.work);
610
 
611
out:
612
	enable_rpm_wakeref_asserts(dev_priv);
613
 
614
	return error;
4104 Serge 615
}
616
 
6084 serge 617
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
2325 Serge 618
{
5354 serge 619
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
6937 serge 620
	bool fw_csr;
5354 serge 621
	int ret;
622
 
6937 serge 623
	disable_rpm_wakeref_asserts(dev_priv);
624
 
625
	fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
626
	/*
627
	 * In case of firmware assisted context save/restore don't manually
628
	 * deinit the power domains. This also means the CSR/DMC firmware will
629
	 * stay active, it will power down any HW resources as required and
630
	 * also enable deeper system power states that would be blocked if the
631
	 * firmware was inactive.
632
	 */
633
	if (!fw_csr)
634
		intel_power_domains_suspend(dev_priv);
635
 
5354 serge 636
	ret = intel_suspend_complete(dev_priv);
637
 
638
	if (ret) {
639
		DRM_ERROR("Suspend complete failed: %d\n", ret);
6937 serge 640
		if (!fw_csr)
641
			intel_power_domains_init_hw(dev_priv, true);
5354 serge 642
 
6937 serge 643
		goto out;
5354 serge 644
	}
645
 
646
	pci_disable_device(drm_dev->pdev);
6084 serge 647
	/*
648
	 * During hibernation on some platforms the BIOS may try to access
649
	 * the device even though it's already in D3 and hang the machine. So
650
	 * leave the device in D0 on those platforms and hope the BIOS will
651
	 * power down the device properly. The issue was seen on multiple old
652
	 * GENs with different BIOS vendors, so having an explicit blacklist
653
	 * is impractical; apply the workaround on everything pre GEN6. The
654
	 * platforms where the issue was seen:
655
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
656
	 * Fujitsu FSC S7110
657
	 * Acer Aspire 1830T
658
	 */
659
	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
660
		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
5354 serge 661
 
6937 serge 662
	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
663
 
664
out:
665
	enable_rpm_wakeref_asserts(dev_priv);
666
 
667
	return ret;
5354 serge 668
}
669
 
6084 serge 670
int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
5354 serge 671
{
4104 Serge 672
	int error;
2325 Serge 673
 
4104 Serge 674
	if (!dev || !dev->dev_private) {
675
		DRM_ERROR("dev: %p\n", dev);
676
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
677
		return -ENODEV;
678
	}
2325 Serge 679
 
5354 serge 680
	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
681
			 state.event != PM_EVENT_FREEZE))
682
		return -EINVAL;
3031 serge 683
 
4104 Serge 684
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
685
		return 0;
2325 Serge 686
 
5354 serge 687
	error = i915_drm_suspend(dev);
4104 Serge 688
	if (error)
689
		return error;
3031 serge 690
 
6084 serge 691
	return i915_drm_suspend_late(dev, false);
4104 Serge 692
}
2325 Serge 693
 
5354 serge 694
static int i915_drm_resume(struct drm_device *dev)
4104 Serge 695
{
5060 serge 696
	struct drm_i915_private *dev_priv = dev->dev_private;
3260 Serge 697
 
6937 serge 698
	disable_rpm_wakeref_asserts(dev_priv);
699
 
6084 serge 700
	mutex_lock(&dev->struct_mutex);
701
	i915_gem_restore_gtt_mappings(dev);
702
	mutex_unlock(&dev->struct_mutex);
4560 Serge 703
 
4104 Serge 704
	i915_restore_state(dev);
705
	intel_opregion_setup(dev);
706
 
6084 serge 707
	intel_init_pch_refclk(dev);
708
	drm_mode_config_reset(dev);
4104 Serge 709
 
6084 serge 710
	/*
711
	 * Interrupts have to be enabled before any batches are run. If not the
712
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
713
	 * update/restore the context.
714
	 *
715
	 * Modeset enabling in intel_modeset_init_hw() also needs working
716
	 * interrupts.
717
	 */
718
	intel_runtime_pm_enable_interrupts(dev_priv);
4104 Serge 719
 
6084 serge 720
	mutex_lock(&dev->struct_mutex);
721
	if (i915_gem_init_hw(dev)) {
722
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
723
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
724
	}
725
	mutex_unlock(&dev->struct_mutex);
4104 Serge 726
 
6084 serge 727
	intel_guc_resume(dev);
4104 Serge 728
 
6084 serge 729
	intel_modeset_init_hw(dev);
5060 serge 730
 
6084 serge 731
	spin_lock_irq(&dev_priv->irq_lock);
732
	if (dev_priv->display.hpd_irq_setup)
733
		dev_priv->display.hpd_irq_setup(dev);
734
	spin_unlock_irq(&dev_priv->irq_lock);
4104 Serge 735
 
6084 serge 736
	drm_modeset_lock_all(dev);
737
	intel_display_resume(dev);
738
	drm_modeset_unlock_all(dev);
5354 serge 739
 
6084 serge 740
	intel_dp_mst_resume(dev);
4104 Serge 741
 
6084 serge 742
	/*
743
	 * ... but also need to make sure that hotplug processing
744
	 * doesn't cause havoc. Like in the driver load code we don't
745
	 * bother with the tiny race here where we might lose hotplug
746
	 * notifications.
747
	 * */
748
	intel_hpd_init(dev_priv);
749
	/* Config may have changed between suspend and resume */
750
	drm_helper_hpd_irq_event(dev);
751
 
4104 Serge 752
	intel_opregion_init(dev);
753
 
5354 serge 754
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
4104 Serge 755
 
756
	mutex_lock(&dev_priv->modeset_restore_lock);
757
	dev_priv->modeset_restore = MODESET_DONE;
758
	mutex_unlock(&dev_priv->modeset_restore_lock);
4560 Serge 759
 
5060 serge 760
	intel_opregion_notify_adapter(dev, PCI_D0);
761
 
5354 serge 762
	drm_kms_helper_poll_enable(dev);
763
 
6937 serge 764
	enable_rpm_wakeref_asserts(dev_priv);
765
 
5060 serge 766
	return 0;
4104 Serge 767
}
768
 
5354 serge 769
static int i915_drm_resume_early(struct drm_device *dev)
4104 Serge 770
{
5354 serge 771
	struct drm_i915_private *dev_priv = dev->dev_private;
6937 serge 772
	int ret;
4104 Serge 773
 
5060 serge 774
	/*
775
	 * We have a resume ordering issue with the snd-hda driver also
776
	 * requiring our device to be powered up. Due to the lack of a
777
	 * parent/child relationship we currently solve this with an early
778
	 * resume hook.
779
	 *
780
	 * FIXME: This should be solved with a special hdmi sink device or
781
	 * similar so that power domains can be employed.
782
	 */
4104 Serge 783
 
6937 serge 784
	/*
785
	 * Note that we need to set the power state explicitly, since we
786
	 * powered off the device during freeze and the PCI core won't power
787
	 * it back up for us during thaw. Powering off the device during
788
	 * freeze is not a hard requirement though, and during the
789
	 * suspend/resume phases the PCI core makes sure we get here with the
790
	 * device powered on. So in case we change our freeze logic and keep
791
	 * the device powered we can also remove the following set power state
792
	 * call.
793
	 */
794
	ret = pci_set_power_state(dev->pdev, PCI_D0);
795
	if (ret) {
796
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
797
		goto out;
798
	}
799
 
800
	/*
801
	 * Note that pci_enable_device() first enables any parent bridge
802
	 * device and only then sets the power state for this device. The
803
	 * bridge enabling is a nop though, since bridge devices are resumed
804
	 * first. The order of enabling power and enabling the device is
805
	 * imposed by the PCI core as described above, so here we preserve the
806
	 * same order for the freeze/thaw phases.
807
	 *
808
	 * TODO: eventually we should remove pci_disable_device() /
809
	 * pci_enable_device() from suspend/resume. Due to how they
810
	 * depend on the device enable refcount we can't anyway depend on them
811
	 * disabling/enabling the device.
812
	 */
813
	if (pci_enable_device(dev->pdev)) {
814
		ret = -EIO;
815
		goto out;
816
	}
817
 
4104 Serge 818
	pci_set_master(dev->pdev);
819
 
6937 serge 820
	disable_rpm_wakeref_asserts(dev_priv);
821
 
822
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5354 serge 823
		ret = vlv_resume_prepare(dev_priv, false);
824
	if (ret)
6084 serge 825
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
826
			  ret);
5354 serge 827
 
828
	intel_uncore_early_sanitize(dev, true);
829
 
6084 serge 830
	if (IS_BROXTON(dev))
831
		ret = bxt_resume_prepare(dev_priv);
832
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5354 serge 833
		hsw_disable_pc8(dev_priv);
834
 
835
	intel_uncore_sanitize(dev);
836
 
6937 serge 837
	if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
838
		intel_power_domains_init_hw(dev_priv, true);
839
 
840
out:
841
	dev_priv->suspended_to_idle = false;
842
 
843
	enable_rpm_wakeref_asserts(dev_priv);
844
 
5354 serge 845
	return ret;
5060 serge 846
}
847
 
6084 serge 848
int i915_resume_switcheroo(struct drm_device *dev)
5060 serge 849
{
850
	int ret;
851
 
5354 serge 852
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
853
		return 0;
854
 
855
	ret = i915_drm_resume_early(dev);
4104 Serge 856
	if (ret)
857
		return ret;
858
 
5354 serge 859
	return i915_drm_resume(dev);
4104 Serge 860
}
861
 
862
/**
863
 * i915_reset - reset chip after a hang
864
 * @dev: drm device to reset
865
 *
866
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
867
 * reset or otherwise an error code.
868
 *
869
 * Procedure is fairly simple:
870
 *   - reset the chip using the reset reg
871
 *   - re-init context state
872
 *   - re-init hardware status page
873
 *   - re-init ring buffer
874
 *   - re-init interrupt state
875
 *   - re-init display
876
 */
877
int i915_reset(struct drm_device *dev)
878
{
5060 serge 879
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 880
	bool simulated;
881
	int ret;
882
 
6084 serge 883
	intel_reset_gt_powersave(dev);
4104 Serge 884
 
885
	mutex_lock(&dev->struct_mutex);
886
 
887
	i915_gem_reset(dev);
888
 
889
	simulated = dev_priv->gpu_error.stop_rings != 0;
890
 
6084 serge 891
	ret = intel_gpu_reset(dev);
4104 Serge 892
 
6084 serge 893
	/* Also reset the gpu hangman. */
894
	if (simulated) {
895
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
896
		dev_priv->gpu_error.stop_rings = 0;
897
		if (ret == -ENODEV) {
4560 Serge 898
			DRM_INFO("Reset not implemented, but ignoring "
6084 serge 899
				 "error for simulated gpu hangs\n");
900
			ret = 0;
901
		}
4104 Serge 902
	}
4560 Serge 903
 
5354 serge 904
	if (i915_stop_ring_allow_warn(dev_priv))
905
		pr_notice("drm/i915: Resetting chip after gpu hang\n");
906
 
4104 Serge 907
	if (ret) {
4560 Serge 908
		DRM_ERROR("Failed to reset chip: %i\n", ret);
4104 Serge 909
		mutex_unlock(&dev->struct_mutex);
910
		return ret;
911
	}
912
 
6937 serge 913
	intel_overlay_reset(dev_priv);
914
 
4104 Serge 915
	/* Ok, now get things going again... */
916
 
917
	/*
918
	 * Everything depends on having the GTT running, so we need to start
919
	 * there.  Fortunately we don't need to do this unless we reset the
920
	 * chip at a PCI level.
921
	 *
922
	 * Next we need to restore the context, but we don't use those
923
	 * yet either...
924
	 *
925
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
926
	 * was running at the time of the reset (i.e. we weren't VT
927
	 * switched away).
928
	 */
929
 
6084 serge 930
	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
931
	dev_priv->gpu_error.reload_in_reset = true;
5354 serge 932
 
6084 serge 933
	ret = i915_gem_init_hw(dev);
5354 serge 934
 
6084 serge 935
	dev_priv->gpu_error.reload_in_reset = false;
4104 Serge 936
 
6084 serge 937
	mutex_unlock(&dev->struct_mutex);
938
	if (ret) {
939
		DRM_ERROR("Failed hw init on reset %d\n", ret);
940
		return ret;
4104 Serge 941
	}
942
 
6084 serge 943
	/*
944
	 * rps/rc6 re-init is necessary to restore state lost after the
945
	 * reset and the re-install of gt irqs. Skip for ironlake per
946
	 * previous concerns that it doesn't respond well to some forms
947
	 * of re-init after reset.
948
	 */
949
	if (INTEL_INFO(dev)->gen > 5)
950
		intel_enable_gt_powersave(dev);
951
 
4104 Serge 952
	return 0;
953
}
954
 
955
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
956
{
957
	struct intel_device_info *intel_info =
958
		(struct intel_device_info *) ent->driver_data;
959
 
5060 serge 960
	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
4560 Serge 961
		DRM_INFO("This hardware requires preliminary hardware support.\n"
962
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
963
		return -ENODEV;
964
	}
965
 
4104 Serge 966
	/* Only bind to function 0 of the device. Early generations
967
	 * used function 1 as a placeholder for multi-head. This causes
968
	 * us confusion instead, especially on the systems where both
969
	 * functions have the same PCI-ID!
970
	 */
971
	if (PCI_FUNC(pdev->devfn))
972
		return -ENODEV;
973
 
974
	return drm_get_pci_dev(pdev, ent, &driver);
975
}
976
 
977
static void
978
i915_pci_remove(struct pci_dev *pdev)
979
{
980
	struct drm_device *dev = pci_get_drvdata(pdev);
981
 
982
	drm_put_dev(dev);
983
}
984
 
985
static int i915_pm_suspend(struct device *dev)
986
{
987
	struct pci_dev *pdev = to_pci_dev(dev);
988
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
989
 
990
	if (!drm_dev || !drm_dev->dev_private) {
991
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
992
		return -ENODEV;
993
	}
994
 
995
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
996
		return 0;
997
 
5354 serge 998
	return i915_drm_suspend(drm_dev);
5060 serge 999
}
4104 Serge 1000
 
5060 serge 1001
static int i915_pm_suspend_late(struct device *dev)
1002
{
6084 serge 1003
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
5060 serge 1004
 
1005
	/*
6084 serge 1006
	 * We have a suspend ordering issue with the snd-hda driver also
5060 serge 1007
	 * requiring our device to be powered up. Due to the lack of a
1008
	 * parent/child relationship we currently solve this with a late
1009
	 * suspend hook.
1010
	 *
1011
	 * FIXME: This should be solved with a special hdmi sink device or
1012
	 * similar so that power domains can be employed.
1013
	 */
1014
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1015
		return 0;
1016
 
6084 serge 1017
	return i915_drm_suspend_late(drm_dev, false);
4104 Serge 1018
}
1019
 
6084 serge 1020
static int i915_pm_poweroff_late(struct device *dev)
1021
{
1022
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
1023
 
1024
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1025
		return 0;
1026
 
1027
	return i915_drm_suspend_late(drm_dev, true);
1028
}
1029
 
5060 serge 1030
static int i915_pm_resume_early(struct device *dev)
1031
{
6084 serge 1032
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
5060 serge 1033
 
5354 serge 1034
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1035
		return 0;
1036
 
1037
	return i915_drm_resume_early(drm_dev);
5060 serge 1038
}
1039
 
4104 Serge 1040
static int i915_pm_resume(struct device *dev)
1041
{
6084 serge 1042
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
4104 Serge 1043
 
5354 serge 1044
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1045
		return 0;
4104 Serge 1046
 
5354 serge 1047
	return i915_drm_resume(drm_dev);
4104 Serge 1048
}
1049
 
5354 serge 1050
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
5060 serge 1051
{
1052
	hsw_enable_pc8(dev_priv);
4560 Serge 1053
 
5060 serge 1054
	return 0;
1055
}
1056
 
6084 serge 1057
static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
1058
{
1059
	struct drm_device *dev = dev_priv->dev;
1060
 
1061
	/* TODO: when DC5 support is added disable DC5 here. */
1062
 
1063
	broxton_ddi_phy_uninit(dev);
1064
	broxton_uninit_cdclk(dev);
1065
	bxt_enable_dc9(dev_priv);
1066
 
1067
	return 0;
1068
}
1069
 
1070
static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
1071
{
1072
	struct drm_device *dev = dev_priv->dev;
1073
 
1074
	/* TODO: when CSR FW support is added make sure the FW is loaded */
1075
 
1076
	bxt_disable_dc9(dev_priv);
1077
 
1078
	/*
1079
	 * TODO: when DC5 support is added enable DC5 here if the CSR FW
1080
	 * is available.
1081
	 */
1082
	broxton_init_cdclk(dev);
1083
	broxton_ddi_phy_init(dev);
1084
	intel_prepare_ddi(dev);
1085
 
1086
	return 0;
1087
}
1088
 
5060 serge 1089
/*
1090
 * Save all Gunit registers that may be lost after a D3 and a subsequent
1091
 * S0i[R123] transition. The list of registers needing a save/restore is
1092
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
1093
 * registers in the following way:
1094
 * - Driver: saved/restored by the driver
1095
 * - Punit : saved/restored by the Punit firmware
1096
 * - No, w/o marking: no need to save/restore, since the register is R/O or
1097
 *                    used internally by the HW in a way that doesn't depend
1098
 *                    on keeping the content across a suspend/resume.
1099
 * - Debug : used for debugging
1100
 *
1101
 * We save/restore all registers marked with 'Driver', with the following
1102
 * exceptions:
1103
 * - Registers out of use, including also registers marked with 'Debug'.
1104
 *   These have no effect on the driver's operation, so we don't save/restore
1105
 *   them to reduce the overhead.
1106
 * - Registers that are fully setup by an initialization function called from
1107
 *   the resume path. For example many clock gating and RPS/RC6 registers.
1108
 * - Registers that provide the right functionality with their reset defaults.
1109
 *
1110
 * TODO: Except for registers that based on the above 3 criteria can be safely
1111
 * ignored, we save/restore all others, practically treating the HW context as
1112
 * a black-box for the driver. Further investigation is needed to reduce the
1113
 * saved/restored registers even further, by following the same 3 criteria.
1114
 */
1115
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1116
{
1117
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1118
	int i;
1119
 
1120
	/* GAM 0x4000-0x4770 */
1121
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
1122
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
1123
	s->arb_mode		= I915_READ(ARB_MODE);
1124
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
1125
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);
1126
 
1127
	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
6084 serge 1128
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
5060 serge 1129
 
1130
	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
6084 serge 1131
	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);
5060 serge 1132
 
1133
	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
1134
	s->ecochk		= I915_READ(GAM_ECOCHK);
1135
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
1136
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);
1137
 
1138
	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);
1139
 
1140
	/* MBC 0x9024-0x91D0, 0x8500 */
1141
	s->g3dctl		= I915_READ(VLV_G3DCTL);
1142
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
1143
	s->mbctl		= I915_READ(GEN6_MBCTL);
1144
 
1145
	/* GCP 0x9400-0x9424, 0x8100-0x810C */
1146
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
1147
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
1148
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
1149
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
1150
	s->rstctl		= I915_READ(GEN6_RSTCTL);
1151
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);
1152
 
1153
	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1154
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
1155
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
1156
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
1157
	s->ecobus		= I915_READ(ECOBUS);
1158
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
1159
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
1160
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
1161
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
1162
	s->rcedata		= I915_READ(VLV_RCEDATA);
1163
	s->spare2gh		= I915_READ(VLV_SPAREG2H);
1164
 
1165
	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1166
	s->gt_imr		= I915_READ(GTIMR);
1167
	s->gt_ier		= I915_READ(GTIER);
1168
	s->pm_imr		= I915_READ(GEN6_PMIMR);
1169
	s->pm_ier		= I915_READ(GEN6_PMIER);
1170
 
1171
	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
6084 serge 1172
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
5060 serge 1173
 
1174
	/* GT SA CZ domain, 0x100000-0x138124 */
1175
	s->tilectl		= I915_READ(TILECTL);
1176
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
1177
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
1178
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1179
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);
1180
 
1181
	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
1182
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
1183
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
6084 serge 1184
	s->pcbr			= I915_READ(VLV_PCBR);
5060 serge 1185
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);
1186
 
1187
	/*
1188
	 * Not saving any of:
1189
	 * DFT,		0x9800-0x9EC0
1190
	 * SARB,	0xB000-0xB1FC
1191
	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
1192
	 * PCI CFG
1193
	 */
1194
}
1195
 
1196
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1197
{
1198
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1199
	u32 val;
1200
	int i;
1201
 
1202
	/* GAM 0x4000-0x4770 */
1203
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
1204
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
1205
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
1206
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
1207
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);
1208
 
1209
	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
6084 serge 1210
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
5060 serge 1211
 
1212
	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
6084 serge 1213
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
5060 serge 1214
 
1215
	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
1216
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
1217
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
1218
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);
1219
 
1220
	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);
1221
 
1222
	/* MBC 0x9024-0x91D0, 0x8500 */
1223
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
1224
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
1225
	I915_WRITE(GEN6_MBCTL,		s->mbctl);
1226
 
1227
	/* GCP 0x9400-0x9424, 0x8100-0x810C */
1228
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
1229
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
1230
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
1231
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
1232
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
1233
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);
1234
 
1235
	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1236
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
1237
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
1238
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
1239
	I915_WRITE(ECOBUS,		s->ecobus);
1240
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
1241
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
1242
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
1243
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
1244
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
1245
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);
1246
 
1247
	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1248
	I915_WRITE(GTIMR,		s->gt_imr);
1249
	I915_WRITE(GTIER,		s->gt_ier);
1250
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
1251
	I915_WRITE(GEN6_PMIER,		s->pm_ier);
1252
 
1253
	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
6084 serge 1254
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
5060 serge 1255
 
1256
	/* GT SA CZ domain, 0x100000-0x138124 */
1257
	I915_WRITE(TILECTL,			s->tilectl);
1258
	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
1259
	/*
1260
	 * Preserve the GT allow wake and GFX force clock bits; they are not
1261
	 * restored, as they are used to control the s0ix suspend/resume
1262
	 * sequence by the caller.
1263
	 */
1264
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
1265
	val &= VLV_GTLC_ALLOWWAKEREQ;
1266
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
1267
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1268
 
1269
	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1270
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
1271
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
1272
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1273
 
1274
	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);
1275
 
1276
	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
1277
	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
1278
	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
6084 serge 1279
	I915_WRITE(VLV_PCBR,			s->pcbr);
5060 serge 1280
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
1281
}
4104 Serge 1282
#endif
1283
 
5060 serge 1284
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1285
{
1286
	u32 val;
1287
	int err;
1288
 
1289
#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
1290
 
1291
	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1292
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
1293
	if (force_on)
1294
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
1295
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1296
 
1297
	if (!force_on)
1298
		return 0;
1299
 
1300
	err = wait_for(COND, 20);
1301
	if (err)
1302
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
1303
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
1304
 
1305
	return err;
1306
#undef COND
1307
}
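/*
 * Callers are expected to pair vlv_force_gfx_clock(dev_priv, true) with a
 * matching (..., false) call once the Gunit accesses that needed the
 * forced-on clock are done, as vlv_suspend_complete() below does.
 */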
1308
#if 0
1309
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
1310
{
1311
	u32 val;
1312
	int err = 0;
1313
 
1314
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
1315
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
1316
	if (allow)
1317
		val |= VLV_GTLC_ALLOWWAKEREQ;
1318
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1319
	POSTING_READ(VLV_GTLC_WAKE_CTRL);
1320
 
1321
#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
1322
	      allow)
1323
	err = wait_for(COND, 1);
1324
	if (err)
1325
		DRM_ERROR("timeout disabling GT waking\n");
1326
	return err;
1327
#undef COND
1328
}
1329
 
1330
static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
1331
				 bool wait_for_on)
1332
{
1333
	u32 mask;
1334
	u32 val;
1335
	int err;
1336
 
1337
	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
1338
	val = wait_for_on ? mask : 0;
1339
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
1340
	if (COND)
1341
		return 0;
1342
 
1343
	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
1344
			wait_for_on ? "on" : "off",
1345
			I915_READ(VLV_GTLC_PW_STATUS));
1346
 
1347
	/*
1348
	 * RC6 transitioning can be delayed up to 2 msec (see
1349
	 * valleyview_enable_rps), use 3 msec for safety.
1350
	 */
1351
	err = wait_for(COND, 3);
1352
	if (err)
1353
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
1354
			  wait_for_on ? "on" : "off");
1355
 
1356
	return err;
1357
#undef COND
1358
}
1359
 
1360
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
1361
{
1362
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
1363
		return;
1364
 
1365
	DRM_ERROR("GT register access while GT waking disabled\n");
1366
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
1367
}
1368
 
5354 serge 1369
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
5060 serge 1370
{
1371
	u32 mask;
1372
	int err;
1373
 
1374
	/*
1375
	 * Bspec defines the following GT well on flags as debug only, so
1376
	 * don't treat them as hard failures.
1377
	 */
1378
	(void)vlv_wait_for_gt_wells(dev_priv, false);
1379
 
1380
	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
1381
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
1382
 
1383
	vlv_check_no_gt_access(dev_priv);
1384
 
1385
	err = vlv_force_gfx_clock(dev_priv, true);
1386
	if (err)
1387
		goto err1;
1388
 
1389
	err = vlv_allow_gt_wake(dev_priv, false);
1390
	if (err)
1391
		goto err2;
1392
 
6084 serge 1393
	if (!IS_CHERRYVIEW(dev_priv->dev))
1394
		vlv_save_gunit_s0ix_state(dev_priv);
1395
 
5060 serge 1396
	err = vlv_force_gfx_clock(dev_priv, false);
1397
	if (err)
1398
		goto err2;
1399
 
1400
	return 0;
1401
 
1402
err2:
1403
	/* For safety always re-enable waking and disable gfx clock forcing */
1404
	vlv_allow_gt_wake(dev_priv, true);
1405
err1:
1406
	vlv_force_gfx_clock(dev_priv, false);
1407
 
1408
	return err;
1409
}
1410
 
5354 serge 1411
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1412
				bool rpm_resume)
5060 serge 1413
{
1414
	struct drm_device *dev = dev_priv->dev;
1415
	int err;
1416
	int ret;
1417
 
1418
	/*
1419
	 * If any of the steps fail just try to continue, that's the best we
1420
	 * can do at this point. Return the first error code (which will also
1421
	 * leave RPM permanently disabled).
1422
	 */
1423
	ret = vlv_force_gfx_clock(dev_priv, true);
1424
 
6084 serge 1425
	if (!IS_CHERRYVIEW(dev_priv->dev))
1426
		vlv_restore_gunit_s0ix_state(dev_priv);
5060 serge 1427
 
1428
	err = vlv_allow_gt_wake(dev_priv, true);
1429
	if (!ret)
1430
		ret = err;
1431
 
1432
	err = vlv_force_gfx_clock(dev_priv, false);
1433
	if (!ret)
1434
		ret = err;
1435
 
1436
	vlv_check_no_gt_access(dev_priv);
1437
 
5354 serge 1438
	if (rpm_resume) {
6084 serge 1439
		intel_init_clock_gating(dev);
1440
		i915_gem_restore_fences(dev);
5354 serge 1441
	}
5060 serge 1442
 
1443
	return ret;
1444
}
1445
 
1446
static int intel_runtime_suspend(struct device *device)
1447
{
1448
	struct pci_dev *pdev = to_pci_dev(device);
1449
	struct drm_device *dev = pci_get_drvdata(pdev);
1450
	struct drm_i915_private *dev_priv = dev->dev_private;
1451
	int ret;
1452
 
1453
	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
1454
		return -ENODEV;
1455
 
5354 serge 1456
	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1457
		return -ENODEV;
1458
 
5060 serge 1459
	DRM_DEBUG_KMS("Suspending device\n");
1460
 
1461
	/*
1462
	 * We could deadlock here in case another thread holding struct_mutex
1463
	 * calls RPM suspend concurrently, since the RPM suspend will wait
1464
	 * first for this RPM suspend to finish. In this case the concurrent
1465
	 * RPM resume will be followed by its RPM suspend counterpart. Still
1466
	 * for consistency return -EAGAIN, which will reschedule this suspend.
1467
	 */
1468
	if (!mutex_trylock(&dev->struct_mutex)) {
1469
		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
1470
		/*
1471
		 * Bump the expiration timestamp, otherwise the suspend won't
1472
		 * be rescheduled.
1473
		 */
1474
		pm_runtime_mark_last_busy(device);
1475
 
1476
		return -EAGAIN;
1477
	}
6937 serge 1478
 
1479
	disable_rpm_wakeref_asserts(dev_priv);
1480
 
5060 serge 1481
	/*
1482
	 * We are safe here against re-faults, since the fault handler takes
1483
	 * an RPM reference.
1484
	 */
1485
	i915_gem_release_all_mmaps(dev_priv);
1486
	mutex_unlock(&dev->struct_mutex);
1487
 
6937 serge 1488
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
1489
 
6084 serge 1490
	intel_guc_suspend(dev);
1491
 
5354 serge 1492
	intel_suspend_gt_powersave(dev);
1493
	intel_runtime_pm_disable_interrupts(dev_priv);
5060 serge 1494
 
5354 serge 1495
	ret = intel_suspend_complete(dev_priv);
5060 serge 1496
	if (ret) {
1497
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
5354 serge 1498
		intel_runtime_pm_enable_interrupts(dev_priv);
5060 serge 1499
 
6937 serge 1500
		enable_rpm_wakeref_asserts(dev_priv);
1501
 
5060 serge 1502
		return ret;
1503
	}
1504
 
6084 serge 1505
	intel_uncore_forcewake_reset(dev, false);
6937 serge 1506
 
1507
	enable_rpm_wakeref_asserts(dev_priv);
1508
	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
5060 serge 1509
	dev_priv->pm.suspended = true;
1510
 
1511
	/*
5354 serge 1512
	 * FIXME: We really should find a document that references the arguments
1513
	 * used below!
1514
	 */
6084 serge 1515
	if (IS_BROADWELL(dev)) {
5354 serge 1516
		/*
1517
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
1518
		 * being detected, and the call we do at intel_runtime_resume()
1519
		 * won't be able to restore them. Since PCI_D3hot matches the
6084 serge 1520
		 * actual specification and appears to be working, use it.
5354 serge 1521
		 */
1522
		intel_opregion_notify_adapter(dev, PCI_D3hot);
6084 serge 1523
	} else {
1524
		/*
1525
		 * current versions of firmware which depend on this opregion
1526
		 * notification have repurposed the D1 definition to mean
1527
		 * "runtime suspended" vs. what you would normally expect (D3)
1528
		 * to distinguish it from notifications that might be sent via
1529
		 * the suspend path.
1530
		 */
1531
		intel_opregion_notify_adapter(dev, PCI_D1);
5354 serge 1532
	}
5060 serge 1533
 
6084 serge 1534
	assert_forcewakes_inactive(dev_priv);
1535
 
5060 serge 1536
	DRM_DEBUG_KMS("Device suspended\n");
1537
	return 0;
1538
}
1539
 
1540
static int intel_runtime_resume(struct device *device)
1541
{
1542
	struct pci_dev *pdev = to_pci_dev(device);
1543
	struct drm_device *dev = pci_get_drvdata(pdev);
1544
	struct drm_i915_private *dev_priv = dev->dev_private;
5354 serge 1545
	int ret = 0;
5060 serge 1546
 
5354 serge 1547
	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1548
		return -ENODEV;
5060 serge 1549
 
1550
	DRM_DEBUG_KMS("Resuming device\n");
1551
 
6937 serge 1552
	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
1553
	disable_rpm_wakeref_asserts(dev_priv);
1554
 
5060 serge 1555
	intel_opregion_notify_adapter(dev, PCI_D0);
1556
	dev_priv->pm.suspended = false;
1557
 
6084 serge 1558
	intel_guc_resume(dev);
1559
 
5354 serge 1560
	if (IS_GEN6(dev_priv))
1561
		intel_init_pch_refclk(dev);
6084 serge 1562
 
1563
	if (IS_BROXTON(dev))
1564
		ret = bxt_resume_prepare(dev_priv);
5354 serge 1565
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1566
		hsw_disable_pc8(dev_priv);
6937 serge 1567
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5354 serge 1568
		ret = vlv_resume_prepare(dev_priv, true);
5060 serge 1569
 
1570
	/*
1571
	 * No point in rolling back things in case of an error, as the best
1572
	 * we can do is to hope that things will still work (and disable RPM).
1573
	 */
1574
	i915_gem_init_swizzling(dev);
1575
	gen6_update_ring_freq(dev);
1576
 
5354 serge 1577
	intel_runtime_pm_enable_interrupts(dev_priv);
6084 serge 1578
 
1579
	/*
1580
	 * On VLV/CHV display interrupts are part of the display
1581
	 * power well, so hpd is reinitialized from there. For
1582
	 * everyone else do it here.
1583
	 */
6937 serge 1584
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
6084 serge 1585
		intel_hpd_init(dev_priv);
1586
 
5354 serge 1587
	intel_enable_gt_powersave(dev);
5060 serge 1588
 
6937 serge 1589
	enable_rpm_wakeref_asserts(dev_priv);
1590
 
5060 serge 1591
	if (ret)
1592
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
1593
	else
1594
		DRM_DEBUG_KMS("Device resumed\n");
1595
 
1596
	return ret;
1597
}
1598
 
5354 serge 1599
/*
1600
 * This function implements functionality common to the runtime and system
1601
 * suspend sequences.
1602
 */
1603
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
1604
{
1605
	int ret;
1606
 
6084 serge 1607
	if (IS_BROXTON(dev_priv))
1608
		ret = bxt_suspend_complete(dev_priv);
1609
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5354 serge 1610
		ret = hsw_suspend_complete(dev_priv);
6937 serge 1611
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5354 serge 1612
		ret = vlv_suspend_complete(dev_priv);
1613
	else
1614
		ret = 0;
1615
 
1616
	return ret;
1617
}
1618
 
5060 serge 1619
static const struct dev_pm_ops i915_pm_ops = {
5354 serge 1620
	/*
1621
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
1622
	 * PMSG_RESUME]
1623
	 */
5060 serge 1624
	.suspend = i915_pm_suspend,
1625
	.suspend_late = i915_pm_suspend_late,
1626
	.resume_early = i915_pm_resume_early,
1627
	.resume = i915_pm_resume,
5354 serge 1628
 
1629
	/*
1630
	 * S4 event handlers
1631
	 * @freeze, @freeze_late    : called (1) before creating the
1632
	 *                            hibernation image [PMSG_FREEZE] and
1633
	 *                            (2) after rebooting, before restoring
1634
	 *                            the image [PMSG_QUIESCE]
1635
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
1636
	 *                            image, before writing it [PMSG_THAW]
1637
	 *                            and (2) after failing to create or
1638
	 *                            restore the image [PMSG_RECOVER]
1639
	 * @poweroff, @poweroff_late: called after writing the hibernation
1640
	 *                            image, before rebooting [PMSG_HIBERNATE]
1641
	 * @restore, @restore_early : called after rebooting and restoring the
1642
	 *                            hibernation image [PMSG_RESTORE]
1643
	 */
1644
	.freeze = i915_pm_suspend,
1645
	.freeze_late = i915_pm_suspend_late,
1646
	.thaw_early = i915_pm_resume_early,
1647
	.thaw = i915_pm_resume,
1648
	.poweroff = i915_pm_suspend,
6084 serge 1649
	.poweroff_late = i915_pm_poweroff_late,
5060 serge 1650
	.restore_early = i915_pm_resume_early,
1651
	.restore = i915_pm_resume,
5354 serge 1652
 
1653
	/* S0ix (via runtime suspend) event handlers */
5060 serge 1654
	.runtime_suspend = intel_runtime_suspend,
1655
	.runtime_resume = intel_runtime_resume,
1656
};
1657
 
1658
static const struct vm_operations_struct i915_gem_vm_ops = {
1659
	.fault = i915_gem_fault,
1660
	.open = drm_gem_vm_open,
1661
	.close = drm_gem_vm_close,
1662
};
1663
 
1664
static const struct file_operations i915_driver_fops = {
1665
	.owner = THIS_MODULE,
1666
	.open = drm_open,
1667
	.release = drm_release,
1668
	.unlocked_ioctl = drm_ioctl,
1669
	.mmap = drm_gem_mmap,
1670
	.poll = drm_poll,
1671
	.read = drm_read,
1672
#ifdef CONFIG_COMPAT
1673
	.compat_ioctl = i915_compat_ioctl,
1674
#endif
1675
	.llseek = noop_llseek,
1676
};
1677
#endif
1678
 
3260 Serge 1679
static struct drm_driver driver = {
6084 serge 1680
	/* Don't use MTRRs here; the Xserver or userspace app should
1681
	 * deal with them for Intel hardware.
1682
	 */
1683
	.driver_features =
4104 Serge 1684
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
6084 serge 1685
	    DRIVER_RENDER | DRIVER_MODESET,
1686
	.load = i915_driver_load,
3260 Serge 1687
//    .unload = i915_driver_unload,
3263 Serge 1688
      .open = i915_driver_open,
3260 Serge 1689
//    .lastclose = i915_driver_lastclose,
1690
//    .preclose = i915_driver_preclose,
1691
//    .postclose = i915_driver_postclose,
6084 serge 1692
//	.set_busid = drm_pci_set_busid,
3260 Serge 1693
 
4104 Serge 1694
#if defined(CONFIG_DEBUG_FS)
1695
	.debugfs_init = i915_debugfs_init,
1696
	.debugfs_cleanup = i915_debugfs_cleanup,
1697
#endif
3260 Serge 1698
    .gem_free_object = i915_gem_free_object,
1699
 
1700
//    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1701
//    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1702
//    .gem_prime_export = i915_gem_prime_export,
1703
//    .gem_prime_import = i915_gem_prime_import,
1704
 
1705
//    .dumb_create = i915_gem_dumb_create,
1706
//    .dumb_map_offset = i915_gem_mmap_gtt,
1707
//    .dumb_destroy = i915_gem_dumb_destroy,
1708
//    .ioctls = i915_ioctls,
1709
//    .fops = &i915_driver_fops,
1710
//    .name = DRIVER_NAME,
1711
//    .desc = DRIVER_DESC,
1712
//    .date = DRIVER_DATE,
1713
//    .major = DRIVER_MAJOR,
1714
//    .minor = DRIVER_MINOR,
1715
//    .patchlevel = DRIVER_PATCHLEVEL,
1716
};
1717
 
1718
 
3243 Serge 1719
 
3255 Serge 1720
 
4104 Serge 1721
int i915_init(void)
1722
{
1723
    static pci_dev_t device;
1724
    const struct pci_device_id  *ent;
1725
    int  err;
2325 Serge 1726
 
4104 Serge 1727
    ent = find_pci_device(&device, pciidlist);
1728
    if( unlikely(ent == NULL) )
1729
    {
1730
        dbgprintf("device not found\n");
1731
        return -ENODEV;
1732
    };
2325 Serge 1733
 
4104 Serge 1734
    drm_core_init();
3255 Serge 1735
 
4104 Serge 1736
    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
1737
                                device.pci_dev.device);
2325 Serge 1738
 
6937 serge 1739
    driver.driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
4293 Serge 1740
 
4104 Serge 1741
    err = drm_get_pci_dev(&device.pci_dev, ent, &driver);
3263 Serge 1742
 
4104 Serge 1743
    return err;
1744
}
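/*
 * i915_init() is the entry point of this KolibriOS port: it scans the PCI
 * bus for a device listed in pciidlist, brings up the DRM core, and then
 * binds the device through drm_get_pci_dev() using the driver template
 * above.
 */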
2325 Serge 1745
 
2330 Serge 1746
 
6084 serge 1747
MODULE_AUTHOR("Tungsten Graphics, Inc.");
1748
MODULE_AUTHOR("Intel Corporation");
2325 Serge 1749
 
6084 serge 1750
MODULE_DESCRIPTION(DRIVER_DESC);
1751
MODULE_LICENSE("GPL and additional rights");